Skip to content

Commit

Permalink
feat(client-bedrock-runtime): This release introduces Guardrails for …
Browse files Browse the repository at this point in the history
…Amazon Bedrock.
  • Loading branch information
awstools committed Apr 23, 2024
1 parent 4fcf9f7 commit 7ddf8df
Show file tree
Hide file tree
Showing 9 changed files with 253 additions and 41 deletions.
2 changes: 1 addition & 1 deletion clients/client-bedrock-runtime/README.md
Expand Up @@ -6,7 +6,7 @@

AWS SDK for JavaScript BedrockRuntime Client for Node.js, Browser and React Native.

<p>Describes the API operations for running inference using Bedrock models.</p>
<p>Describes the API operations for running inference using Amazon Bedrock models.</p>

## Installing

Expand Down
2 changes: 1 addition & 1 deletion clients/client-bedrock-runtime/src/BedrockRuntime.ts
Expand Up @@ -46,7 +46,7 @@ export interface BedrockRuntime {
}

/**
* <p>Describes the API operations for running inference using Bedrock models.</p>
* <p>Describes the API operations for running inference using Amazon Bedrock models.</p>
* @public
*/
export class BedrockRuntime extends BedrockRuntimeClient implements BedrockRuntime {}
Expand Down
2 changes: 1 addition & 1 deletion clients/client-bedrock-runtime/src/BedrockRuntimeClient.ts
Expand Up @@ -264,7 +264,7 @@ export type BedrockRuntimeClientResolvedConfigType = __SmithyResolvedConfigurati
export interface BedrockRuntimeClientResolvedConfig extends BedrockRuntimeClientResolvedConfigType {}

/**
* <p>Describes the API operations for running inference using Bedrock models.</p>
* <p>Describes the API operations for running inference using Amazon Bedrock models.</p>
* @public
*/
export class BedrockRuntimeClient extends __Client<
Expand Down
Expand Up @@ -47,10 +47,11 @@ export type InvokeModelCommandOutputType = Omit<InvokeModelResponse, "body"> & {
export interface InvokeModelCommandOutput extends InvokeModelCommandOutputType, __MetadataBearer {}

/**
* <p>Invokes the specified Bedrock model to run inference using the input provided in the request body.
* You use InvokeModel to run inference for text models, image models, and embedding models.</p>
* <p>For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
* <p>For example requests, see Examples (after the Errors section).</p>
* <p>Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body.
* You use model inference to generate text, images, and embeddings.</p>
* <p>For example code, see <i>Invoke model code examples</i> in the <i>Amazon Bedrock User Guide</i>.
* </p>
* <p>This operation requires permission for the <code>bedrock:InvokeModel</code> action.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
Expand All @@ -62,6 +63,9 @@ export interface InvokeModelCommandOutput extends InvokeModelCommandOutputType,
* contentType: "STRING_VALUE",
* accept: "STRING_VALUE",
* modelId: "STRING_VALUE", // required
* trace: "ENABLED" || "DISABLED",
* guardrailIdentifier: "STRING_VALUE",
* guardrailVersion: "STRING_VALUE",
* };
* const command = new InvokeModelCommand(input);
* const response = await client.send(command);
Expand Down
Expand Up @@ -44,10 +44,16 @@ export interface InvokeModelWithResponseStreamCommandOutput
__MetadataBearer {}

/**
* <p>Invoke the specified Bedrock model to run inference using the input provided.
* Return the response in a stream.</p>
* <p>For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
* <p>For an example request and response, see Examples (after the Errors section).</p>
* <p>Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.</p>
* <p>To see if a model supports streaming, call <a href="https://docs.aws.amazon.com/bedrock/latest/APIReference/API_GetFoundationModel.html">GetFoundationModel</a>
* and check the <code>responseStreamingSupported</code> field in the response.</p>
* <note>
* <p>The CLI doesn't support <code>InvokeModelWithResponseStream</code>.</p>
* </note>
* <p>For example code, see <i>Invoke model with streaming code
* example</i> in the <i>Amazon Bedrock User Guide</i>.
* </p>
* <p>This operation requires permissions to perform the <code>bedrock:InvokeModelWithResponseStream</code> action. </p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
Expand All @@ -59,6 +65,9 @@ export interface InvokeModelWithResponseStreamCommandOutput
* contentType: "STRING_VALUE",
* accept: "STRING_VALUE",
* modelId: "STRING_VALUE", // required
* trace: "ENABLED" || "DISABLED",
* guardrailIdentifier: "STRING_VALUE",
* guardrailVersion: "STRING_VALUE",
* };
* const command = new InvokeModelWithResponseStreamCommand(input);
* const response = await client.send(command);
Expand Down Expand Up @@ -109,7 +118,7 @@ export interface InvokeModelWithResponseStreamCommandOutput
* <p>The model specified in the request is not ready to serve inference requests.</p>
*
* @throws {@link ModelStreamErrorException} (client fault)
* <p>An error occurred while streaming the response.</p>
* <p>An error occurred while streaming the response. Retry your request.</p>
*
* @throws {@link ModelTimeoutException} (client fault)
* <p>The request took too long to process. Processing time exceeded the model timeout length.</p>
Expand Down
2 changes: 1 addition & 1 deletion clients/client-bedrock-runtime/src/index.ts
@@ -1,7 +1,7 @@
// smithy-typescript generated code
/* eslint-disable */
/**
* <p>Describes the API operations for running inference using Bedrock models.</p>
* <p>Describes the API operations for running inference using Amazon Bedrock models.</p>
*
* @packageDocumentation
*/
Expand Down
122 changes: 110 additions & 12 deletions clients/client-bedrock-runtime/src/models/models_0.ts
Expand Up @@ -43,12 +43,26 @@ export class InternalServerException extends __BaseException {
}
}

/**
 * Allowed values for the Amazon Bedrock trace setting supplied on invoke
 * requests (see the `trace` member of the request shapes).
 * @public
 * @enum
 */
export const Trace = {
  ENABLED: "ENABLED",
  DISABLED: "DISABLED",
} as const;

/**
 * The union of string literal values held by {@link Trace}.
 * @public
 */
export type Trace = (typeof Trace)[keyof typeof Trace];

/**
* @public
*/
export interface InvokeModelRequest {
/**
* <p>Input data in the format specified in the content-type request header. To see the format and content of this field for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>.</p>
* <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
* @public
*/
body: Uint8Array | undefined;
Expand All @@ -61,25 +75,67 @@ export interface InvokeModelRequest {
contentType?: string;

/**
* <p>The desired MIME type of the inference body in the response. The default value is
* <code>application/json</code>.</p>
* <p>The desired MIME type of the inference body in the response. The default value is <code>application/json</code>.</p>
* @public
*/
accept?: string;

/**
* <p>Identifier of the model. </p>
* <p>The unique identifier of the model to invoke to run inference.</p>
* <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
* <ul>
* <li>
* <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p>
* </li>
* <li>
* <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p>
* </li>
* <li>
* <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p>
* </li>
* </ul>
* @public
*/
modelId: string | undefined;

/**
* <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
* @public
*/
trace?: Trace;

/**
* <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied
* to the invocation.</p>
* <p>An error will be thrown in the following situations.</p>
* <ul>
* <li>
* <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p>
* </li>
* <li>
* <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p>
* </li>
* <li>
* <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p>
* </li>
* </ul>
* @public
*/
guardrailIdentifier?: string;

/**
* <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
* @public
*/
guardrailVersion?: string;
}

/**
* @public
*/
export interface InvokeModelResponse {
/**
* <p>Inference response from the model in the format specified in the content-type header field. To see the format and content of this field for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>.</p>
* <p>Inference response from the model in the format specified in the <code>contentType</code> header. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>.</p>
* @public
*/
body: Uint8Array | undefined;
Expand Down Expand Up @@ -250,8 +306,7 @@ export class ValidationException extends __BaseException {
*/
export interface InvokeModelWithResponseStreamRequest {
/**
* <p>Inference input in the format specified by the
* content-type. To see the format and content of this field for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>.</p>
* <p>The prompt and inference parameters in the format specified in the <code>contentType</code> in the header. To see the format and content of the request and response bodies for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/api-methods-run.html">Run inference</a> in the Bedrock User Guide.</p>
* @public
*/
body: Uint8Array | undefined;
Expand All @@ -271,10 +326,53 @@ export interface InvokeModelWithResponseStreamRequest {
accept?: string;

/**
* <p>Id of the model to invoke using the streaming request.</p>
* <p>The unique identifier of the model to invoke to run inference.</p>
* <p>The <code>modelId</code> to provide depends on the type of model that you use:</p>
* <ul>
* <li>
* <p>If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns">Amazon Bedrock base model IDs (on-demand throughput)</a> in the Amazon Bedrock User Guide.</p>
* </li>
* <li>
* <p>If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html">Run inference using a Provisioned Throughput</a> in the Amazon Bedrock User Guide.</p>
* </li>
* <li>
* <p>If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html">Use a custom model in Amazon Bedrock</a> in the Amazon Bedrock User Guide.</p>
* </li>
* </ul>
* @public
*/
modelId: string | undefined;

/**
* <p>Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.</p>
* @public
*/
trace?: Trace;

/**
* <p>The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied
* to the invocation.</p>
* <p>An error is thrown in the following situations.</p>
* <ul>
* <li>
* <p>You don't provide a guardrail identifier but you specify the <code>amazon-bedrock-guardrailConfig</code> field in the request body.</p>
* </li>
* <li>
* <p>You enable the guardrail but the <code>contentType</code> isn't <code>application/json</code>.</p>
* </li>
* <li>
* <p>You provide a guardrail identifier, but <code>guardrailVersion</code> isn't specified.</p>
* </li>
* </ul>
* @public
*/
guardrailIdentifier?: string;

/**
* <p>The version number for the guardrail. The value can also be <code>DRAFT</code>.</p>
* @public
*/
guardrailVersion?: string;
}

/**
Expand All @@ -290,7 +388,7 @@ export interface PayloadPart {
}

/**
* <p>An error occurred while streaming the response.</p>
* <p>An error occurred while streaming the response. Retry your request.</p>
* @public
*/
export class ModelStreamErrorException extends __BaseException {
Expand Down Expand Up @@ -369,7 +467,7 @@ export namespace ResponseStream {
}

/**
* <p>An error occurred while streaming the response.</p>
* <p>An error occurred while streaming the response. Retry your request.</p>
* @public
*/
export interface ModelStreamErrorExceptionMember {
Expand Down Expand Up @@ -397,7 +495,7 @@ export namespace ResponseStream {
}

/**
* <p>The number of requests exceeds the limit. Resubmit your request later.</p>
* <p>The number or frequency of requests exceeds the limit. Resubmit your request later.</p>
* @public
*/
export interface ThrottlingExceptionMember {
Expand Down Expand Up @@ -465,7 +563,7 @@ export namespace ResponseStream {
*/
export interface InvokeModelWithResponseStreamResponse {
/**
* <p>Inference response from the model in the format specified by Content-Type. To see the format and content of this field for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>.</p>
* <p>Inference response from the model in the format specified by the <code>contentType</code> header. To see the format and content of this field for different models, refer to <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html">Inference parameters</a>.</p>
* @public
*/
body: AsyncIterable<ResponseStream> | undefined;
Expand Down
12 changes: 12 additions & 0 deletions clients/client-bedrock-runtime/src/protocols/Aws_restJson1.ts
Expand Up @@ -52,6 +52,9 @@ export const se_InvokeModelCommand = async (
const headers: any = map({}, isSerializableHeaderValue, {
[_ct]: input[_cT]! || "application/octet-stream",
[_a]: input[_a]!,
[_xabt]: input[_t]!,
[_xabg]: input[_gI]!,
[_xabg_]: input[_gV]!,
});
b.bp("/model/{modelId}/invoke");
b.p("modelId", () => input.modelId!, "{modelId}", false);
Expand All @@ -74,6 +77,9 @@ export const se_InvokeModelWithResponseStreamCommand = async (
const headers: any = map({}, isSerializableHeaderValue, {
[_ct]: input[_cT]! || "application/octet-stream",
[_xaba]: input[_a]!,
[_xabt]: input[_t]!,
[_xabg]: input[_gI]!,
[_xabg_]: input[_gV]!,
});
b.bp("/model/{modelId}/invoke-with-response-stream");
b.p("modelId", () => input.modelId!, "{modelId}", false);
Expand Down Expand Up @@ -491,5 +497,11 @@ const isSerializableHeaderValue = (value: any): boolean =>
// Short aliases pairing request member names with the HTTP header names the
// serializers map them to (kept abbreviated by the smithy-typescript generator).
const _a = "accept"; // request member: desired response MIME type
const _cT = "contentType"; // request member: MIME type of the request body
const _ct = "content-type"; // header carrying contentType
const _gI = "guardrailIdentifier"; // request member: guardrail to apply
const _gV = "guardrailVersion"; // request member: guardrail version (or DRAFT)
const _t = "trace"; // request member: ENABLED/DISABLED trace flag
const _xaba = "x-amzn-bedrock-accept"; // header carrying accept on the streaming API
const _xabct = "x-amzn-bedrock-content-type"; // presumably the streaming response content-type header — confirm against the deserializer
const _xabg = "x-amzn-bedrock-guardrailidentifier"; // header carrying guardrailIdentifier
const _xabg_ = "x-amzn-bedrock-guardrailversion"; // header carrying guardrailVersion
const _xabt = "x-amzn-bedrock-trace"; // header carrying trace

0 comments on commit 7ddf8df

Please sign in to comment.