
Comparing changes

base repository: vercel/ai
base: ai@3.0.14
head repository: vercel/ai
compare: ai@3.0.15
  • 3 commits
  • 76 files changed
  • 3 contributors

Commits on Mar 27, 2024

  1. Rename model specification export to ai/spec. (#1237)

    lgrammel authored Mar 27, 2024 · cf8d12f
  2. Add JSDoc comments for ai/core functions. (#1227)

    lgrammel authored Mar 27, 2024 · 4aed2a5
  3. Version Packages (#1239)

    Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
    github-actions[bot] authored Mar 27, 2024 · 48a5b47
Showing with 482 additions and 120 deletions.
  1. +7 −0 packages/core/CHANGELOG.md
  2. +66 −2 packages/core/core/generate-object/generate-object.ts
  3. +54 −2 packages/core/core/generate-object/stream-object.ts
  4. +69 −2 packages/core/core/generate-text/generate-text.ts
  5. +1 −4 packages/core/core/generate-text/run-tools-transformation.ts
  6. +69 −6 packages/core/core/generate-text/stream-text.ts
  7. +16 −1 packages/core/core/generate-text/tool-call.ts
  8. +19 −0 packages/core/core/generate-text/tool-result.ts
  9. +33 −33 packages/core/core/prompt/call-settings.ts
  10. +38 −6 packages/core/core/prompt/content-part.ts
  11. +1 −1 packages/core/core/prompt/convert-to-language-model-prompt.ts
  12. +14 −2 packages/core/core/prompt/data-content.ts
  13. +2 −2 packages/core/core/prompt/get-input-format.ts
  14. +28 −2 packages/core/core/prompt/message.ts
  15. +1 −1 packages/core/core/prompt/prepare-call-settings.ts
  16. +14 −0 packages/core/core/prompt/prompt.ts
  17. +1 −1 packages/core/core/test/mock-language-model-v1.ts
  18. +10 −10 packages/core/core/tool/tool.ts
  19. +1 −5 packages/core/core/util/retry-with-exponential-backoff.ts
  20. +1 −4 packages/core/mistral/convert-to-mistral-chat-messages.ts
  21. +1 −1 packages/core/mistral/map-mistral-finish-reason.ts
  22. +4 −4 packages/core/mistral/mistral-chat-language-model.test.ts
  23. +1 −1 packages/core/mistral/mistral-chat-language-model.ts
  24. +1 −1 packages/core/mistral/mistral-error.ts
  25. +1 −1 packages/core/mistral/mistral-facade.ts
  26. +1 −4 packages/core/openai/convert-to-openai-chat-messages.ts
  27. +1 −1 packages/core/openai/convert-to-openai-completion-prompt.ts
  28. +1 −1 packages/core/openai/map-openai-finish-reason.ts
  29. +4 −4 packages/core/openai/openai-chat-language-model.test.ts
  30. +1 −1 packages/core/openai/openai-chat-language-model.ts
  31. +4 −4 packages/core/openai/openai-completion-language-model.test.ts
  32. +1 −1 packages/core/openai/openai-completion-language-model.ts
  33. +1 −1 packages/core/openai/openai-error.ts
  34. +1 −1 packages/core/openai/openai-facade.ts
  35. +8 −8 packages/core/package.json
  36. 0 packages/core/{ai-model-specification → spec}/errors/api-call-error.ts
  37. 0 packages/core/{ai-model-specification → spec}/errors/index.ts
  38. 0 packages/core/{ai-model-specification → spec}/errors/invalid-argument-error.ts
  39. 0 packages/core/{ai-model-specification → spec}/errors/invalid-data-content-error.ts
  40. 0 packages/core/{ai-model-specification → spec}/errors/invalid-prompt-error.ts
  41. 0 packages/core/{ai-model-specification → spec}/errors/invalid-response-data-error.ts
  42. 0 packages/core/{ai-model-specification → spec}/errors/invalid-tool-arguments-error.ts
  43. 0 packages/core/{ai-model-specification → spec}/errors/json-parse-error.ts
  44. 0 packages/core/{ai-model-specification → spec}/errors/load-api-key-error.ts
  45. 0 packages/core/{ai-model-specification → spec}/errors/no-object-generated-error.ts
  46. 0 packages/core/{ai-model-specification → spec}/errors/no-response-body-error.ts
  47. 0 packages/core/{ai-model-specification → spec}/errors/no-such-tool-error.ts
  48. 0 packages/core/{ai-model-specification → spec}/errors/retry-error.ts
  49. 0 packages/core/{ai-model-specification → spec}/errors/type-validation-error.ts
  50. 0 packages/core/{ai-model-specification → spec}/errors/unsupported-functionality-error.ts
  51. 0 packages/core/{ai-model-specification → spec}/index.ts
  52. 0 packages/core/{ai-model-specification → spec}/language-model/index.ts
  53. 0 packages/core/{ai-model-specification → spec}/language-model/v1/index.ts
  54. 0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-call-options.ts
  55. 0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-call-settings.ts
  56. 0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-call-warning.ts
  57. 0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-finish-reason.ts
  58. 0 ...es/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-function-tool-call.ts
  59. 0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-function-tool.ts
  60. 0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1-prompt.ts
  61. +4 −0 packages/core/{ai-model-specification → spec}/language-model/v1/language-model-v1.ts
  62. 0 packages/core/{ai-model-specification → spec}/test/convert-stream-to-array.ts
  63. 0 packages/core/{ai-model-specification → spec}/test/json-test-server.ts
  64. 0 packages/core/{ai-model-specification → spec}/test/streaming-test-server.ts
  65. 0 packages/core/{ai-model-specification → spec}/util/generate-id.ts
  66. 0 packages/core/{ai-model-specification → spec}/util/get-error-message.ts
  67. 0 packages/core/{ai-model-specification → spec}/util/index.ts
  68. 0 packages/core/{ai-model-specification → spec}/util/json-schema.ts
  69. 0 packages/core/{ai-model-specification → spec}/util/load-api-key.ts
  70. 0 packages/core/{ai-model-specification → spec}/util/parse-json.ts
  71. 0 packages/core/{ai-model-specification → spec}/util/post-to-api.ts
  72. 0 packages/core/{ai-model-specification → spec}/util/response-handler.ts
  73. 0 packages/core/{ai-model-specification → spec}/util/scale.ts
  74. 0 packages/core/{ai-model-specification → spec}/util/uint8-utils.ts
  75. 0 packages/core/{ai-model-specification → spec}/util/validate-types.ts
  76. +2 −2 packages/core/tsup.config.ts
7 changes: 7 additions & 0 deletions packages/core/CHANGELOG.md
@@ -1,5 +1,12 @@
# ai

## 3.0.15

### Patch Changes

- 4aed2a5: Add JSDoc comments for ai/core functions.
- cf8d12f: Export experimental language model specification under `ai/spec`.

## 3.0.14

### Patch Changes
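
The `ai/spec` export noted in this changelog replaces the old `ai-model-specification` directory (see the renamed files in the list above). As a hedged sketch of the consumer-facing effect, assuming the new subpath re-exports the types this diff imports internally from `../../spec`:

```ts
// Assumption: the public `ai/spec` subpath surfaces the same specification
// types that the core files below import from '../../spec'.
import type {
  LanguageModelV1,
  LanguageModelV1CallWarning,
} from 'ai/spec';
```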
68 changes: 66 additions & 2 deletions packages/core/core/generate-object/generate-object.ts
@@ -6,7 +6,7 @@ import {
LanguageModelV1FinishReason,
NoTextGeneratedError,
safeParseJSON,
} from '../../ai-model-specification';
} from '../../spec';
import { TokenUsage, calculateTokenUsage } from '../generate-text/token-usage';
import { CallSettings } from '../prompt/call-settings';
import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
@@ -17,7 +17,40 @@ import { retryWithExponentialBackoff } from '../util/retry-with-exponential-back
import { injectJsonSchemaIntoSystem } from './inject-json-schema-into-system';

/**
* Generate a structured, typed object using a language model.
Generate a structured, typed object for a given prompt and schema using a language model.
This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
@param model - The language model to use.
@param schema - The schema of the object that the model should generate.
@param system - A system message that will be part of the prompt.
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
@param maxTokens - Maximum number of tokens to generate.
@param temperature - Temperature setting.
This is a number between 0 (almost no randomness) and 1 (very random).
It is recommended to set either `temperature` or `topP`, but not both.
@param topP - Nucleus sampling. This is a number between 0 and 1.
E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
It is recommended to set either `temperature` or `topP`, but not both.
@param presencePenalty - Presence penalty setting.
It affects the likelihood of the model to repeat information that is already in the prompt.
The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
0 means no penalty.
@param frequencyPenalty - Frequency penalty setting.
It affects the likelihood of the model to repeatedly use the same words or phrases.
The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
0 means no penalty.
@param seed - The seed (integer) to use for random sampling.
If set and supported by the model, calls will generate deterministic results.
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@param abortSignal - An optional abort signal that can be used to cancel the call.
@returns
A result object that contains the generated object, the finish reason, the token usage, and additional information.
*/
export async function experimental_generateObject<T>({
model,
@@ -31,8 +64,21 @@ export async function experimental_generateObject<T>({
...settings
}: CallSettings &
Prompt & {
/**
The language model to use.
*/
model: LanguageModelV1;

/**
The schema of the object that the model should generate.
*/
schema: z.Schema<T>;

/**
The mode to use for object generation. Not all models support all modes.
Default and recommended: 'auto' (best mode for the model).
*/
mode?: 'auto' | 'json' | 'tool' | 'grammar';
}): Promise<GenerateObjectResult<T>> {
const retry = retryWithExponentialBackoff({ maxRetries });
@@ -160,10 +206,28 @@ export async function experimental_generateObject<T>({
});
}

/**
The result of a `generateObject` call.
*/
export class GenerateObjectResult<T> {
/**
The generated object (typed according to the schema).
*/
readonly object: T;

/**
The reason why the generation finished.
*/
readonly finishReason: LanguageModelV1FinishReason;

/**
The token usage of the generated text.
*/
readonly usage: TokenUsage;

/**
Warnings from the model provider (e.g. unsupported settings)
*/
readonly warnings: LanguageModelV1CallWarning[] | undefined;

constructor(options: {
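
The JSDoc added above documents the full call surface of `experimental_generateObject`. A minimal usage sketch, under the assumption that `model` is any `LanguageModelV1` implementation (for example, one produced by the OpenAI or Mistral facades elsewhere in this package):

```ts
import { experimental_generateObject } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from 'ai/spec';

// Placeholder: any LanguageModelV1 implementation, e.g. from one of the
// provider facades in this package.
declare const model: LanguageModelV1;

const result = await experimental_generateObject({
  model,
  schema: z.object({
    name: z.string(),
    ingredients: z.array(z.string()),
  }),
  prompt: 'Generate a simple pasta recipe.',
});

// `object` is typed according to the schema; the other fields match the
// GenerateObjectResult class documented above.
console.log(result.object.name, result.finishReason, result.usage);
```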
56 changes: 54 additions & 2 deletions packages/core/core/generate-object/stream-object.ts
@@ -5,7 +5,7 @@ import {
LanguageModelV1CallOptions,
LanguageModelV1CallWarning,
LanguageModelV1StreamPart,
} from '../../ai-model-specification';
} from '../../spec';
import { CallSettings } from '../prompt/call-settings';
import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
import { getInputFormat } from '../prompt/get-input-format';
@@ -22,7 +22,40 @@ import { retryWithExponentialBackoff } from '../util/retry-with-exponential-back
import { injectJsonSchemaIntoSystem } from './inject-json-schema-into-system';

/**
* Stream an object as a partial object stream.
Generate a structured, typed object for a given prompt and schema using a language model.
This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
@param model - The language model to use.
@param schema - The schema of the object that the model should generate.
@param system - A system message that will be part of the prompt.
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
@param maxTokens - Maximum number of tokens to generate.
@param temperature - Temperature setting.
This is a number between 0 (almost no randomness) and 1 (very random).
It is recommended to set either `temperature` or `topP`, but not both.
@param topP - Nucleus sampling. This is a number between 0 and 1.
E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
It is recommended to set either `temperature` or `topP`, but not both.
@param presencePenalty - Presence penalty setting.
It affects the likelihood of the model to repeat information that is already in the prompt.
The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
0 means no penalty.
@param frequencyPenalty - Frequency penalty setting.
It affects the likelihood of the model to repeatedly use the same words or phrases.
The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
0 means no penalty.
@param seed - The seed (integer) to use for random sampling.
If set and supported by the model, calls will generate deterministic results.
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@param abortSignal - An optional abort signal that can be used to cancel the call.
@return
A result object for accessing the partial object stream and additional information.
*/
export async function experimental_streamObject<T>({
model,
@@ -36,8 +69,21 @@ export async function experimental_streamObject<T>({
...settings
}: CallSettings &
Prompt & {
/**
The language model to use.
*/
model: LanguageModelV1;

/**
The schema of the object that the model should generate.
*/
schema: z.Schema<T>;

/**
The mode to use for object generation. Not all models support all modes.
Default and recommended: 'auto' (best mode for the model).
*/
mode?: 'auto' | 'json' | 'tool' | 'grammar';
}): Promise<StreamObjectResult<T>> {
const retry = retryWithExponentialBackoff({ maxRetries });
@@ -161,9 +207,15 @@ export async function experimental_streamObject<T>({
});
}

/**
The result of a `streamObject` call that contains the partial object stream and additional information.
*/
export class StreamObjectResult<T> {
private readonly originalStream: ReadableStream<string | ErrorStreamPart>;

/**
Warnings from the model provider (e.g. unsupported settings)
*/
readonly warnings: LanguageModelV1CallWarning[] | undefined;

constructor({
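
For comparison, a streaming sketch of `experimental_streamObject`. Note that the hunk above only says the result gives access to "the partial object stream"; the `partialObjectStream` property name used here is an assumption, not confirmed by this diff:

```ts
import { experimental_streamObject } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from 'ai/spec';

declare const model: LanguageModelV1; // placeholder LanguageModelV1 instance

const result = await experimental_streamObject({
  model,
  schema: z.object({ summary: z.string() }),
  prompt: 'Summarize the benefits of streaming APIs.',
});

// Assumed accessor for the partial object stream; each chunk is a partial,
// progressively more complete version of the typed object.
for await (const partialObject of result.partialObjectStream) {
  console.log(partialObject);
}
```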
71 changes: 69 additions & 2 deletions packages/core/core/generate-text/generate-text.ts
@@ -3,7 +3,7 @@ import {
LanguageModelV1,
LanguageModelV1CallWarning,
LanguageModelV1FinishReason,
} from '../../ai-model-specification';
} from '../../spec';
import { CallSettings } from '../prompt/call-settings';
import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
import { getInputFormat } from '../prompt/get-input-format';
@@ -16,7 +16,40 @@ import { ToToolCallArray, parseToolCall } from './tool-call';
import { ToToolResultArray } from './tool-result';

/**
* Generate a text and call tools using a language model.
Generate a text and call tools for a given prompt using a language model.
This function does not stream the output. If you want to stream the output, use `experimental_streamText` instead.
@param model - The language model to use.
@param tools - The tools that the model can call. The model needs to support calling tools.
@param system - A system message that will be part of the prompt.
@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
@param maxTokens - Maximum number of tokens to generate.
@param temperature - Temperature setting.
This is a number between 0 (almost no randomness) and 1 (very random).
It is recommended to set either `temperature` or `topP`, but not both.
@param topP - Nucleus sampling. This is a number between 0 and 1.
E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
It is recommended to set either `temperature` or `topP`, but not both.
@param presencePenalty - Presence penalty setting.
It affects the likelihood of the model to repeat information that is already in the prompt.
The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
0 means no penalty.
@param frequencyPenalty - Frequency penalty setting.
It affects the likelihood of the model to repeatedly use the same words or phrases.
The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
0 means no penalty.
@param seed - The seed (integer) to use for random sampling.
If set and supported by the model, calls will generate deterministic results.
@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@param abortSignal - An optional abort signal that can be used to cancel the call.
@returns
A result object that contains the generated text, the results of the tool calls, and additional information.
*/
export async function experimental_generateText<
TOOLS extends Record<string, ExperimentalTool>,
@@ -31,7 +64,14 @@ export async function experimental_generateText<
...settings
}: CallSettings &
Prompt & {
/**
The language model to use.
*/
model: LanguageModelV1;

/**
The tools that the model can call. The model needs to support calling tools.
*/
tools?: TOOLS;
}): Promise<GenerateTextResult<TOOLS>> {
const retry = retryWithExponentialBackoff({ maxRetries });
@@ -114,14 +154,41 @@ async function executeTools<TOOLS extends Record<string, ExperimentalTool>>({
);
}

/**
The result of a `generateText` call.
It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
*/
export class GenerateTextResult<
TOOLS extends Record<string, ExperimentalTool>,
> {
/**
The generated text.
*/
readonly text: string;

/**
The tool calls that were made during the generation.
*/
readonly toolCalls: ToToolCallArray<TOOLS>;

/**
The results of the tool calls.
*/
readonly toolResults: ToToolResultArray<TOOLS>;

/**
The reason why the generation finished.
*/
readonly finishReason: LanguageModelV1FinishReason;

/**
The token usage of the generated text.
*/
readonly usage: TokenUsage;

/**
Warnings from the model provider (e.g. unsupported settings)
*/
readonly warnings: LanguageModelV1CallWarning[] | undefined;

constructor(options: {
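
Finally, a tool-calling sketch for `experimental_generateText`. The inline tool shape (`description`, Zod `parameters`, `execute`) is inferred from the `ExperimentalTool` references in this diff and should be treated as an assumption:

```ts
import { experimental_generateText } from 'ai';
import { z } from 'zod';
import type { LanguageModelV1 } from 'ai/spec';

declare const model: LanguageModelV1; // placeholder LanguageModelV1 instance

const result = await experimental_generateText({
  model,
  // Assumed ExperimentalTool shape: a description, a Zod parameters schema,
  // and an execute function that produces the tool result.
  tools: {
    weather: {
      description: 'Get the weather for a city.',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }: { city: string }) => ({
        city,
        temperatureCelsius: 20,
      }),
    },
  },
  prompt: 'What is the weather in Berlin?',
});

// text, toolCalls, and toolResults mirror the GenerateTextResult fields
// documented above.
console.log(result.text, result.toolCalls, result.toolResults);
```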
5 changes: 1 addition & 4 deletions packages/core/core/generate-text/run-tools-transformation.ts
@@ -1,7 +1,4 @@
import {
LanguageModelV1StreamPart,
NoSuchToolError,
} from '../../ai-model-specification';
import { LanguageModelV1StreamPart, NoSuchToolError } from '../../spec';
import { generateId } from '../../shared/generate-id';
import { ExperimentalTool } from '../tool';
import { TextStreamPart } from './stream-text';