
Commit 4aed2a5 (authored Mar 27, 2024)
1 parent: cf8d12f

Add JSDoc comments for ai/core functions. (#1227)

14 files changed: +430 -58 lines
 

.changeset/gorgeous-goats-know.md (+5)

+---
+'ai': patch
+---
+
+Add JSDoc comments for ai/core functions.

packages/core/core/generate-object/generate-object.ts (+65 -1)

@@ -17,7 +17,40 @@ import { retryWithExponentialBackoff } from '../util/retry-with-exponential-backoff';
 import { injectJsonSchemaIntoSystem } from './inject-json-schema-into-system';

 /**
- * Generate a structured, typed object using a language model.
+Generate a structured, typed object for a given prompt and schema using a language model.
+
+This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
+
+@param model - The language model to use.
+@param schema - The schema of the object that the model should generate.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@returns
+A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
 export async function experimental_generateObject<T>({
   model,

@@ -31,8 +64,21 @@ export async function experimental_generateObject<T>({
   ...settings
 }: CallSettings &
   Prompt & {
+    /**
+The language model to use.
+     */
     model: LanguageModelV1;
+
+    /**
+The schema of the object that the model should generate.
+     */
     schema: z.Schema<T>;
+
+    /**
+The mode to use for object generation. Not all models support all modes.
+
+Default and recommended: 'auto' (best mode for the model).
+     */
     mode?: 'auto' | 'json' | 'tool' | 'grammar';
 }): Promise<GenerateObjectResult<T>> {
   const retry = retryWithExponentialBackoff({ maxRetries });

@@ -160,10 +206,28 @@ export async function experimental_generateObject<T>({
   });
 }

+/**
+The result of a `generateObject` call.
+ */
 export class GenerateObjectResult<T> {
+  /**
+The generated object (typed according to the schema).
+   */
   readonly object: T;
+
+  /**
+The reason why the generation finished.
+   */
   readonly finishReason: LanguageModelV1FinishReason;
+
+  /**
+The token usage of the generated text.
+   */
   readonly usage: TokenUsage;
+
+  /**
+Warnings from the model provider (e.g. unsupported settings)
+   */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;

   constructor(options: {
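To see the documented signature in action, here is a minimal usage sketch (illustrative only, not part of this commit). It assumes `experimental_generateObject` is exported from the `ai` package and that a `LanguageModelV1` instance is supplied by a provider; the recipe schema is invented for the example.

import { z } from 'zod';
import { experimental_generateObject } from 'ai'; // import path assumed

async function getRecipe(model: any /* a LanguageModelV1 from your provider */) {
  const result = await experimental_generateObject({
    model,
    schema: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
      steps: z.array(z.string()),
    }),
    prompt: 'Generate a simple pancake recipe.',
    temperature: 0, // set either temperature or topP, not both
    maxRetries: 2, // the default; 0 disables retries
  });

  // result.object is typed according to the schema.
  console.log(result.object.name, result.finishReason, result.usage);
  return result.object;
}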

packages/core/core/generate-object/stream-object.ts (+53 -1)

@@ -22,7 +22,40 @@ import { retryWithExponentialBackoff } from '../util/retry-with-exponential-backoff';
 import { injectJsonSchemaIntoSystem } from './inject-json-schema-into-system';

 /**
- * Stream an object as a partial object stream.
+Generate a structured, typed object for a given prompt and schema using a language model.
+
+This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
+
+@param model - The language model to use.
+@param schema - The schema of the object that the model should generate.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@return
+A result object for accessing the partial object stream and additional information.
  */
 export async function experimental_streamObject<T>({
   model,

@@ -36,8 +69,21 @@ export async function experimental_streamObject<T>({
   ...settings
 }: CallSettings &
   Prompt & {
+    /**
+The language model to use.
+     */
     model: LanguageModelV1;
+
+    /**
+The schema of the object that the model should generate.
+     */
     schema: z.Schema<T>;
+
+    /**
+The mode to use for object generation. Not all models support all modes.
+
+Default and recommended: 'auto' (best mode for the model).
+     */
     mode?: 'auto' | 'json' | 'tool' | 'grammar';
 }): Promise<StreamObjectResult<T>> {
   const retry = retryWithExponentialBackoff({ maxRetries });

@@ -161,9 +207,15 @@ export async function experimental_streamObject<T>({
   });
 }

+/**
+The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
 export class StreamObjectResult<T> {
   private readonly originalStream: ReadableStream<string | ErrorStreamPart>;

+  /**
+Warnings from the model provider (e.g. unsupported settings)
+   */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;

   constructor({
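A matching streaming sketch, under the same assumptions about the `ai` export and the provider model. Note that the `partialObjectStream` property name is an assumption; this diff only says the result gives access to a partial object stream.

import { z } from 'zod';
import { experimental_streamObject } from 'ai'; // import path assumed

async function streamCity(model: any /* a LanguageModelV1 from your provider */) {
  const result = await experimental_streamObject({
    model,
    schema: z.object({ city: z.string(), country: z.string() }),
    prompt: 'Name a city and its country.',
  });

  // Property name assumed; each chunk is a progressively more complete object.
  for await (const partialObject of result.partialObjectStream) {
    console.log(partialObject);
  }
}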

packages/core/core/generate-text/generate-text.ts (+68 -1)

@@ -16,7 +16,40 @@ import { ToToolCallArray, parseToolCall } from './tool-call';
 import { ToToolResultArray } from './tool-result';

 /**
- * Generate a text and call tools using a language model.
+Generate a text and call tools for a given prompt using a language model.
+
+This function does not stream the output. If you want to stream the output, use `experimental_streamText` instead.
+
+@param model - The language model to use.
+@param tools - The tools that the model can call. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@returns
+A result object that contains the generated text, the results of the tool calls, and additional information.
  */
 export async function experimental_generateText<
   TOOLS extends Record<string, ExperimentalTool>,

@@ -31,7 +64,14 @@ export async function experimental_generateText<
   ...settings
 }: CallSettings &
   Prompt & {
+    /**
+The language model to use.
+     */
     model: LanguageModelV1;
+
+    /**
+The tools that the model can call. The model needs to support calling tools.
+     */
     tools?: TOOLS;
 }): Promise<GenerateTextResult<TOOLS>> {
   const retry = retryWithExponentialBackoff({ maxRetries });

@@ -114,14 +154,41 @@ async function executeTools<TOOLS extends Record<string, ExperimentalTool>>({
   );
 }

+/**
+The result of a `generateText` call.
+It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ */
 export class GenerateTextResult<
   TOOLS extends Record<string, ExperimentalTool>,
 > {
+  /**
+The generated text.
+   */
   readonly text: string;
+
+  /**
+The tool calls that were made during the generation.
+   */
   readonly toolCalls: ToToolCallArray<TOOLS>;
+
+  /**
+The results of the tool calls.
+   */
   readonly toolResults: ToToolResultArray<TOOLS>;
+
+  /**
+The reason why the generation finished.
+   */
   readonly finishReason: LanguageModelV1FinishReason;
+
+  /**
+The token usage of the generated text.
+   */
   readonly usage: TokenUsage;
+
+  /**
+Warnings from the model provider (e.g. unsupported settings)
+   */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;

   constructor(options: {
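A hedged sketch of text generation with one tool, assuming `experimental_generateText` and the `tool` helper (see packages/core/core/tool/tool.ts below) are exported from the `ai` package; the weather tool is invented for illustration.

import { z } from 'zod';
import { experimental_generateText, tool } from 'ai'; // import paths assumed

async function answerWithWeather(model: any /* a LanguageModelV1 that supports tool calls */) {
  const result = await experimental_generateText({
    model,
    system: 'You are a helpful assistant.',
    prompt: 'What is the weather in Berlin?',
    tools: {
      weather: tool({
        description: 'Get the weather for a city.',
        parameters: z.object({ city: z.string() }),
        // Stubbed execution; a real tool would call a weather API here.
        execute: async ({ city }) => ({ city, temperatureC: 18 }),
      }),
    },
  });

  console.log(result.text);
  console.log(result.toolCalls, result.toolResults, result.usage);
}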

packages/core/core/generate-text/stream-text.ts (+67 -1)

@@ -23,7 +23,40 @@ import { ToToolCall } from './tool-call';
 import { ToToolResult } from './tool-result';

 /**
- * Stream text generated by a language model.
+Generate a text and call tools for a given prompt using a language model.
+
+This function streams the output. If you do not want to stream the output, use `experimental_generateText` instead.
+
+@param model - The language model to use.
+@param tools - The tools that the model can call. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+This is a number between 0 (almost no randomness) and 1 (very random).
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling. This is a number between 0 and 1.
+E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
+0 means no penalty.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@return
+A result object for accessing different stream types and additional information.
  */
 export async function experimental_streamText<
   TOOLS extends Record<string, ExperimentalTool>,

@@ -38,7 +71,14 @@ export async function experimental_streamText<
   ...settings
 }: CallSettings &
   Prompt & {
+    /**
+The language model to use.
+     */
     model: LanguageModelV1;
+
+    /**
+The tools that the model can call. The model needs to support calling tools.
+     */
     tools?: TOOLS;
 }): Promise<StreamTextResult<TOOLS>> {
   const retry = retryWithExponentialBackoff({ maxRetries });

@@ -101,9 +141,15 @@ export type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> =
     };
   };

+/**
+A result object for accessing different stream types and additional information.
+ */
 export class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
   private readonly originalStream: ReadableStream<TextStreamPart<TOOLS>>;

+  /**
+Warnings from the model provider (e.g. unsupported settings)
+   */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;

   constructor({

@@ -117,6 +163,11 @@ export class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
     this.warnings = warnings;
   }

+  /**
+A text stream that returns only the generated text deltas. You can use it
+as either an AsyncIterable or a ReadableStream. When an error occurs, the
+stream will throw the error.
+   */
   get textStream(): AsyncIterableStream<string> {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {

@@ -132,6 +183,12 @@ export class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
     });
   }

+  /**
+A stream with all events, including text deltas, tool calls, tool results, and
+errors.
+You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
+stream will throw the error.
+   */
   get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>> {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {

@@ -147,6 +204,15 @@ export class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
     });
   }

+  /**
+Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+It can be used with the `useChat` and `useCompletion` hooks.
+
+@param callbacks
+Stream callbacks that will be called when the stream emits events.
+
+@returns an `AIStream` object.
+   */
   toAIStream(callbacks?: AIStreamCallbacksAndOptions) {
     // TODO add support for tool calls
     return readableFromAsyncIterable(this.textStream)
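A small sketch of consuming `textStream`, assuming an `ai` export and a provider-supplied model as above.

import { experimental_streamText } from 'ai'; // import path assumed

async function streamAnswer(model: any /* a LanguageModelV1 from your provider */) {
  const result = await experimental_streamText({
    model,
    prompt: 'Write a haiku about streams.',
  });

  // textStream is both an AsyncIterable and a ReadableStream.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }

  // In a route handler you could instead return the stream to the client,
  // e.g. new StreamingTextResponse(result.toAIStream()).
}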

packages/core/core/generate-text/tool-call.ts (+15)

@@ -8,9 +8,24 @@ import {
 import { ExperimentalTool } from '../tool';
 import { ValueOf } from '../util/value-of';

+/**
+Typed tool call that is returned by generateText and streamText.
+It contains the tool call ID, the tool name, and the tool arguments.
+ */
 export interface ToolCall<NAME extends string, ARGS> {
+  /**
+ID of the tool call. This ID is used to match the tool call with the tool result.
+   */
   toolCallId: string;
+
+  /**
+Name of the tool that is being called.
+   */
   toolName: NAME;
+
+  /**
+Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+   */
   args: ARGS;
 }

packages/core/core/generate-text/tool-result.ts (+19)

@@ -2,10 +2,29 @@ import { z } from 'zod';
 import { ExperimentalTool } from '../tool';
 import { ValueOf } from '../util/value-of';

+/**
+Typed tool result that is returned by generateText and streamText.
+It contains the tool call ID, the tool name, the tool arguments, and the tool result.
+ */
 export interface ToolResult<NAME extends string, ARGS, RESULT> {
+  /**
+ID of the tool call. This ID is used to match the tool call with the tool result.
+   */
   toolCallId: string;
+
+  /**
+Name of the tool that was called.
+   */
   toolName: NAME;
+
+  /**
+Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+   */
   args: ARGS;
+
+  /**
+Result of the tool call. This is the result of the tool's execution.
+   */
   result: RESULT;
 }
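To make the pairing concrete, here is a hypothetical call/result pair with invented values; the shapes mirror the `ToolCall` and `ToolResult` interfaces above, matched by `toolCallId`.

// A tool call as the model might produce it...
const toolCall = {
  toolCallId: 'call-1',
  toolName: 'weather' as const,
  args: { city: 'Berlin' },
};

// ...and the result that answers it: same toolCallId, plus the execution result.
const toolResult = {
  toolCallId: 'call-1',
  toolName: 'weather' as const,
  args: { city: 'Berlin' },
  result: { temperatureC: 18 },
};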

packages/core/core/prompt/call-settings.ts (+33 -33)

@@ -1,66 +1,66 @@
 export type CallSettings = {
   /**
-   * Maximum number of tokens to generate.
+Maximum number of tokens to generate.
    */
   maxTokens?: number;

   /**
-   * Temperature setting. This is a number between 0 (almost no randomness) and
-   * 1 (very random).
-   *
-   * It is recommended to set either `temperature` or `topP`, but not both.
-   *
-   * @default 0
+Temperature setting. This is a number between 0 (almost no randomness) and
+1 (very random).
+
+It is recommended to set either `temperature` or `topP`, but not both.
+
+@default 0
    */
   temperature?: number;

   /**
-   * Nucleus sampling. This is a number between 0 and 1.
-   *
-   * E.g. 0.1 would mean that only tokens with the top 10% probability mass
-   * are considered.
-   *
-   * It is recommended to set either `temperature` or `topP`, but not both.
+Nucleus sampling. This is a number between 0 and 1.
+
+E.g. 0.1 would mean that only tokens with the top 10% probability mass
+are considered.
+
+It is recommended to set either `temperature` or `topP`, but not both.
    */
   topP?: number;

   /**
-   * Presence penalty setting. It affects the likelihood of the model to
-   * repeat information that is already in the prompt.
-   *
-   * The presence penalty is a number between -1 (increase repetition)
-   * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-   *
-   * @default 0
+Presence penalty setting. It affects the likelihood of the model to
+repeat information that is already in the prompt.
+
+The presence penalty is a number between -1 (increase repetition)
+and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+@default 0
    */
   presencePenalty?: number;

   /**
-   * Frequency penalty setting. It affects the likelihood of the model
-   * to repeatedly use the same words or phrases.
-   *
-   * The frequency penalty is a number between -1 (increase repetition)
-   * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-   *
-   * @default 0
+Frequency penalty setting. It affects the likelihood of the model
+to repeatedly use the same words or phrases.
+
+The frequency penalty is a number between -1 (increase repetition)
+and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+@default 0
    */
   frequencyPenalty?: number;

   /**
-   * The seed (integer) to use for random sampling. If set and supported
-   * by the model, calls will generate deterministic results.
+The seed (integer) to use for random sampling. If set and supported
+by the model, calls will generate deterministic results.
    */
   seed?: number;

   /**
-   * Maximum number of retries. Set to 0 to disable retries.
-   *
-   * @default 2
+Maximum number of retries. Set to 0 to disable retries.
+
+@default 2
    */
   maxRetries?: number;

   /**
-   * Abort signal.
+Abort signal.
    */
   abortSignal?: AbortSignal;
 };
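A quick sketch of an object satisfying `CallSettings`; the values are illustrative and the relative import assumes the file above.

import type { CallSettings } from './call-settings';

const settings: CallSettings = {
  maxTokens: 256,
  temperature: 0.7, // set either temperature or topP, not both
  frequencyPenalty: 0.3, // discourage repeated words and phrases
  seed: 42, // deterministic results where the model supports it
  maxRetries: 2, // the default; 0 disables retries
  abortSignal: AbortSignal.timeout(30_000), // cancel the call after 30s
};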
packages/core/core/prompt/content-part.ts (+38 -6)

@@ -1,45 +1,77 @@
 import { DataContent } from './data-content';

+/**
+Text content part of a prompt. It contains a string of text.
+ */
 export interface TextPart {
   type: 'text';

   /**
-   * The text content.
+The text content.
    */
   text: string;
 }

+/**
+Image content part of a prompt. It contains an image.
+ */
 export interface ImagePart {
   type: 'image';

   /**
-   * Image data. Can either be:
-   *
-   * - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
-   * - URL: a URL that points to the image
+Image data. Can either be:
+
+- data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+- URL: a URL that points to the image
    */
   image: DataContent | URL;

   /**
-   * Optional mime type of the image.
+Optional mime type of the image.
    */
   mimeType?: string;
 }

+/**
+Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
 export interface ToolCallPart {
   type: 'tool-call';

+  /**
+ID of the tool call. This ID is used to match the tool call with the tool result.
+   */
   toolCallId: string;
+
+  /**
+Name of the tool that is being called.
+   */
   toolName: string;

+  /**
+Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+   */
   args: unknown;
 }

+/**
+Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
 export interface ToolResultPart {
   type: 'tool-result';

+  /**
+ID of the tool call that this result is associated with.
+   */
   toolCallId: string;
+
+  /**
+Name of the tool that generated this result.
+   */
   toolName: string;

+  /**
+Result of the tool call. This is a JSON-serializable object.
+   */
   result: unknown;
 }
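The `image` field accepts either form described above; a short sketch with invented values (relative import assumed):

import { ImagePart } from './content-part';

// Option 1: reference the image by URL.
const fromUrl: ImagePart = {
  type: 'image',
  image: new URL('https://example.com/cat.png'),
};

// Option 2: pass the image data directly (base64 string, Uint8Array, ArrayBuffer, or Buffer).
const fromBytes: ImagePart = {
  type: 'image',
  image: new Uint8Array([0x89, 0x50, 0x4e, 0x47]), // raw image bytes
  mimeType: 'image/png',
};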

packages/core/core/prompt/data-content.ts (+13 -1)

@@ -5,10 +5,16 @@ import {
 } from '../../spec';

 /**
- * Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
  */
 export type DataContent = string | Uint8Array | ArrayBuffer | Buffer;

+/**
+Converts data content to a base64-encoded string.
+
+@param content - Data content to convert.
+@returns Base64-encoded string.
+ */
 export function convertDataContentToBase64String(content: DataContent): string {
   if (typeof content === 'string') {
     return content;

@@ -21,6 +27,12 @@ export function convertDataContentToBase64String(content: DataContent): string {
   return convertUint8ArrayToBase64(content);
 }

+/**
+Converts data content to a Uint8Array.
+
+@param content - Data content to convert.
+@returns Uint8Array.
+ */
 export function convertDataContentToUint8Array(
   content: DataContent,
 ): Uint8Array {
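A sketch of the normalization behavior, assuming the helper is imported via the relative path above; `cat.png` is a placeholder file.

import { readFileSync } from 'node:fs';
import { convertDataContentToBase64String } from './data-content';

// Binary DataContent variants normalize to the same base64 string;
// strings are assumed to already be base64 and pass through unchanged.
const buffer = readFileSync('cat.png'); // Buffer
const bytes = new Uint8Array(buffer); // Uint8Array view of the same data

console.log(
  convertDataContentToBase64String(buffer) ===
    convertDataContentToBase64String(bytes), // true
);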

packages/core/core/prompt/get-input-format.ts (+2 -2)

@@ -5,11 +5,11 @@ export function getInputFormat({
   messages,
 }: Prompt): 'prompt' | 'messages' {
   if (prompt == null && messages == null) {
-    throw new Error('prompt or messages must be defined');
+    throw new Error('prompt or messages must be defined'); // TODO InvalidPromptError
   }

   if (prompt != null && messages != null) {
-    throw new Error('prompt and messages cannot be defined at the same time');
+    throw new Error('prompt and messages cannot be defined at the same time'); // TODO InvalidPromptError
   }

   return prompt != null ? 'prompt' : 'messages';
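The validation rules as a sketch (relative import assumed; the thrown messages quote the code above):

import { getInputFormat } from './get-input-format';

getInputFormat({ prompt: 'Hello' }); // 'prompt'
getInputFormat({ messages: [{ role: 'user', content: 'Hello' }] }); // 'messages'
// getInputFormat({});                              // throws: prompt or messages must be defined
// getInputFormat({ prompt: 'Hi', messages: [] });  // throws: cannot define both at the same time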

packages/core/core/prompt/message.ts (+28 -2)

@@ -5,18 +5,44 @@ import {
   ToolResultPart,
 } from './content-part';

+/**
+A message that can be used in the `messages` field of a prompt.
+It can be a user message, an assistant message, or a tool message.
+ */
 export type ExperimentalMessage =
   | ExperimentalUserMessage
   | ExperimentalAssistantMessage
   | ExperimentalToolMessage;

+/**
+A user message. It can contain text or a combination of text and images.
+ */
 export type ExperimentalUserMessage = { role: 'user'; content: UserContent };
+
+/**
+Content of a user message. It can be a string or an array of text and image parts.
+ */
+export type UserContent = string | Array<TextPart | ImagePart>;
+
+/**
+An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
 export type ExperimentalAssistantMessage = {
   role: 'assistant';
   content: AssistantContent;
 };
-export type ExperimentalToolMessage = { role: 'tool'; content: ToolContent };

-export type UserContent = string | Array<TextPart | ImagePart>;
+/**
+Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
 export type AssistantContent = string | Array<TextPart | ToolCallPart>;
+
+/**
+A tool message. It contains the result of one or more tool calls.
+ */
+export type ExperimentalToolMessage = { role: 'tool'; content: ToolContent };
+
+/**
+Content of a tool message. It is an array of tool result parts.
+ */
 export type ToolContent = Array<ToolResultPart>;
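A short conversation exercising all three roles, with invented IDs and values (relative import assumed):

import { ExperimentalMessage } from './message';

const messages: ExperimentalMessage[] = [
  {
    role: 'user',
    content: [
      { type: 'text', text: 'What is the weather in Berlin?' },
      { type: 'image', image: new URL('https://example.com/map.png') },
    ],
  },
  {
    role: 'assistant',
    content: [
      { type: 'tool-call', toolCallId: 'call-1', toolName: 'weather', args: { city: 'Berlin' } },
    ],
  },
  {
    role: 'tool',
    content: [
      { type: 'tool-result', toolCallId: 'call-1', toolName: 'weather', result: { temperatureC: 18 } },
    ],
  },
];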

packages/core/core/prompt/prompt.ts (+14)

@@ -1,7 +1,21 @@
 import { ExperimentalMessage } from './message';

+/**
+Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
 export type Prompt = {
+  /**
+System message to include in the prompt. Can be used with `prompt` or `messages`.
+   */
   system?: string;
+
+  /**
+A simple text prompt. You can either use `prompt` or `messages` but not both.
+   */
   prompt?: string;
+
+  /**
+A list of messages. You can either use `prompt` or `messages` but not both.
+   */
   messages?: Array<ExperimentalMessage>;
 };
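The two valid shapes of `Prompt` as a sketch (relative import assumed):

import { Prompt } from './prompt';

// Shape 1: a system message plus a simple text prompt.
const textPrompt: Prompt = {
  system: 'You are a concise assistant.',
  prompt: 'Summarize the benefits of streaming responses.',
};

// Shape 2: a system message plus a message list. Never set both prompt and messages.
const messagesPrompt: Prompt = {
  system: 'You are a concise assistant.',
  messages: [{ role: 'user', content: 'Summarize the benefits of streaming responses.' }],
};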

packages/core/core/tool/tool.ts (+10 -10)

@@ -1,35 +1,35 @@
 import { z } from 'zod';

 /**
- * A tool contains the description and the schema of the input that the tool expects.
- * This enables the language model to generate the input.
- *
- * The tool can also contain an optional execute function for the actual execution function of the tool.
+A tool contains the description and the schema of the input that the tool expects.
+This enables the language model to generate the input.
+
+The tool can also contain an optional execute function for the actual execution function of the tool.
  */
 export interface ExperimentalTool<
   PARAMETERS extends z.ZodTypeAny = any,
   RESULT = any,
 > {
   /**
-   * A optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
    */
   description?: string;

   /**
-   * The schema of the input that the tool expects. The language model will use this to generate the input.
-   * Use descriptions to make the input understandable for the language model.
+The schema of the input that the tool expects. The language model will use this to generate the input.
+Use descriptions to make the input understandable for the language model.
    */
   parameters: PARAMETERS;

   /**
-   * An optional execute function for the actual execution function of the tool.
-   * If not provided, the tool will not be executed automatically.
+An optional execute function for the actual execution function of the tool.
+If not provided, the tool will not be executed automatically.
    */
   execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
 }

 /**
- * Helper function for inferring the execute args of a tool.
+Helper function for inferring the execute args of a tool.
  */
 // Note: special type inference is needed for the execute function args to make sure they are inferred correctly.
 export function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(
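A sketch of why the helper exists: with `tool()`, the `execute` argument type is inferred from `parameters` (relative import assumed; the weather tool is invented):

import { z } from 'zod';
import { tool } from './tool';

const weather = tool({
  description: 'Get the weather for a city.',
  parameters: z.object({ city: z.string() }),
  // `city` is inferred as string from the parameters schema above,
  // instead of falling back to `any`.
  execute: async ({ city }) => ({ city, temperatureC: 18 }),
});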
