Skip to content

Commit dd0d854

Browse files
lgrammel and Kunoacc authored Jul 11, 2024
feat (ai/vue): add useAssistant (#2245)
Co-authored-by: Nelson Nelson-Atuonwu <nellyatuonwu@gmail.com>
1 parent 8ef3386 commit dd0d854

19 files changed

+883
-26
lines changed
 

‎.changeset/smart-fishes-pay.md

+6
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
'ai': patch
3+
'@ai-sdk/vue': patch
4+
---
5+
6+
feat (ai/vue): add useAssistant

‎content/docs/05-ai-sdk-ui/01-overview.mdx

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ Here is a comparison of the supported functions across these frameworks:
2727
| [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | <Check size={18} /> | <Cross size={18} /> | <Cross size={18} /> | <Check size={18} /> |
2828
| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
2929
| [useObject](/docs/reference/ai-sdk-ui/use-object) | <Check size={18} /> | <Cross size={18} /> | <Cross size={18} /> | <Cross size={18} /> |
30-
| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | <Check size={18} /> | <Check size={18} /> | <Cross size={18} /> | <Cross size={18} /> |
30+
| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Cross size={18} /> |
3131

3232
<Note>
3333
[Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are

‎content/docs/05-ai-sdk-ui/10-openai-assistants.mdx

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ The `useAssistant` hook allows you to handle the client state when interacting w
99
This hook is useful when you want to integrate assistant capabilities into your application,
1010
with the UI updated automatically as the assistant is streaming its execution.
1111

12-
The `useAssistant` hook is currently supported with `ai/react` and `ai/svelte`.
12+
The `useAssistant` hook is supported in `ai/react`, `ai/svelte`, and `ai/vue`.
1313

1414
## Example
1515

‎content/docs/07-reference/ai-sdk-ui/20-use-assistant.mdx

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ with the UI updated automatically as the assistant is streaming its execution.
1212
This works in conjunction with [`AssistantResponse`](./assistant-response) in the backend.
1313

1414
<Note>
15-
`useAssistant` is currently supported with `ai/react` and `ai/svelte`.
15+
`useAssistant` is supported in `ai/react`, `ai/svelte`, and `ai/vue`.
1616
</Note>
1717

1818
## Import

‎content/docs/07-reference/ai-sdk-ui/index.mdx

+1-1
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ Here is a comparison of the supported functions across these frameworks:
6060
| [useChat](/docs/reference/ai-sdk-ui/use-chat) tool calling | <Check size={18} /> | <Cross size={18} /> | <Cross size={18} /> | <Check size={18} /> |
6161
| [useCompletion](/docs/reference/ai-sdk-ui/use-completion) | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
6262
| [useObject](/docs/reference/ai-sdk-ui/use-object) | <Check size={18} /> | <Cross size={18} /> | <Cross size={18} /> | <Cross size={18} /> |
63-
| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | <Check size={18} /> | <Check size={18} /> | <Cross size={18} /> | <Cross size={18} /> |
63+
| [useAssistant](/docs/reference/ai-sdk-ui/use-assistant) | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Cross size={18} /> |
6464

6565
<Note>
6666
[Contributions](https://github.com/vercel/ai/blob/main/CONTRIBUTING.md) are

‎examples/nuxt-openai/.env.example

+1
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
11
NUXT_OPENAI_API_KEY=xxxxxxx
2+
NUXT_ASSISTANT_ID=xxxxxxx

‎examples/nuxt-openai/nuxt.config.ts

+1
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ export default defineNuxtConfig({
99

1010
runtimeConfig: {
1111
openaiApiKey: '',
12+
assistantId: '',
1213
},
1314

1415
compatibilityDate: '2024-07-05',

‎examples/nuxt-openai/package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
"@vue/shared": "^3.4.31",
2222
"ai": "latest",
2323
"nuxt": "^3.12.3",
24-
"openai": "4.52.3",
24+
"openai": "4.47.1",
2525
"tailwindcss": "^3.4.4",
2626
"ufo": "^1.5.3",
2727
"unctx": "^2.3.1",
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
<script lang="ts" setup>
2+
import { useAssistant } from '@ai-sdk/vue';
3+
import type { Message } from '@ai-sdk/vue';
4+
5+
const roleToColorMap: Record<Message['role'], string> = {
6+
system: 'red',
7+
user: 'black',
8+
function: 'blue',
9+
tool: 'purple',
10+
assistant: 'green',
11+
data: 'orange',
12+
};
13+
14+
const { messages, status, input, handleSubmit, error, stop } = useAssistant({
15+
api: '/api/assistant',
16+
});
17+
18+
// Create a reference of the input element and focus on it when the component is mounted & the assistant status is 'awaiting_message'
19+
const inputRef = ref<HTMLInputElement | null>(null);
20+
21+
watchEffect(() => {
22+
if (inputRef.value && status.value === 'awaiting_message') {
23+
inputRef.value.focus();
24+
}
25+
});
26+
</script>
27+
28+
<template>
29+
<div class="flex flex-col w-full max-w-md py-24 mx-auto stretch">
30+
<!-- Render Assistant API errors if any -->
31+
<div
32+
class="relative px-6 py-4 text-white bg-red-500 rounded-md"
33+
v-if="error"
34+
>
35+
<span class="block sm:inline"> Error: {{ error?.toString() }} </span>
36+
</div>
37+
38+
<!-- Render Assistant Messages -->
39+
<div
40+
class="whitespace-pre-wrap"
41+
v-for="(message, index) in messages"
42+
:key="index"
43+
:style="{ color: roleToColorMap[message.role] }"
44+
>
45+
<strong>{{ `${message.role}: ` }}</strong>
46+
{{ message.role !== 'data' && message.content }}
47+
<template v-if="message.role === 'data'">
48+
{{ (message.data as any)?.description }}
49+
<br />
50+
<pre class="bg-gray-200">{{
51+
JSON.stringify(message.data, null, 2)
52+
}}</pre>
53+
</template>
54+
<br />
55+
<br />
56+
</div>
57+
58+
<!-- Render Assistant Status Indicator (In Progress) -->
59+
<div
60+
class="w-full h-8 max-w-md p-2 mb-8 bg-gray-300 rounded-lg dark:bg-gray-600 animate-pulse"
61+
v-if="status === 'in_progress'"
62+
></div>
63+
64+
<!-- Render Assistant Message Input Form -->
65+
<form @submit.prevent="(e) => handleSubmit(e as any)">
66+
<input
67+
ref="inputRef"
68+
:disabled="status === 'in_progress'"
69+
class="fixed w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl bottom-14 ax-w-md"
70+
v-model="input"
71+
placeholder="What is the temperature in the living room?"
72+
/>
73+
</form>
74+
75+
<button
76+
@click="stop"
77+
:disabled="status === 'awaiting_message'"
78+
class="fixed bottom-0 w-full max-w-md p-2 mb-8 text-white bg-red-500 rounded-lg disabled:opacity-50"
79+
>
80+
Stop
81+
</button>
82+
</div>
83+
</template>
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
import { AssistantResponse } from 'ai';
2+
import OpenAI from 'openai';
3+
4+
type AssistantRequest = {
5+
threadId: string | null;
6+
message: string;
7+
};
8+
9+
// Allow streaming responses up to 30 seconds
10+
export const maxDuration = 30;
11+
12+
export default defineLazyEventHandler(async () => {
13+
// Validate the OpenAI API key and Assistant ID are set
14+
const apiKey = useRuntimeConfig().openaiApiKey;
15+
if (!apiKey)
16+
throw new Error('Missing OpenAI API key, `NUXT_OPEN_API_KEY` not set');
17+
18+
const assistantId = useRuntimeConfig().assistantId;
19+
if (!assistantId)
20+
throw new Error('Missing Assistant ID, `NUXT_ASSISTANT_ID` not set');
21+
22+
// Create an OpenAI API client (that's edge friendly!)
23+
const openai = new OpenAI({ apiKey });
24+
25+
const homeTemperatures = {
26+
bedroom: 20,
27+
'home office': 21,
28+
'living room': 21,
29+
kitchen: 22,
30+
bathroom: 23,
31+
};
32+
33+
return defineEventHandler(async (event: any) => {
34+
const { threadId: userThreadId, message }: AssistantRequest =
35+
await readBody(event);
36+
37+
// Extract the signal from the H3 request if available
38+
const signal = event?.web?.request?.signal;
39+
40+
// Create a thread if needed
41+
const threadId = userThreadId ?? (await openai.beta.threads.create({})).id;
42+
43+
// Add a message to the thread
44+
const createdMessage = await openai.beta.threads.messages.create(
45+
threadId,
46+
{
47+
role: 'user',
48+
content: message,
49+
},
50+
{ signal },
51+
);
52+
53+
return AssistantResponse(
54+
{ threadId, messageId: createdMessage.id },
55+
async ({ forwardStream, sendDataMessage }) => {
56+
// Run the assistant on the thread
57+
const runStream = openai.beta.threads.runs.stream(
58+
threadId,
59+
{ assistant_id: assistantId },
60+
{ signal },
61+
);
62+
63+
// forward run status would stream message deltas
64+
let runResult = await forwardStream(runStream);
65+
66+
// status can be: queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired
67+
while (
68+
runResult?.status === 'requires_action' &&
69+
runResult?.required_action?.type === 'submit_tool_outputs'
70+
) {
71+
// Process the required action to submit tool outputs
72+
const tool_outputs =
73+
runResult.required_action.submit_tool_outputs.tool_calls.map(
74+
(toolCall: any) => {
75+
const parameters = JSON.parse(toolCall.function.arguments);
76+
77+
switch (toolCall.function.name) {
78+
case 'getRoomTemperature': {
79+
const room: keyof typeof homeTemperatures = parameters.room;
80+
const temperature = homeTemperatures[room];
81+
82+
return {
83+
tool_call_id: toolCall.id,
84+
output: temperature.toString(),
85+
};
86+
}
87+
88+
case 'setRoomTemperature': {
89+
const room: keyof typeof homeTemperatures = parameters.room;
90+
const oldTemperature = homeTemperatures[room];
91+
92+
homeTemperatures[room] = parameters.temperature;
93+
94+
sendDataMessage({
95+
role: 'data',
96+
data: {
97+
oldTemperature,
98+
newTemperature: parameters.temperature,
99+
description: `Temperature in the ${room} changed from ${oldTemperature} to ${parameters.temperature}`,
100+
},
101+
});
102+
103+
return {
104+
tool_call_id: toolCall.id,
105+
output: 'Temperature set successfully',
106+
};
107+
}
108+
default: {
109+
throw new Error(
110+
`Unknown tool call function: ${toolCall.function.name}`,
111+
);
112+
}
113+
}
114+
},
115+
);
116+
117+
// Submit the tool outputs
118+
runResult = await forwardStream(
119+
openai.beta.threads.runs.submitToolOutputsStream(
120+
threadId,
121+
runResult.id,
122+
{ tool_outputs },
123+
{ signal },
124+
),
125+
);
126+
}
127+
},
128+
);
129+
});
130+
});

‎packages/core/vue/index.ts

+7
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import {
22
useChat as useChatVue,
33
useCompletion as useCompletionVue,
4+
useAssistant as useAssistantVue,
45
} from '@ai-sdk/vue';
56

67
/**
@@ -13,6 +14,11 @@ export const useChat = useChatVue;
1314
*/
1415
export const useCompletion = useCompletionVue;
1516

17+
/**
18+
* @deprecated Use `useAssistant` from `@ai-sdk/vue` instead.
19+
*/
20+
export const useAssistant = useAssistantVue;
21+
1622
/**
1723
* @deprecated Use `@ai-sdk/vue` instead.
1824
*/
@@ -21,4 +27,5 @@ export type {
2127
Message,
2228
UseChatOptions,
2329
UseChatHelpers,
30+
UseAssistantHelpers,
2431
} from '@ai-sdk/vue';

‎packages/vue/README.md

+3-2
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,6 @@
22

33
[Vue.js](https://vuejs.org/) UI components for the [Vercel AI SDK](https://sdk.vercel.ai/docs):
44

5-
- [`useChat`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-chat) hook
6-
- [`useCompletion`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-completion) hook
5+
- [`useChat`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-chat) composable
6+
- [`useCompletion`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-completion) composable
7+
- [`useAssistant`](https://sdk.vercel.ai/docs/reference/ai-sdk-ui/use-assistant) composable

‎packages/vue/package.json

+1
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
}
2626
},
2727
"dependencies": {
28+
"@ai-sdk/provider-utils": "0.0.14",
2829
"@ai-sdk/ui-utils": "0.0.12",
2930
"swrv": "1.0.4"
3031
},
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
<script lang="ts" setup>
2+
import { useAssistant } from './use-assistant';
3+
4+
const { status, messages, append } = useAssistant({
5+
api: '/api/assistant'
6+
});
7+
</script>
8+
9+
<template>
10+
<div>
11+
<div data-testid="status">{{ status }}</div>
12+
<div v-for="(message, index) in messages" :data-testid="`message-${index}`" :key="index">
13+
{{ message.role === 'user' ? 'User: ' : 'AI: ' }}
14+
{{ message.content }}
15+
</div>
16+
<button data-testid="do-append" @click="append({ role: 'user', content: 'hi' })" />
17+
</div>
18+
</template>
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
<script setup lang="ts">
2+
import { useAssistant } from './use-assistant';
3+
4+
const { status, messages, error, append, setThreadId, threadId } = useAssistant({
5+
api: '/api/assistant'
6+
});
7+
</script>
8+
9+
<template>
10+
<div>
11+
<div data-testid="status">{{ status }}</div>
12+
<div data-testid="thread-id">{{ threadId || 'undefined' }}</div>
13+
<div data-testid="error">{{ error?.toString() }}</div>
14+
<div v-for="(message, index) in messages" :data-testid="`message-${index}`" :key="index">
15+
{{ message.role === 'user' ? 'User: ' : 'AI: ' }}
16+
{{ message.content }}
17+
</div>
18+
<button data-testid="do-append" @click="append({ role: 'user', content: 'hi' })" />
19+
<button data-testid="do-new-thread" @click="setThreadId(undefined)" />
20+
<button data-testid="do-thread-3" @click="setThreadId('t3')" />
21+
</div>
22+
</template>

‎packages/vue/src/index.ts

+1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,3 @@
11
export * from './use-chat';
22
export * from './use-completion';
3+
export * from './use-assistant';

‎packages/vue/src/use-assistant.ts

+305
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,305 @@
1+
/**
2+
* A vue.js composable function to interact with the assistant API.
3+
*/
4+
5+
import { isAbortError } from '@ai-sdk/provider-utils';
6+
import { readDataStream, generateId } from '@ai-sdk/ui-utils';
7+
import type {
8+
AssistantStatus,
9+
CreateMessage,
10+
Message,
11+
UseAssistantOptions,
12+
} from '@ai-sdk/ui-utils';
13+
import { computed, readonly, ref } from 'vue';
14+
import type { ComputedRef, Ref } from 'vue';
15+
16+
export type UseAssistantHelpers = {
17+
/**
18+
* The current array of chat messages.
19+
*/
20+
messages: Ref<Message[]>;
21+
22+
/**
23+
* Update the message store with a new array of messages.
24+
*/
25+
setMessages: (messagesProcessor: (messages: Message[]) => Message[]) => void;
26+
27+
/**
28+
* The current thread ID.
29+
*/
30+
threadId: Ref<string | undefined>;
31+
32+
/**
33+
* Set the current thread ID. Specifying a thread ID will switch to that thread, if it exists. If set to 'undefined', a new thread will be created. For both cases, `threadId` will be updated with the new value and `messages` will be cleared.
34+
*/
35+
setThreadId: (threadId: string | undefined) => void;
36+
/**
37+
* The current value of the input field.
38+
*/
39+
input: Ref<string>;
40+
41+
/**
42+
* Append a user message to the chat list. This triggers the API call to fetch
43+
* the assistant's response.
44+
* @param message The message to append
45+
* @param requestOptions Additional options to pass to the API call
46+
*/
47+
append: (
48+
message: Message | CreateMessage,
49+
requestOptions?: {
50+
data?: Record<string, string>;
51+
},
52+
) => Promise<void>;
53+
54+
/**
55+
* Abort the current request immediately, keep the generated tokens if any.
56+
*/
57+
stop: ComputedRef<() => void>;
58+
59+
/**
60+
* Handler for the `onChange` event of the input field to control the input's value.
61+
*/
62+
handleInputChange: (e: Event & { target: HTMLInputElement }) => void;
63+
64+
/**
65+
* Handler for the `onSubmit` event of the form to append a user message and reset the input.
66+
*/
67+
handleSubmit: (e: Event & { target: HTMLFormElement }) => void;
68+
69+
/**
70+
* Whether the assistant is currently sending a message.
71+
*/
72+
isSending: ComputedRef<boolean>;
73+
74+
/**
75+
* The current status of the assistant.
76+
*/
77+
status: Ref<AssistantStatus>;
78+
79+
/**
80+
* The current error, if any.
81+
*/
82+
error: Ref<Error | undefined>;
83+
};
84+
85+
export function useAssistant({
86+
api,
87+
threadId: threadIdParam,
88+
credentials,
89+
headers,
90+
body,
91+
onError,
92+
}: UseAssistantOptions): UseAssistantHelpers {
93+
const messages: Ref<Message[]> = ref([]);
94+
const input: Ref<string> = ref('');
95+
const currentThreadId: Ref<string | undefined> = ref(undefined);
96+
const status: Ref<AssistantStatus> = ref('awaiting_message');
97+
const error: Ref<undefined | Error> = ref(undefined);
98+
99+
const setMessages = (messageFactory: (messages: Message[]) => Message[]) => {
100+
messages.value = messageFactory(messages.value);
101+
};
102+
103+
const setCurrentThreadId = (newThreadId: string | undefined) => {
104+
currentThreadId.value = newThreadId;
105+
messages.value = [];
106+
};
107+
108+
const handleInputChange = (event: Event & { target: HTMLInputElement }) => {
109+
input.value = event?.target?.value;
110+
};
111+
112+
const isSending = computed(() => status.value === 'in_progress');
113+
114+
// Abort controller to cancel the current API call when required
115+
const abortController = ref<AbortController | null>(null);
116+
117+
// memoized function to stop the current request when required
118+
const stop = computed(() => {
119+
return () => {
120+
if (abortController.value) {
121+
abortController.value.abort();
122+
abortController.value = null;
123+
}
124+
};
125+
});
126+
127+
const append = async (
128+
message: Message | CreateMessage,
129+
requestOptions?: {
130+
data?: Record<string, string>;
131+
},
132+
) => {
133+
status.value = 'in_progress';
134+
135+
// Append the new message to the current list of messages
136+
const newMessage: Message = {
137+
...message,
138+
id: message.id ?? generateId(),
139+
};
140+
141+
// Update the messages list with the new message
142+
setMessages(messages => [...messages, newMessage]);
143+
144+
input.value = '';
145+
146+
const controller = new AbortController();
147+
148+
try {
149+
// Assign the new controller to the abortController ref
150+
abortController.value = controller;
151+
152+
const response = await fetch(api, {
153+
method: 'POST',
154+
headers: {
155+
'Content-Type': 'application/json',
156+
...headers,
157+
},
158+
body: JSON.stringify({
159+
...body,
160+
// Message Content
161+
message: message.content,
162+
163+
// Always Use User Provided Thread ID When Available
164+
threadId: threadIdParam ?? currentThreadId.value ?? null,
165+
166+
// Optional Request Data
167+
...(requestOptions?.data && { data: requestOptions?.data }),
168+
}),
169+
signal: controller.signal,
170+
credentials,
171+
});
172+
173+
if (!response.ok) {
174+
throw new Error(
175+
response.statusText ?? 'An error occurred while sending the message',
176+
);
177+
}
178+
179+
if (!response.body) {
180+
throw new Error('The response body is empty');
181+
}
182+
183+
for await (const { type, value } of readDataStream(
184+
response.body.getReader(),
185+
)) {
186+
switch (type) {
187+
case 'assistant_message': {
188+
messages.value = [
189+
...messages.value,
190+
{
191+
id: value.id,
192+
content: value.content[0].text.value,
193+
role: value.role,
194+
},
195+
];
196+
break;
197+
}
198+
case 'assistant_control_data': {
199+
if (value.threadId) {
200+
currentThreadId.value = value.threadId;
201+
}
202+
203+
setMessages(messages => {
204+
const lastMessage = messages[messages.length - 1];
205+
lastMessage.id = value.messageId;
206+
207+
return [...messages.slice(0, -1), lastMessage];
208+
});
209+
210+
break;
211+
}
212+
213+
case 'text': {
214+
setMessages(messages => {
215+
const lastMessage = messages[messages.length - 1];
216+
lastMessage.content += value;
217+
218+
return [...messages.slice(0, -1), lastMessage];
219+
});
220+
221+
break;
222+
}
223+
224+
case 'data_message': {
225+
setMessages(messages => [
226+
...messages,
227+
{
228+
id: value.id ?? generateId(),
229+
role: 'data',
230+
content: '',
231+
data: value.data,
232+
},
233+
]);
234+
break;
235+
}
236+
237+
case 'error': {
238+
error.value = new Error(value);
239+
}
240+
241+
default: {
242+
console.error('Unknown message type:', type);
243+
break;
244+
}
245+
}
246+
}
247+
} catch (err) {
248+
// If the error is an AbortError and the signal is aborted, reset the abortController and do nothing.
249+
if (isAbortError(err) && abortController.value?.signal.aborted) {
250+
abortController.value = null;
251+
return;
252+
}
253+
254+
// If an error handler is provided, call it with the error
255+
if (onError && err instanceof Error) {
256+
onError(err);
257+
}
258+
259+
error.value = err as Error;
260+
} finally {
261+
// Reset the status to 'awaiting_message' after the request is complete
262+
abortController.value = null;
263+
status.value = 'awaiting_message';
264+
}
265+
};
266+
267+
const submitMessage = async (
268+
event: Event & { target: HTMLFormElement },
269+
requestOptions?: {
270+
data?: Record<string, string>;
271+
},
272+
) => {
273+
event?.preventDefault?.();
274+
275+
if (!input.value) return;
276+
277+
append(
278+
{
279+
role: 'user',
280+
content: input.value,
281+
},
282+
requestOptions,
283+
);
284+
};
285+
286+
return {
287+
append,
288+
messages,
289+
setMessages,
290+
threadId: readonly(currentThreadId),
291+
setThreadId: setCurrentThreadId,
292+
input,
293+
handleInputChange,
294+
handleSubmit: submitMessage,
295+
isSending,
296+
status,
297+
error,
298+
stop,
299+
};
300+
}
301+
302+
/**
303+
* @deprecated Use `useAssistant` instead.
304+
*/
305+
export const experimental_useAssistant = useAssistant;
+272
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,272 @@
1+
import { formatStreamPart } from '@ai-sdk/ui-utils';
2+
import {
3+
mockFetchDataStream,
4+
mockFetchDataStreamWithGenerator,
5+
} from '@ai-sdk/ui-utils/test';
6+
import '@testing-library/jest-dom/vitest';
7+
import { cleanup, findByText, render, screen } from '@testing-library/vue';
8+
import userEvent from '@testing-library/user-event';
9+
import TestChatAssistantStreamComponent from './TestChatAssistantStreamComponent.vue';
10+
import TestChatAssistantThreadChangeComponent from './TestChatAssistantThreadChangeComponent.vue';
11+
12+
describe('stream data stream', () => {
13+
// Render the TestChatAssistantStreamComponent before each test
14+
beforeEach(() => {
15+
render(TestChatAssistantStreamComponent);
16+
});
17+
18+
// Cleanup after each test
19+
afterEach(() => {
20+
vi.restoreAllMocks();
21+
cleanup();
22+
});
23+
24+
it('should show streamed response', async () => {
25+
// Mock the fetch data stream
26+
const { requestBody } = mockFetchDataStream({
27+
url: 'https://example.com/api/assistant',
28+
chunks: [
29+
// Format the stream part
30+
formatStreamPart('assistant_control_data', {
31+
threadId: 't0',
32+
messageId: 'm0',
33+
}),
34+
formatStreamPart('assistant_message', {
35+
id: 'm0',
36+
role: 'assistant',
37+
content: [{ type: 'text', text: { value: '' } }],
38+
}),
39+
// Text parts
40+
'0:"Hello"\n',
41+
'0:", world"\n',
42+
'0:"."\n',
43+
],
44+
});
45+
46+
// Click the button
47+
await userEvent.click(screen.getByTestId('do-append'));
48+
49+
// Find the message-0 element
50+
await screen.findByTestId('message-0');
51+
// Expect the message-0 element to have the text content 'User: hi'
52+
expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi');
53+
54+
// Find the message-1 element
55+
await screen.findByTestId('message-1');
56+
// Expect the message-1 element to have the text content 'AI: Hello, world.'
57+
expect(screen.getByTestId('message-1')).toHaveTextContent(
58+
'AI: Hello, world.',
59+
);
60+
61+
expect(await requestBody).toStrictEqual(
62+
JSON.stringify({
63+
message: 'hi',
64+
threadId: null,
65+
}),
66+
);
67+
});
68+
69+
describe('loading state', () => {
70+
it('should show loading state', async () => {
71+
let finishGeneration: ((value?: unknown) => void) | undefined;
72+
73+
const finishGenerationPromise = new Promise(resolve => {
74+
finishGeneration = resolve;
75+
});
76+
77+
// Mock the fetch data stream with generator
78+
mockFetchDataStreamWithGenerator({
79+
url: 'https://example.com/api/assistant',
80+
chunkGenerator: (async function* generate() {
81+
const encoder = new TextEncoder();
82+
83+
yield encoder.encode(
84+
formatStreamPart('assistant_control_data', {
85+
threadId: 't0',
86+
messageId: 'm1',
87+
}),
88+
);
89+
90+
yield encoder.encode(
91+
formatStreamPart('assistant_message', {
92+
id: 'm1',
93+
role: 'assistant',
94+
content: [{ type: 'text', text: { value: '' } }],
95+
}),
96+
);
97+
98+
yield encoder.encode('0:"Hello"\n');
99+
100+
await finishGenerationPromise;
101+
})(),
102+
});
103+
104+
// Click the button
105+
await userEvent.click(screen.getByTestId('do-append'));
106+
107+
// Find the loading element and expect it to be in progress
108+
await screen.findByTestId('status');
109+
expect(screen.getByTestId('status')).toHaveTextContent('in_progress');
110+
111+
// Resolve the finishGenerationPromise
112+
finishGeneration?.();
113+
114+
// Find the loading element and expect it to be awaiting a message
115+
await findByText(await screen.findByTestId('status'), 'awaiting_message');
116+
expect(screen.getByTestId('status')).toHaveTextContent(
117+
'awaiting_message',
118+
);
119+
});
120+
});
121+
});
122+
123+
describe('Thread management', () => {
124+
beforeEach(() => {
125+
render(TestChatAssistantThreadChangeComponent);
126+
});
127+
128+
afterEach(() => {
129+
vi.restoreAllMocks();
130+
cleanup();
131+
});
132+
133+
it('create new thread', async () => {
134+
await screen.findByTestId('thread-id');
135+
expect(screen.getByTestId('thread-id')).toHaveTextContent('undefined');
136+
});
137+
138+
it('should show streamed response', async () => {
139+
const { requestBody } = mockFetchDataStream({
140+
url: 'https://example.com/api/assistant',
141+
chunks: [
142+
formatStreamPart('assistant_control_data', {
143+
threadId: 't0',
144+
messageId: 'm0',
145+
}),
146+
formatStreamPart('assistant_message', {
147+
id: 'm0',
148+
role: 'assistant',
149+
content: [{ type: 'text', text: { value: '' } }],
150+
}),
151+
// text parts:
152+
'0:"Hello"\n',
153+
'0:","\n',
154+
'0:" world"\n',
155+
'0:"."\n',
156+
],
157+
});
158+
159+
await userEvent.click(screen.getByTestId('do-append'));
160+
161+
await screen.findByTestId('message-0');
162+
expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi');
163+
164+
expect(screen.getByTestId('thread-id')).toHaveTextContent('t0');
165+
166+
await screen.findByTestId('message-1');
167+
expect(screen.getByTestId('message-1')).toHaveTextContent(
168+
'AI: Hello, world.',
169+
);
170+
171+
expect(await requestBody).toStrictEqual(
172+
JSON.stringify({
173+
message: 'hi',
174+
threadId: null,
175+
}),
176+
);
177+
});
178+
179+
it('should switch to new thread on setting undefined threadId', async () => {
180+
await userEvent.click(screen.getByTestId('do-new-thread'));
181+
182+
expect(screen.queryByTestId('message-0')).toBeNull();
183+
expect(screen.queryByTestId('message-1')).toBeNull();
184+
185+
const { requestBody } = mockFetchDataStream({
186+
url: 'https://example.com/api/assistant',
187+
chunks: [
188+
formatStreamPart('assistant_control_data', {
189+
threadId: 't1',
190+
messageId: 'm0',
191+
}),
192+
formatStreamPart('assistant_message', {
193+
id: 'm0',
194+
role: 'assistant',
195+
content: [{ type: 'text', text: { value: '' } }],
196+
}),
197+
// text parts:
198+
'0:"Hello"\n',
199+
'0:","\n',
200+
'0:" world"\n',
201+
'0:"."\n',
202+
],
203+
});
204+
205+
await userEvent.click(screen.getByTestId('do-append'));
206+
207+
await screen.findByTestId('message-0');
208+
expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi');
209+
210+
expect(screen.getByTestId('thread-id')).toHaveTextContent('t1');
211+
212+
await screen.findByTestId('message-1');
213+
expect(screen.getByTestId('message-1')).toHaveTextContent(
214+
'AI: Hello, world.',
215+
);
216+
217+
// check that correct information was sent to the server:
218+
expect(await requestBody).toStrictEqual(
219+
JSON.stringify({
220+
message: 'hi',
221+
threadId: null,
222+
}),
223+
);
224+
});
225+
226+
it('should switch to thread on setting previously created threadId', async () => {
227+
await userEvent.click(screen.getByTestId('do-thread-3'));
228+
229+
expect(screen.queryByTestId('message-0')).toBeNull();
230+
expect(screen.queryByTestId('message-1')).toBeNull();
231+
232+
const { requestBody } = mockFetchDataStream({
233+
url: 'https://example.com/api/assistant',
234+
chunks: [
235+
formatStreamPart('assistant_control_data', {
236+
threadId: 't3',
237+
messageId: 'm0',
238+
}),
239+
formatStreamPart('assistant_message', {
240+
id: 'm0',
241+
role: 'assistant',
242+
content: [{ type: 'text', text: { value: '' } }],
243+
}),
244+
// text parts:
245+
'0:"Hello"\n',
246+
'0:","\n',
247+
'0:" world"\n',
248+
'0:"."\n',
249+
],
250+
});
251+
252+
await userEvent.click(screen.getByTestId('do-append'));
253+
254+
await screen.findByTestId('message-0');
255+
expect(screen.getByTestId('message-0')).toHaveTextContent('User: hi');
256+
257+
expect(screen.getByTestId('thread-id')).toHaveTextContent('t3');
258+
259+
await screen.findByTestId('message-1');
260+
expect(screen.getByTestId('message-1')).toHaveTextContent(
261+
'AI: Hello, world.',
262+
);
263+
264+
// check that correct information was sent to the server:
265+
expect(await requestBody).toStrictEqual(
266+
JSON.stringify({
267+
message: 'hi',
268+
threadId: 't3',
269+
}),
270+
);
271+
});
272+
});

‎pnpm-lock.yaml

+28-19
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)
Please sign in to comment.