
Commit 25f3350

ai/core: expose raw response headers (#1417)

Authored Apr 23, 2024
1 parent: d6431ae

27 files changed, +543 −133 lines
 

‎.changeset/short-seas-flash.md

+11

@@ -0,0 +1,11 @@
+---
+'@ai-sdk/provider-utils': patch
+'@ai-sdk/anthropic': patch
+'@ai-sdk/provider': patch
+'@ai-sdk/mistral': patch
+'@ai-sdk/google': patch
+'@ai-sdk/openai': patch
+'ai': patch
+---
+
+ai/core: add support for getting raw response headers.

(new example file, +24)

@@ -0,0 +1,24 @@
+import { openai } from '@ai-sdk/openai';
+import { experimental_streamText } from 'ai';
+import dotenv from 'dotenv';
+
+dotenv.config();
+
+async function main() {
+  const result = await experimental_streamText({
+    model: openai('gpt-3.5-turbo'),
+    maxTokens: 512,
+    temperature: 0.3,
+    maxRetries: 5,
+    prompt: 'Invent a new holiday and describe its traditions.',
+  });
+
+  console.log(`Request ID: ${result.rawResponse?.headers?.['x-request-id']}`);
+  console.log();
+
+  for await (const textPart of result.textStream) {
+    process.stdout.write(textPart);
+  }
+}
+
+main().catch(console.error);

‎packages/anthropic/src/anthropic-messages-language-model.test.ts

+47 −4

@@ -11,10 +11,7 @@ const TEST_PROMPT: LanguageModelV1Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];
 
-const provider = createAnthropic({
-  apiKey: 'test-api-key',
-});
+const provider = createAnthropic({ apiKey: 'test-api-key' });
 const model = provider.chat('claude-3-haiku-20240307');
 
 describe('doGenerate', () => {
@@ -181,6 +178,28 @@ describe('doGenerate', () => {
     });
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({});
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the messages', async () => {
     prepareJsonResponse({});
 
@@ -279,6 +298,30 @@ describe('doStream', () => {
     ]);
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareStreamResponse({ content: [] });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the messages and the model', async () => {
     prepareStreamResponse({ content: [] });
‎packages/anthropic/src/anthropic-messages-language-model.ts

+4 −2

@@ -164,7 +164,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/messages`,
       headers: this.config.headers(),
       body: args,
@@ -210,6 +210,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
         completionTokens: response.usage.output_tokens,
       },
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings,
     };
   }
@@ -219,7 +220,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/messages`,
       headers: this.config.headers(),
       body: {
@@ -296,6 +297,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
         }),
       ),
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings,
     };
   }

‎packages/core/core/generate-object/generate-object.ts

+19

@@ -94,6 +94,7 @@ Default and recommended: 'auto' (best mode for the model).
   let finishReason: LanguageModelV1FinishReason;
   let usage: Parameters<typeof calculateTokenUsage>[0];
   let warnings: LanguageModelV1CallWarning[] | undefined;
+  let rawResponse: { headers?: Record<string, string> } | undefined;
   let logprobs: LanguageModelV1LogProbs | undefined;
 
   switch (mode) {
@@ -122,6 +123,7 @@ Default and recommended: 'auto' (best mode for the model).
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
+      rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
 
       break;
@@ -152,6 +154,7 @@ Default and recommended: 'auto' (best mode for the model).
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
+      rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
 
       break;
@@ -192,6 +195,7 @@ Default and recommended: 'auto' (best mode for the model).
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
+      rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
 
       break;
@@ -218,6 +222,7 @@ Default and recommended: 'auto' (best mode for the model).
     finishReason,
     usage: calculateTokenUsage(usage),
     warnings,
+    rawResponse,
     logprobs,
   });
 }
@@ -246,6 +251,16 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   /**
 Logprobs for the completion.
 `undefined` if the mode does not support logprobs or if was not enabled
@@ -257,12 +272,16 @@ Logprobs for the completion.
     finishReason: LanguageModelV1FinishReason;
     usage: TokenUsage;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
     logprobs: LanguageModelV1LogProbs | undefined;
   }) {
     this.object = options.object;
     this.finishReason = options.finishReason;
     this.usage = options.usage;
     this.warnings = options.warnings;
+    this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
 }
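
With GenerateObjectResult now carrying rawResponse, callers can read provider response headers off the result. A minimal sketch, assuming the experimental_generateObject export of this release and OpenAI-style rate-limit header names (both vary by release and provider):

import { openai } from '@ai-sdk/openai';
import { experimental_generateObject } from 'ai';
import { z } from 'zod';

async function main() {
  const result = await experimental_generateObject({
    model: openai('gpt-3.5-turbo'),
    schema: z.object({ name: z.string(), traditions: z.array(z.string()) }),
    prompt: 'Invent a new holiday and describe its traditions.',
  });

  // rawResponse and headers are both optional, so guard the access:
  console.log(result.rawResponse?.headers?.['x-ratelimit-remaining-requests']);
}

main().catch(console.error);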

‎packages/core/core/generate-object/stream-object.ts

+16

@@ -220,6 +220,7 @@ Default and recommended: 'auto' (best mode for the model).
   return new StreamObjectResult({
     stream: result.stream.pipeThrough(new TransformStream(transformer)),
     warnings: result.warnings,
+    rawResponse: result.rawResponse,
   });
 }
 
@@ -259,15 +260,30 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   constructor({
     stream,
     warnings,
+    rawResponse,
   }: {
     stream: ReadableStream<string | ObjectStreamPartInput>;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
   }) {
     this.originalStream = stream;
     this.warnings = warnings;
+    this.rawResponse = rawResponse;
   }
 
   get partialObjectStream(): AsyncIterableStream<DeepPartial<T>> {

‎packages/core/core/generate-text/generate-text.ts

+15

@@ -116,6 +116,7 @@ The tools that the model can call. The model needs to support calling tools.
     finishReason: modelResponse.finishReason,
     usage: calculateTokenUsage(modelResponse.usage),
     warnings: modelResponse.warnings,
+    rawResponse: modelResponse.rawResponse,
     logprobs: modelResponse.logprobs,
   });
 }
@@ -188,6 +189,16 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   /**
 Logprobs for the completion.
 `undefined` if the mode does not support logprobs or if was not enabled
@@ -201,6 +212,9 @@ Logprobs for the completion.
     finishReason: LanguageModelV1FinishReason;
     usage: TokenUsage;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
     logprobs: LanguageModelV1LogProbs | undefined;
   }) {
     this.text = options.text;
@@ -209,6 +223,7 @@ Logprobs for the completion.
     this.finishReason = options.finishReason;
     this.usage = options.usage;
     this.warnings = options.warnings;
+    this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
 }
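
GenerateTextResult gets the same optional field. A minimal sketch of reading it, assuming the experimental_generateText export of this release (x-request-id is the header OpenAI sends; other providers use other names):

import { openai } from '@ai-sdk/openai';
import { experimental_generateText } from 'ai';

async function main() {
  const { text, rawResponse } = await experimental_generateText({
    model: openai('gpt-3.5-turbo'),
    prompt: 'Invent a new holiday and describe its traditions.',
  });

  // handy when filing support requests with the provider:
  console.log(`Request ID: ${rawResponse?.headers?.['x-request-id']}`);
  console.log(text);
}

main().catch(console.error);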

‎packages/core/core/generate-text/stream-text.test.ts

+1 −2

@@ -4,9 +4,8 @@ import { convertArrayToReadableStream } from '../test/convert-array-to-readable-stream';
 import { convertAsyncIterableToArray } from '../test/convert-async-iterable-to-array';
 import { convertReadableStreamToArray } from '../test/convert-readable-stream-to-array';
 import { MockLanguageModelV1 } from '../test/mock-language-model-v1';
-import { experimental_streamText } from './stream-text';
-import { ServerResponse } from 'node:http';
 import { createMockServerResponse } from '../test/mock-server-response';
+import { experimental_streamText } from './stream-text';
 
 describe('result.textStream', () => {
   it('should send text deltas', async () => {

‎packages/core/core/generate-text/stream-text.ts

+17 −1

@@ -85,7 +85,7 @@ The tools that the model can call. The model needs to support calling tools.
 }): Promise<StreamTextResult<TOOLS>> {
   const retry = retryWithExponentialBackoff({ maxRetries });
   const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const { stream, warnings } = await retry(() =>
+  const { stream, warnings, rawResponse } = await retry(() =>
     model.doStream({
       mode: {
         type: 'regular',
@@ -112,6 +112,7 @@ The tools that the model can call. The model needs to support calling tools.
       generatorStream: stream,
     }),
     warnings,
+    rawResponse,
   });
 }
 
@@ -152,15 +153,30 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   constructor({
     stream,
     warnings,
+    rawResponse,
   }: {
     stream: ReadableStream<TextStreamPart<TOOLS>>;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
   }) {
     this.originalStream = stream;
     this.warnings = warnings;
+    this.rawResponse = rawResponse;
   }
 
   /**

‎packages/google/src/google-generative-ai-language-model.test.ts

+46

@@ -127,6 +127,28 @@ describe('doGenerate', () => {
     expect(finishReason).toStrictEqual('tool-calls');
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({ content: '' });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the messages', async () => {
     prepareJsonResponse({ content: '' });
 
@@ -225,6 +247,30 @@ describe('doStream', () => {
     ]);
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareStreamResponse({ content: [] });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the messages', async () => {
     prepareStreamResponse({ content: [''] });

‎packages/google/src/google-generative-ai-language-model.ts

+4 −2

@@ -151,7 +151,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/${this.modelId}:generateContent`,
       headers: this.config.headers(),
       body: args,
@@ -180,6 +180,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
         completionTokens: candidate.tokenCount ?? NaN,
       },
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings,
     };
   }
@@ -189,7 +190,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/${this.modelId}:streamGenerateContent?alt=sse`,
       headers: this.config.headers(),
       body: args,
@@ -287,6 +288,7 @@ export class GoogleGenerativeAILanguageModel implements LanguageModelV1 {
         }),
       ),
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings,
     };
   }

‎packages/mistral/src/mistral-chat-language-model.test.ts

+52 −5

@@ -11,6 +11,7 @@ const TEST_PROMPT: LanguageModelV1Prompt = [
 ];
 
 const provider = createMistral({ apiKey: 'test-api-key' });
+const model = provider.chat('mistral-small-latest');
 
 describe('doGenerate', () => {
   const server = new JsonTestServer(
@@ -58,7 +59,7 @@ describe('doGenerate', () => {
   it('should extract text response', async () => {
     prepareJsonResponse({ content: 'Hello, World!' });
 
-    const { text } = await provider.chat('mistral-small-latest').doGenerate({
+    const { text } = await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -73,7 +74,7 @@ describe('doGenerate', () => {
       usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 },
     });
 
-    const { usage } = await provider.chat('mistral-small-latest').doGenerate({
+    const { usage } = await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -85,10 +86,32 @@ describe('doGenerate', () => {
     });
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({ content: '' });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the messages', async () => {
     prepareJsonResponse({ content: '' });
 
-    await provider.chat('mistral-small-latest').doGenerate({
+    await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -167,7 +190,7 @@ describe('doStream', () => {
   it('should stream text deltas', async () => {
     prepareStreamResponse({ content: ['Hello', ', ', 'world!'] });
 
-    const { stream } = await provider.chat('mistral-small-latest').doStream({
+    const { stream } = await model.doStream({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -251,10 +274,34 @@ describe('doStream', () => {
     ]);
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareStreamResponse({ content: [] });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the messages', async () => {
     prepareStreamResponse({ content: [''] });
 
-    await provider.chat('mistral-small-latest').doStream({
+    await model.doStream({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,

‎packages/mistral/src/mistral-chat-language-model.ts

+4 −2

@@ -155,7 +155,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/chat/completions`,
       headers: this.config.headers(),
       body: args,
@@ -183,6 +183,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
         completionTokens: response.usage.completion_tokens,
       },
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings,
     };
   }
@@ -192,7 +193,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/chat/completions`,
       headers: this.config.headers(),
       body: {
@@ -287,6 +288,7 @@ export class MistralChatLanguageModel implements LanguageModelV1 {
         }),
       ),
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings,
     };
   }

‎packages/openai/src/openai-chat-language-model.test.ts

+56 −14

@@ -106,9 +106,8 @@ const TEST_LOGPROBS = {
   ],
 };
 
-const provider = createOpenAI({
-  apiKey: 'test-api-key',
-});
+const provider = createOpenAI({ apiKey: 'test-api-key' });
+const model = provider.chat('gpt-3.5-turbo');
 
 describe('doGenerate', () => {
   const server = new JsonTestServer(
@@ -168,7 +167,7 @@ describe('doGenerate', () => {
   it('should extract text response', async () => {
     prepareJsonResponse({ content: 'Hello, World!' });
 
-    const { text } = await provider.chat('gpt-3.5-turbo').doGenerate({
+    const { text } = await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -183,7 +182,7 @@ describe('doGenerate', () => {
       usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 },
     });
 
-    const { usage } = await provider.chat('gpt-3.5-turbo').doGenerate({
+    const { usage } = await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -200,8 +199,6 @@ describe('doGenerate', () => {
       logprobs: TEST_LOGPROBS,
     });
 
-    const provider = createOpenAI({ apiKey: 'test-api-key' });
-
     const response = await provider
       .chat('gpt-3.5-turbo', { logprobs: 1 })
       .doGenerate({
@@ -220,20 +217,41 @@ describe('doGenerate', () => {
       finish_reason: 'stop',
     });
 
-    const provider = createOpenAI({ apiKey: 'test-api-key' });
-
-    const response = await provider.chat('gpt-3.5-turbo').doGenerate({
+    const response = await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
     });
+
     expect(response.finishReason).toStrictEqual('stop');
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({ content: '' });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the messages', async () => {
     prepareJsonResponse({ content: '' });
 
-    await provider.chat('gpt-3.5-turbo').doGenerate({
+    await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -357,7 +375,7 @@ describe('doStream', () => {
       logprobs: TEST_LOGPROBS,
     });
 
-    const { stream } = await provider.chat('gpt-3.5-turbo').doStream({
+    const { stream } = await model.doStream({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -412,7 +430,7 @@ describe('doStream', () => {
       'data: [DONE]\n\n',
     ];
 
-    const { stream } = await provider.chat('gpt-3.5-turbo').doStream({
+    const { stream } = await model.doStream({
       inputFormat: 'prompt',
       mode: {
         type: 'regular',
@@ -499,10 +517,34 @@ describe('doStream', () => {
     ]);
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareStreamResponse({ content: [] });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the messages and the model', async () => {
     prepareStreamResponse({ content: [] });
 
-    await provider.chat('gpt-3.5-turbo').doStream({
+    await model.doStream({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,

‎packages/openai/src/openai-chat-language-model.ts

+4 −2

@@ -153,7 +153,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
     const args = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/chat/completions`,
       headers: this.config.headers(),
       body: args,
@@ -181,6 +181,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
         completionTokens: response.usage.completion_tokens,
       },
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings: [],
      logprobs: mapOpenAIChatLogProbsOutput(choice.logprobs),
     };
@@ -191,7 +192,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
     const args = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/chat/completions`,
       headers: this.config.headers(),
       body: {
@@ -358,6 +359,7 @@ export class OpenAIChatLanguageModel implements LanguageModelV1 {
         }),
       ),
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings: [],
     };
   }

‎packages/openai/src/openai-completion-language-model.test.ts

+65 −26

@@ -38,9 +38,8 @@ const TEST_LOGPROBS = {
   ] as Record<string, number>[],
 };
 
-const provider = createOpenAI({
-  apiKey: 'test-api-key',
-});
+const provider = createOpenAI({ apiKey: 'test-api-key' });
+const model = provider.completion('gpt-3.5-turbo-instruct');
 
 describe('doGenerate', () => {
   const server = new JsonTestServer('https://api.openai.com/v1/completions');
@@ -90,13 +89,11 @@ describe('doGenerate', () => {
   it('should extract text response', async () => {
     prepareJsonResponse({ content: 'Hello, World!' });
 
-    const { text } = await provider
-      .completion('gpt-3.5-turbo-instruct')
-      .doGenerate({
-        inputFormat: 'prompt',
-        mode: { type: 'regular' },
-        prompt: TEST_PROMPT,
-      });
+    const { text } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
 
     expect(text).toStrictEqual('Hello, World!');
   });
@@ -107,13 +104,11 @@ describe('doGenerate', () => {
       usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 },
     });
 
-    const { usage } = await provider
-      .completion('gpt-3.5-turbo-instruct')
-      .doGenerate({
-        inputFormat: 'prompt',
-        mode: { type: 'regular' },
-        prompt: TEST_PROMPT,
-      });
+    const { usage } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
 
     expect(usage).toStrictEqual({
       promptTokens: 20,
@@ -155,10 +150,32 @@ describe('doGenerate', () => {
     expect(finishReason).toStrictEqual('stop');
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({ content: '' });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the prompt', async () => {
     prepareJsonResponse({ content: '' });
 
-    await provider.completion('gpt-3.5-turbo-instruct').doGenerate({
+    await model.doGenerate({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,
@@ -275,13 +292,11 @@ describe('doStream', () => {
       logprobs: TEST_LOGPROBS,
     });
 
-    const { stream } = await provider
-      .completion('gpt-3.5-turbo-instruct')
-      .doStream({
-        inputFormat: 'prompt',
-        mode: { type: 'regular' },
-        prompt: TEST_PROMPT,
-      });
+    const { stream } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
 
     // note: space moved to last chunk bc of trimming
     expect(await convertStreamToArray(stream)).toStrictEqual([
@@ -298,10 +313,34 @@ describe('doStream', () => {
     ]);
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareStreamResponse({ content: [] });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the prompt', async () => {
     prepareStreamResponse({ content: [] });
 
-    await provider.completion('gpt-3.5-turbo-instruct').doStream({
+    await model.doStream({
       inputFormat: 'prompt',
       mode: { type: 'regular' },
       prompt: TEST_PROMPT,

‎packages/openai/src/openai-completion-language-model.ts

+4 −4

@@ -99,8 +99,6 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
       stop: stopSequences,
     };
 
-    console.log('BASE ARGS LOGS', baseArgs.logprobs);
-
     switch (type) {
       case 'regular': {
         if (mode.tools?.length) {
@@ -142,7 +140,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
     const args = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/completions`,
       headers: this.config.headers(),
       body: args,
@@ -165,6 +163,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       logprobs: mapOpenAICompletionLogProbs(choice.logprobs),
       rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
       warnings: [],
     };
   }
@@ -174,7 +173,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
     const args = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/completions`,
       headers: this.config.headers(),
       body: {
@@ -251,6 +250,7 @@ export class OpenAICompletionLanguageModel implements LanguageModelV1 {
         }),
       ),
      rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
      warnings: [],
     };
   }

packages/provider-utils/src/extract-response-headers.ts (new file, +15)

@@ -0,0 +1,15 @@
+/**
+Extracts the headers from a response object and returns them as a key-value object.
+
+@param response - The response object to extract headers from.
+@returns The headers as a key-value object.
+*/
+export function extractResponseHeaders(
+  response: Response,
+): Record<string, string> {
+  const headers: Record<string, string> = {};
+  response.headers.forEach((value, key) => {
+    headers[key] = value;
+  });
+  return headers;
+}
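
One behavior worth noting when asserting on these headers: the Fetch Headers class lower-cases header names, so the extracted keys are lowercase regardless of how the server cased them, which is why the tests above expect keys like 'content-type'. For example:

import { extractResponseHeaders } from '@ai-sdk/provider-utils';

const response = new Response('{}', {
  headers: { 'Content-Type': 'application/json', 'X-Request-Id': 'req_123' },
});

console.log(extractResponseHeaders(response));
// => { 'content-type': 'application/json', 'x-request-id': 'req_123' }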

‎packages/provider-utils/src/index.ts

+1

@@ -1,3 +1,4 @@
+export * from './extract-response-headers';
 export * from './generate-id';
 export * from './get-error-message';
 export * from './load-api-key';

packages/provider-utils/src/response-handler.ts

@@ -1,16 +1,20 @@
-import { APICallError, NoResponseBodyError } from '@ai-sdk/provider';
+import { APICallError, EmptyResponseBodyError } from '@ai-sdk/provider';
 import {
   EventSourceParserStream,
   ParsedEvent,
 } from 'eventsource-parser/stream';
 import { ZodSchema } from 'zod';
+import { extractResponseHeaders } from './extract-response-headers';
 import { ParseResult, parseJSON, safeParseJSON } from './parse-json';
 
 export type ResponseHandler<RETURN_TYPE> = (options: {
   url: string;
   requestBodyValues: unknown;
   response: Response;
-}) => PromiseLike<RETURN_TYPE>;
+}) => PromiseLike<{
+  value: RETURN_TYPE;
+  responseHeaders?: Record<string, string>;
+}>;
 
 export const createJsonErrorResponseHandler =
   <T>({
@@ -24,17 +28,22 @@ export const createJsonErrorResponseHandler =
   }): ResponseHandler<APICallError> =>
   async ({ response, url, requestBodyValues }) => {
     const responseBody = await response.text();
+    const responseHeaders = extractResponseHeaders(response);
 
     // Some providers return an empty response body for some errors:
     if (responseBody.trim() === '') {
-      return new APICallError({
-        message: response.statusText,
-        url,
-        requestBodyValues,
-        statusCode: response.status,
-        responseBody,
-        isRetryable: isRetryable?.(response),
-      });
+      return {
+        responseHeaders,
+        value: new APICallError({
+          message: response.statusText,
+          url,
+          requestBodyValues,
+          statusCode: response.status,
+          responseHeaders,
+          responseBody,
+          isRetryable: isRetryable?.(response),
+        }),
+      };
     }
 
     // resilient parsing in case the response is not JSON or does not match the schema:
@@ -44,24 +53,32 @@ export const createJsonErrorResponseHandler =
         schema: errorSchema,
       });
 
-      return new APICallError({
-        message: errorToMessage(parsedError),
-        url,
-        requestBodyValues,
-        statusCode: response.status,
-        responseBody,
-        data: parsedError,
-        isRetryable: isRetryable?.(response, parsedError),
-      });
+      return {
+        responseHeaders,
+        value: new APICallError({
+          message: errorToMessage(parsedError),
+          url,
+          requestBodyValues,
+          statusCode: response.status,
+          responseHeaders,
+          responseBody,
+          data: parsedError,
+          isRetryable: isRetryable?.(response, parsedError),
+        }),
+      };
     } catch (parseError) {
-      return new APICallError({
-        message: response.statusText,
-        url,
-        requestBodyValues,
-        statusCode: response.status,
-        responseBody,
-        isRetryable: isRetryable?.(response),
-      });
+      return {
+        responseHeaders,
+        value: new APICallError({
+          message: response.statusText,
+          url,
+          requestBodyValues,
+          statusCode: response.status,
+          responseHeaders,
+          responseBody,
+          isRetryable: isRetryable?.(response),
+        }),
+      };
     }
   };
 
@@ -70,30 +87,35 @@ export const createEventSourceResponseHandler =
     chunkSchema: ZodSchema<T>,
   ): ResponseHandler<ReadableStream<ParseResult<T>>> =>
   async ({ response }: { response: Response }) => {
+    const responseHeaders = extractResponseHeaders(response);
+
     if (response.body == null) {
-      throw new NoResponseBodyError();
+      throw new EmptyResponseBodyError({});
     }
 
-    return response.body
-      .pipeThrough(new TextDecoderStream())
-      .pipeThrough(new EventSourceParserStream())
-      .pipeThrough(
-        new TransformStream<ParsedEvent, ParseResult<T>>({
-          transform({ data }, controller) {
-            // ignore the 'DONE' event that e.g. OpenAI sends:
-            if (data === '[DONE]') {
-              return;
-            }
-
-            controller.enqueue(
-              safeParseJSON({
-                text: data,
-                schema: chunkSchema,
-              }),
-            );
-          },
-        }),
-      );
+    return {
+      responseHeaders,
+      value: response.body
+        .pipeThrough(new TextDecoderStream())
+        .pipeThrough(new EventSourceParserStream())
+        .pipeThrough(
+          new TransformStream<ParsedEvent, ParseResult<T>>({
+            transform({ data }, controller) {
+              // ignore the 'DONE' event that e.g. OpenAI sends:
+              if (data === '[DONE]') {
+                return;
+              }
+
+              controller.enqueue(
+                safeParseJSON({
+                  text: data,
+                  schema: chunkSchema,
+                }),
+              );
+            },
+          }),
+        ),
+    };
   };
 
@@ -106,16 +128,22 @@ export const createJsonResponseHandler =
       schema: responseSchema,
     });
 
+    const responseHeaders = extractResponseHeaders(response);
+
     if (!parsedResult.success) {
       throw new APICallError({
         message: 'Invalid JSON response',
         cause: parsedResult.error,
         statusCode: response.status,
+        responseHeaders,
         responseBody,
         url,
         requestBodyValues,
       });
     }
 
-    return parsedResult.value;
+    return {
+      responseHeaders,
+      value: parsedResult.value,
+    };
   };
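
This ResponseHandler contract change is the pivot of the commit: handlers now resolve to { value, responseHeaders } instead of the bare value, which is what lets every provider call site above destructure responseHeaders. A sketch of a custom handler under the new contract (a hypothetical plain-text handler, not part of this commit; it assumes ResponseHandler and extractResponseHeaders are both exported from @ai-sdk/provider-utils, as the providers here use them):

import {
  ResponseHandler,
  extractResponseHeaders,
} from '@ai-sdk/provider-utils';

// hypothetical handler: returns the raw body text plus the response headers
const createTextResponseHandler = (): ResponseHandler<string> =>
  async ({ response }) => ({
    responseHeaders: extractResponseHeaders(response),
    value: await response.text(),
  });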

‎packages/provider-utils/src/test/json-test-server.ts

+7 −1

@@ -4,6 +4,7 @@ import { SetupServer, setupServer } from 'msw/node';
 export class JsonTestServer {
   readonly server: SetupServer;
 
+  responseHeaders: Record<string, string> = {};
   responseBodyJson: any = {};
 
   request: Request | undefined;
@@ -15,7 +16,12 @@ import { SetupServer, setupServer } from 'msw/node';
       http.post(url, ({ request }) => {
         this.request = request;
 
-        return HttpResponse.json(responseBodyJson());
+        return HttpResponse.json(responseBodyJson(), {
+          headers: {
+            'Content-Type': 'application/json',
+            ...this.responseHeaders,
+          },
+        });
       }),
     );
   }

‎packages/provider-utils/src/test/streaming-test-server.ts

+2

@@ -4,6 +4,7 @@ import { SetupServer, setupServer } from 'msw/node';
 export class StreamingTestServer {
   readonly server: SetupServer;
 
+  responseHeaders: Record<string, string> = {};
   responseChunks: any[] = [];
 
   request: Request | undefined;
@@ -34,6 +35,7 @@ import { SetupServer, setupServer } from 'msw/node';
           'Content-Type': 'text/event-stream',
           'Cache-Control': 'no-cache',
           Connection: 'keep-alive',
+          ...this.responseHeaders,
         },
       });
     }),

‎packages/provider/src/errors/api-call-error.ts

+9

@@ -2,7 +2,10 @@ export class APICallError extends Error {
   readonly url: string;
   readonly requestBodyValues: unknown;
   readonly statusCode?: number;
+
+  readonly responseHeaders?: Record<string, string>;
   readonly responseBody?: string;
+
   readonly cause?: unknown;
   readonly isRetryable: boolean;
   readonly data?: unknown;
@@ -12,6 +15,7 @@ export class APICallError extends Error {
     url,
     requestBodyValues,
     statusCode,
+    responseHeaders,
     responseBody,
     cause,
     isRetryable = statusCode != null &&
@@ -25,6 +29,7 @@ export class APICallError extends Error {
     url: string;
     requestBodyValues: unknown;
     statusCode?: number;
+    responseHeaders?: Record<string, string>;
     responseBody?: string;
     cause?: unknown;
     isRetryable?: boolean;
@@ -37,6 +42,7 @@ export class APICallError extends Error {
     this.url = url;
     this.requestBodyValues = requestBodyValues;
     this.statusCode = statusCode;
+    this.responseHeaders = responseHeaders;
    this.responseBody = responseBody;
     this.cause = cause;
     this.isRetryable = isRetryable;
@@ -51,6 +57,8 @@ export class APICallError extends Error {
       typeof (error as APICallError).requestBodyValues === 'object' &&
       ((error as APICallError).statusCode == null ||
         typeof (error as APICallError).statusCode === 'number') &&
+      ((error as APICallError).responseHeaders == null ||
+        typeof (error as APICallError).responseHeaders === 'object') &&
       ((error as APICallError).responseBody == null ||
         typeof (error as APICallError).responseBody === 'string') &&
       ((error as APICallError).cause == null ||
@@ -68,6 +76,7 @@ export class APICallError extends Error {
       url: this.url,
       requestBodyValues: this.requestBodyValues,
       statusCode: this.statusCode,
+      responseHeaders: this.responseHeaders,
      responseBody: this.responseBody,
      cause: this.cause,
      isRetryable: this.isRetryable,
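
Since APICallError now carries responseHeaders (and serializes them in toJSON), error handlers can inspect the headers of failed requests, e.g. Retry-After on 429 responses. A minimal sketch, assuming the class's static isAPICallError guard whose type checks are extended above:

import { APICallError } from '@ai-sdk/provider';

function logRateLimitInfo(error: unknown) {
  if (APICallError.isAPICallError(error)) {
    // populated from the failed HTTP response by the handlers above:
    console.log(error.statusCode, error.responseHeaders?.['retry-after']);
  }
}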

packages/provider/src/errors/empty-response-body-error.ts (new file, +21)

@@ -0,0 +1,21 @@
+export class EmptyResponseBodyError extends Error {
+  constructor({ message = 'Empty response body' }: { message?: string } = {}) {
+    super(message);
+
+    this.name = 'AI_EmptyResponseBodyError';
+  }
+
+  static isEmptyResponseBodyError(
+    error: unknown,
+  ): error is EmptyResponseBodyError {
+    return error instanceof Error && error.name === 'AI_EmptyResponseBodyError';
+  }
+
+  toJSON() {
+    return {
+      name: this.name,
+      message: this.message,
+      stack: this.stack,
+    };
+  }
+}
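
Note that the guard matches on error.name rather than instanceof, so detection keeps working even when duplicate copies of @ai-sdk/provider end up installed. A usage sketch:

import { EmptyResponseBodyError } from '@ai-sdk/provider';

function describeStreamError(error: unknown): string {
  return EmptyResponseBodyError.isEmptyResponseBodyError(error)
    ? 'the provider returned a response without a body'
    : 'unknown stream error';
}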

‎packages/provider/src/errors/index.ts

+1 −1

@@ -1,4 +1,5 @@
 export * from './api-call-error';
+export * from './empty-response-body-error';
 export * from './invalid-argument-error';
 export * from './invalid-data-content-error';
 export * from './invalid-prompt-error';
@@ -7,7 +8,6 @@ export * from './invalid-tool-arguments-error';
 export * from './json-parse-error';
 export * from './load-api-key-error';
 export * from './no-object-generated-error';
-export * from './no-response-body-error';
 export * from './no-such-tool-error';
 export * from './retry-error';
 export * from './tool-call-parse-error';

‎packages/provider/src/errors/no-response-body-error.ts

-19
This file was deleted.

‎packages/provider/src/language-model/v1/language-model-v1.ts

+22

@@ -87,6 +87,16 @@ export type LanguageModelV1 = {
     rawSettings: Record<string, unknown>;
   };
 
+  /**
+   * Optional raw response information for debugging purposes.
+   */
+  rawResponse?: {
+    /**
+     * Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   warnings?: LanguageModelV1CallWarning[];
 
   /**
@@ -124,6 +134,16 @@ export type LanguageModelV1 = {
     rawSettings: Record<string, unknown>;
   };
 
+  /**
+   * Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+     * Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   warnings?: LanguageModelV1CallWarning[];
 }>;
};
@@ -156,3 +176,5 @@ export type LanguageModelV1StreamPart =
 
   // error parts are streamed, allowing for multiple errors
   | { type: 'error'; error: unknown };
+
+export type LanguageModelV1ResponseMetadata = {};
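
Because rawResponse is optional on both doGenerate and doStream results, existing LanguageModelV1 implementations keep compiling; a provider opts in by returning headers next to rawCall. A sketch of the relevant slice of a doGenerate implementation (the URL, request body, and response shape are placeholders, not a real provider API):

import { extractResponseHeaders } from '@ai-sdk/provider-utils';

// placeholder transport for a hypothetical custom provider:
async function doGenerateSlice(url: string, args: Record<string, unknown>) {
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify(args),
  });
  const data = await response.json();

  return {
    text: data.text as string,
    finishReason: 'stop' as const,
    usage: { promptTokens: NaN, completionTokens: NaN },
    rawCall: { rawPrompt: args, rawSettings: {} },
    // new in this commit; optional, so older providers need no changes:
    rawResponse: { headers: extractResponseHeaders(response) },
  };
}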
