Commit 66f94b0

Authored Mar 11, 2024
chore: Changed token_count to only use tokenCountCallback (#2070)
1 parent 64b4ca2 commit 66f94b0

File tree

6 files changed (+14 -15 lines)

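This change drops the fallback to the provider-reported usage counts (response.usage.completion_tokens / prompt_tokens): token_count is now populated only when a token-count callback has been registered with the agent. A minimal sketch of registering one via the agent's setLlmTokenCountCallback API follows; the character-based estimate is a hypothetical stand-in for a real tokenizer, not part of this commit.

const newrelic = require('newrelic')

// Sketch only: the agent stores this callback at agent.llm.tokenCountCallback
// and invokes it as callback(model, content), as the diffs below show.
newrelic.setLlmTokenCountCallback(function countTokens(model, content) {
  // Hypothetical estimate (~4 characters per token); swap in a real
  // tokenizer for accurate counts.
  return Math.ceil((content || '').length / 4)
})
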
lib/llm-events/openai/chat-completion-message.js (+7 -6)

@@ -19,14 +19,15 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
       this.content = message?.content
     }

+    const tokenCB = agent.llm?.tokenCountCallback
+    if (typeof tokenCB !== 'function') {
+      return
+    }
+
     if (this.is_response) {
-      this.token_count =
-        response?.usage?.completion_tokens ||
-        agent.llm?.tokenCountCallback?.(this['response.model'], message?.content)
+      this.token_count = tokenCB(this['response.model'], message?.content)
     } else {
-      this.token_count =
-        response?.usage?.prompt_tokens ||
-        agent.llm?.tokenCountCallback?.(request.model || request.engine, message?.content)
+      this.token_count = tokenCB(request.model || request.engine, message?.content)
     }
   }
 }
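Note the new guard: if no callback is registered, the constructor returns before token_count is ever assigned, so the attribute is simply omitted rather than falling back to response.usage. The call sites above imply the callback shape, though this commit defines no formal type:

// Inferred from the call sites above:
// tokenCountCallback(model: string, content: string) => number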

lib/llm-events/openai/embedding.js (+4 -3)

@@ -14,8 +14,9 @@ module.exports = class LlmEmbedding extends LlmEvent {
     if (agent.config.ai_monitoring.record_content.enabled === true) {
       this.input = request.input?.toString()
     }
-    this.token_count =
-      response?.usage?.prompt_tokens ||
-      agent.llm?.tokenCountCallback?.(this['request.model'], request.input?.toString())
+    this.token_count = agent.llm?.tokenCountCallback?.(
+      this['request.model'],
+      request.input?.toString()
+    )
   }
 }
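Unlike the chat path, the embedding path keeps optional chaining instead of an early return, but the result is the same: with no callback registered, agent.llm?.tokenCountCallback?.(...) evaluates to undefined, which is exactly what the updated unit and versioned tests below now expect for token_count.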

test/unit/llm-events/openai/chat-completion-message.test.js (-1)

@@ -62,7 +62,6 @@ tap.test('LlmChatCompletionMessage', (t) => {
       expected.content = chatRes.choices[0].message.content
       expected.role = chatRes.choices[0].message.role
       expected.is_response = true
-      expected.token_count = 20
       t.same(chatMessageEvent, expected)
       t.end()
     })

test/unit/llm-events/openai/common.js (+1 -2)

@@ -75,7 +75,7 @@ function getExpectedResult(tx, event, type, completionId) {
       expected = { ...expected, ...resKeys }
       expected.input = 'This is my test input'
       expected.error = false
-      expected.token_count = 10
+      expected.token_count = undefined
       break
     case 'summary':
       expected = {
@@ -96,7 +96,6 @@ function getExpectedResult(tx, event, type, completionId) {
         role: 'inquisitive-kid',
         sequence: 0,
         completion_id: completionId,
-        token_count: 10,
         is_response: false
       }
   }

test/versioned/openai/chat-completions.tap.js (+1 -2)

@@ -96,8 +96,7 @@ tap.test('OpenAI instrumentation - chat completions', (t) => {
       model,
       id: 'chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTeat',
       resContent: '1 plus 2 is 3.',
-      reqContent: content,
-      tokenUsage: true
+      reqContent: content
     })

     const chatSummary = events.filter(([{ type }]) => type === 'LlmChatCompletionSummary')[0]

test/versioned/openai/embeddings.tap.js (+1 -1)

@@ -100,7 +100,7 @@ tap.test('OpenAI instrumentation - embedding', (t) => {
       'response.organization': 'new-relic-nkmd8b',
       'response.usage.total_tokens': 6,
       'response.usage.prompt_tokens': 6,
-      'token_count': 6,
+      'token_count': undefined,
       'response.headers.llmVersion': '2020-10-01',
       'response.headers.ratelimitLimitRequests': '200',
       'response.headers.ratelimitLimitTokens': '150000',
