fix(core): Improve Vercel AI SDK instrumentation attributes #19717
fix(core): Improve Vercel AI SDK instrumentation attributes #19717 — RulaKhaled merged 9 commits into develop from
Conversation
size-limit report 📦
|
node-overhead report 🧳Note: This is a synthetic benchmark with a minimal express app and does not necessarily reflect the real-world performance impact in an application.
|
There was a problem hiding this comment.
Cursor Bugbot has reviewed your changes and found 1 potential issue.
Autofix Details
Bugbot Autofix prepared a fix for the issue found in the latest run.
- ✅ Fixed: V6 tests missing new output messages attribute assertions
- Added explicit
GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE assertions (and import) across the v6 span expectations so gen_ai.output.messages is now validated for text and tool-call outputs.
- Added explicit
Or push these changes by commenting:
@cursor push 8e0d6cceb7
Preview (8e0d6cceb7)
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
@@ -4,6 +4,7 @@
import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
+ GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -97,6 +98,8 @@
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -129,6 +132,8 @@
'vercel.ai.response.id': expect.any(String),
'vercel.ai.response.timestamp': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -231,6 +236,8 @@
'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
@@ -257,6 +264,8 @@
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]:
'[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.response.id': expect.any(String),
'vercel.ai.response.model': 'mock-model-id',
@@ -289,6 +298,8 @@
'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
@@ -324,6 +335,8 @@
'vercel.ai.response.id': expect.any(String),
'vercel.ai.response.timestamp': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -346,6 +359,8 @@
'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]',
'vercel.ai.response.finishReason': 'tool-calls',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
@@ -371,6 +386,8 @@
'vercel.ai.pipeline.name': 'generateText.doGenerate',
'vercel.ai.request.headers.user-agent': expect.any(String),
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
+ '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool-calls"}]',
'vercel.ai.prompt.toolChoice': expect.any(String),
[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
'vercel.ai.response.finishReason': 'tool-calls',
| function truncateContentArrayMessage(message: ContentArrayMessage, maxBytes: number): unknown[] { | ||
| const { content } = message; | ||
|
|
||
| // Find the first text part to truncate |
There was a problem hiding this comment.
m: Why do we only truncate the first text part? Is the assumption that these messages usually only have one text part?
There was a problem hiding this comment.
Yes, because this is the most common use case, but we could and should account for more parts; I'll update.
There was a problem hiding this comment.
Rethinking this: the Python SDK completely removed single-message truncation, and we should follow. I will keep it as it is for now, and remove truncation for single-message object parts as well in a later PR :)
There was a problem hiding this comment.
lol yeah I was just told that we'll likely be dropping most of truncation in the sdks soon so it's whatever
| */ | ||
| function normalizeFinishReason(finishReason: unknown): string { | ||
| if (typeof finishReason !== 'string') { | ||
| return 'stop'; |
There was a problem hiding this comment.
l: why do we default to stop if nothing is set?
There was a problem hiding this comment.
because finish_reason is required according to the OTel schema for output messages. https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-output-messages.json
"FinishReason": {
"enum": [
"stop",
"length",
"content_filter",
"tool_call",
"error"
]
}
When the SDK doesn't give us one, 'stop' (normal completion) is the most sensible default assumption.
| // eslint-disable-next-line @typescript-eslint/no-dynamic-delete | ||
| delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; | ||
| // eslint-disable-next-line @typescript-eslint/no-dynamic-delete | ||
| delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; |
There was a problem hiding this comment.
l: we do not delete the original finish reason attribute after normalizing here, is that on purpose?
There was a problem hiding this comment.
yah finish reason is an independent attribute that was not deprecated by output messages attribute https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-response-finish_reasons
There was a problem hiding this comment.
Cursor Bugbot has reviewed your changes and found 1 potential issue.
Bugbot Autofix is OFF. To automatically fix reported issues with cloud agents, enable autofix in the Cursor dashboard.
7686f24 to
190d760
Compare
This PR introduces some attributes and fixes to Vercel AI SDK: - Adds new [gen_ai.output.messages ](https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-output-messages) which deprecates https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-response-text and https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-response-tool_calls - Adds new [gen_ai.tool.description](https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-tool-description) - Checks for Vercel AI media type when stripping media out of the input messages Closes #19574

This PR introduces some attributes and fixes to Vercel AI SDK:
Closes #19574