Variable AISemanticConventions (const)
AISemanticConventions: {
EMBEDDING_TEXT: "ai.value";
EMBEDDING_TEXTS: "ai.values";
EMBEDDING_VECTOR: "ai.embedding";
EMBEDDING_VECTORS: "ai.embeddings";
FUNCTION_ID: "ai.telemetry.functionId";
METADATA: "ai.telemetry.metadata";
MODEL_ID: "ai.model.id";
MODEL_PROVIDER: "ai.model.provider";
OPERATION_ID: "ai.operationId";
PROMPT: "ai.prompt";
PROMPT_MESSAGES: "ai.prompt.messages";
PROMPT_TOOL_CHOICE: "ai.prompt.toolChoice";
PROMPT_TOOLS: "ai.prompt.tools";
RESPONSE_AVG_OUTPUT_TOKENS_PER_SECOND: "ai.response.avgOutputTokensPerSecond";
RESPONSE_FINISH_REASON: "ai.response.finishReason";
RESPONSE_ID: "ai.response.id";
RESPONSE_MODEL: "ai.response.model";
RESPONSE_MS_TO_FINISH: "ai.response.msToFinish";
RESPONSE_MS_TO_FIRST_CHUNK: "ai.response.msToFirstChunk";
RESPONSE_OBJECT: "ai.response.object";
RESPONSE_PROVIDER_METADATA: "ai.response.providerMetadata";
RESPONSE_TEXT: "ai.response.text";
RESPONSE_TIMESTAMP: "ai.response.timestamp";
RESPONSE_TOOL_CALLS: "ai.response.toolCalls";
SETTINGS: "ai.settings";
TOKEN_COUNT_CACHED_INPUT: "ai.usage.cachedInputTokens";
TOKEN_COUNT_COMPLETION: "ai.usage.completionTokens";
TOKEN_COUNT_INPUT: "ai.usage.inputTokens";
TOKEN_COUNT_OUTPUT: "ai.usage.outputTokens";
TOKEN_COUNT_PROMPT: "ai.usage.promptTokens";
TOKEN_COUNT_REASONING: "ai.usage.reasoningTokens";
TOKEN_COUNT_TOKENS: "ai.usage.tokens";
TOKEN_COUNT_TOTAL: "ai.usage.totalTokens";
TOOL_CALL_ARGS: "ai.toolCall.args";
TOOL_CALL_ID: "ai.toolCall.id";
TOOL_CALL_NAME: "ai.toolCall.name";
TOOL_CALL_RESULT: "ai.toolCall.result";
} = ...
The semantic conventions used by the Vercel AI SDK (ai.* attributes).
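
As a usage sketch, the example below reads a few of these attributes off an exported span's attribute bag when post-processing Vercel AI SDK telemetry. The import path and the attribute-bag shape are assumptions for illustration; only the constant names come from the declaration above.

// Hypothetical import path -- substitute the package this page documents.
import { AISemanticConventions } from "@arizeai/openinference-vercel";

// Minimal stand-in for a span attribute bag: OpenTelemetry attribute values
// are primitives or arrays of primitives, keyed by attribute name.
type AttributeValue = string | number | boolean | Array<string | number | boolean>;
type Attributes = Record<string, AttributeValue | undefined>;

interface AICallSummary {
  model?: string;
  finishReason?: string;
  inputTokens?: number;
  outputTokens?: number;
  msToFirstChunk?: number;
}

// Pull a handful of ai.* attributes into a typed summary, tolerating
// attributes that are absent or have an unexpected type.
function summarizeAICall(attributes: Attributes): AICallSummary {
  const asString = (value: AttributeValue | undefined): string | undefined =>
    typeof value === "string" ? value : undefined;
  const asNumber = (value: AttributeValue | undefined): number | undefined =>
    typeof value === "number" ? value : undefined;

  return {
    model: asString(attributes[AISemanticConventions.RESPONSE_MODEL]),
    finishReason: asString(attributes[AISemanticConventions.RESPONSE_FINISH_REASON]),
    inputTokens: asNumber(attributes[AISemanticConventions.TOKEN_COUNT_INPUT]),
    outputTokens: asNumber(attributes[AISemanticConventions.TOKEN_COUNT_OUTPUT]),
    msToFirstChunk: asNumber(attributes[AISemanticConventions.RESPONSE_MS_TO_FIRST_CHUNK]),
  };
}

// Example attribute bag as it might appear on a generateText span.
const summary = summarizeAICall({
  [AISemanticConventions.RESPONSE_MODEL]: "gpt-4o-mini",
  [AISemanticConventions.RESPONSE_FINISH_REASON]: "stop",
  [AISemanticConventions.TOKEN_COUNT_INPUT]: 812,
  [AISemanticConventions.TOKEN_COUNT_OUTPUT]: 143,
});
console.log(summary);

Note that FUNCTION_ID and METADATA correspond to the functionId and metadata values an application supplies through the AI SDK's experimental_telemetry option, while the ai.response.* and ai.usage.* attributes are recorded by the SDK for each call.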