OpenInference JS
    Preparing search index...
    SemanticConventions: {
        AGENT_NAME: "agent.name";
        DOCUMENT_CONTENT: "document.content";
        DOCUMENT_ID: "document.id";
        DOCUMENT_METADATA: "document.metadata";
        DOCUMENT_SCORE: "document.score";
        EMBEDDING_EMBEDDINGS: "embedding.embeddings";
        EMBEDDING_MODEL_NAME: "embedding.model_name";
        EMBEDDING_TEXT: "embedding.text";
        EMBEDDING_VECTOR: "embedding.vector";
        GRAPH_NODE_ID: "graph.node.id";
        GRAPH_NODE_NAME: "graph.node.name";
        GRAPH_NODE_PARENT_ID: "graph.node.parent_id";
        IMAGE_URL: "image.url";
        INPUT_MIME_TYPE: "input.mime_type";
        INPUT_VALUE: "input.value";
        LLM_COST: "llm.cost";
        LLM_COST_COMPLETION: "llm.cost.completion";
        LLM_COST_COMPLETION_DETAILS_AUDIO: "llm.cost.completion_details.audio";
        LLM_COST_COMPLETION_DETAILS_REASONING: "llm.cost.completion_details.reasoning";
        LLM_COST_INPUT: "llm.cost.prompt_details.input";
        LLM_COST_OUTPUT: "llm.cost.completion_details.output";
        LLM_COST_PROMPT: "llm.cost.prompt";
        LLM_COST_PROMPT_DETAILS_AUDIO: "llm.cost.prompt_details.audio";
        LLM_COST_PROMPT_DETAILS_CACHE_INPUT: "llm.cost.prompt_details.cache_input";
        LLM_COST_PROMPT_DETAILS_CACHE_READ: "llm.cost.prompt_details.cache_read";
        LLM_COST_PROMPT_DETAILS_CACHE_WRITE: "llm.cost.prompt_details.cache_write";
        LLM_COST_TOTAL: "llm.cost.total";
        LLM_FUNCTION_CALL: "llm.function_call";
        LLM_INPUT_MESSAGES: "llm.input_messages";
        LLM_INVOCATION_PARAMETERS: "llm.invocation_parameters";
        LLM_MODEL_NAME: "llm.model_name";
        LLM_OUTPUT_MESSAGES: "llm.output_messages";
        LLM_PROMPTS: "llm.prompts";
        LLM_PROVIDER: "llm.provider";
        LLM_SYSTEM: "llm.system";
        LLM_TOKEN_COUNT_COMPLETION: "llm.token_count.completion";
        LLM_TOKEN_COUNT_COMPLETION_DETAILS: "llm.token_count.completion_details";
        LLM_TOKEN_COUNT_COMPLETION_DETAILS_AUDIO: "llm.token_count.completion_details.audio";
        LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING: "llm.token_count.completion_details.reasoning";
        LLM_TOKEN_COUNT_PROMPT: "llm.token_count.prompt";
        LLM_TOKEN_COUNT_PROMPT_DETAILS: "llm.token_count.prompt_details";
        LLM_TOKEN_COUNT_PROMPT_DETAILS_AUDIO: "llm.token_count.prompt_details.audio";
        LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_INPUT: "llm.token_count.prompt_details.cache_input";
        LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ: "llm.token_count.prompt_details.cache_read";
        LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE: "llm.token_count.prompt_details.cache_write";
        LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
        LLM_TOOLS: "llm.tools";
        MESSAGE_CONTENT: "message.content";
        MESSAGE_CONTENT_IMAGE: "message_content.image";
        MESSAGE_CONTENT_TEXT: "message_content.text";
        MESSAGE_CONTENT_TYPE: "message_content.type";
        MESSAGE_CONTENTS: "message.contents";
        MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON: "message.function_call_arguments_json";
        MESSAGE_FUNCTION_CALL_NAME: "message.function_call_name";
        MESSAGE_NAME: "message.name";
        MESSAGE_ROLE: "message.role";
        MESSAGE_TOOL_CALL_ID: "message.tool_call_id";
        MESSAGE_TOOL_CALLS: "message.tool_calls";
        METADATA: "metadata";
        OPENINFERENCE_SPAN_KIND: "openinference.span.kind";
        OUTPUT_MIME_TYPE: "output.mime_type";
        OUTPUT_VALUE: "output.value";
        PROMPT_ID: "prompt.id";
        PROMPT_TEMPLATE_TEMPLATE: "llm.prompt_template.template";
        PROMPT_TEMPLATE_VARIABLES: "llm.prompt_template.variables";
        PROMPT_TEMPLATE_VERSION: "llm.prompt_template.version";
        PROMPT_URL: "prompt.url";
        PROMPT_VENDOR: "prompt.vendor";
        RERANKER_INPUT_DOCUMENTS: "reranker.input_documents";
        RERANKER_MODEL_NAME: "reranker.model_name";
        RERANKER_OUTPUT_DOCUMENTS: "reranker.output_documents";
        RERANKER_QUERY: "reranker.query";
        RERANKER_TOP_K: "reranker.top_k";
        RETRIEVAL_DOCUMENTS: "retrieval.documents";
        SESSION_ID: "session.id";
        TAG_TAGS: "tag.tags";
        TOOL_CALL_FUNCTION_ARGUMENTS_JSON: "tool_call.function.arguments";
        TOOL_CALL_FUNCTION_NAME: "tool_call.function.name";
        TOOL_CALL_ID: "tool_call.id";
        TOOL_DESCRIPTION: "tool.description";
        TOOL_JSON_SCHEMA: "tool.json_schema";
        TOOL_NAME: "tool.name";
        TOOL_PARAMETERS: "tool.parameters";
        USER_ID: "user.id";
    } = ...

    Type Declaration

    • AGENT_NAME: "agent.name"

      The name of the agent. Agents that perform the same functions should have the same name.

    • DOCUMENT_CONTENT: "document.content"
    • DOCUMENT_ID: "document.id"
    • DOCUMENT_METADATA: "document.metadata"
    • DOCUMENT_SCORE: "document.score"
    • EMBEDDING_EMBEDDINGS: "embedding.embeddings"

      The embedding list root

    • EMBEDDING_MODEL_NAME: "embedding.model_name"

      The name of the model that was used to create the vector

    • EMBEDDING_TEXT: "embedding.text"

      The text that was embedded to create the vector

    • EMBEDDING_VECTOR: "embedding.vector"

      The embedding vector. Typically a high dimensional vector of floats or ints

    • GRAPH_NODE_ID: "graph.node.id"

      The id of the node in the execution graph. This along with graph.node.parent_id are used to visualize the execution graph.

    • GRAPH_NODE_NAME: "graph.node.name"

      The name of the node in the execution graph. Use this to present a human readable name for the node. Optional

    • GRAPH_NODE_PARENT_ID: "graph.node.parent_id"

      This references the id of the parent node. Leaving this unset or set as empty string implies that the current span is the root node.

    • IMAGE_URL: "image.url"

      The http or base64 link to the image

    • INPUT_MIME_TYPE: "input.mime_type"
    • INPUT_VALUE: "input.value"

      The input to any span

    • LLM_COST: "llm.cost"

      Key prefix for cost information. When these keys are transformed into a JSON-like structure, it would look like: { "prompt": 0.0021, # Cost in USD "completion": 0.0045, # Cost in USD "total": 0.0066, # Cost in USD "completion_details": { "output": 0.0009, # Cost in USD "reasoning": 0.0024, # Cost in USD (e.g., 80 tokens * $0.03/1K tokens) "audio": 0.0012 # Cost in USD (e.g., 40 tokens * $0.03/1K tokens) }, "prompt_details": { "input": 0.0003, # Cost in USD "cache_write": 0.0006, # Cost in USD (e.g., 20 tokens * $0.03/1K tokens) "cache_read": 0.0003, # Cost in USD (e.g., 10 tokens * $0.03/1K tokens) "cache_input": 0.0006, # Cost in USD (e.g., 20 tokens * $0.03/1K tokens) "audio": 0.0003 # Cost in USD (e.g., 10 tokens * $0.03/1K tokens) } } Note: This is a key prefix - individual attributes are stored as separate span attributes with this prefix, e.g. llm.cost.prompt, llm.cost.completion_details.reasoning, etc. The JSON structure shown above represents how these separate attributes can be conceptually organized. All monetary values are in USD with floating point precision.

    • LLM_COST_COMPLETION: "llm.cost.completion"

      Cost of the completion tokens in USD

    • LLM_COST_COMPLETION_DETAILS_AUDIO: "llm.cost.completion_details.audio"

      Cost of audio tokens in the completion in USD

    • LLM_COST_COMPLETION_DETAILS_REASONING: "llm.cost.completion_details.reasoning"

      Cost of reasoning steps in the completion in USD

    • LLM_COST_INPUT: "llm.cost.prompt_details.input"

      Total cost of input tokens in USD. This represents the cost of tokens that were used as input to the model, which may be different from the prompt cost if there are additional processing steps.

    • LLM_COST_OUTPUT: "llm.cost.completion_details.output"

      Total cost of output tokens in USD. This represents the cost of tokens that were generated as output by the model, which may be different from the completion cost if there are additional processing steps.

    • LLM_COST_PROMPT: "llm.cost.prompt"

      Cost of the prompt tokens in USD

    • LLM_COST_PROMPT_DETAILS_AUDIO: "llm.cost.prompt_details.audio"

      Cost of audio tokens in the prompt in USD

    • LLM_COST_PROMPT_DETAILS_CACHE_INPUT: "llm.cost.prompt_details.cache_input"

      Cost of input tokens in the prompt that were cached in USD

    • LLM_COST_PROMPT_DETAILS_CACHE_READ: "llm.cost.prompt_details.cache_read"

      Cost of prompt tokens read from cache in USD

    • LLM_COST_PROMPT_DETAILS_CACHE_WRITE: "llm.cost.prompt_details.cache_write"

      Cost of prompt tokens written to cache in USD

    • LLM_COST_TOTAL: "llm.cost.total"

      Total cost of the LLM call in USD (prompt + completion)

    • LLM_FUNCTION_CALL: "llm.function_call"

      The JSON representation of a function call of an LLM

    • LLM_INPUT_MESSAGES: "llm.input_messages"

      The messages sent to the LLM for completions. Typically seen in OpenAI chat completions.

    • LLM_INVOCATION_PARAMETERS: "llm.invocation_parameters"

      The JSON representation of the parameters passed to the LLM

    • LLM_MODEL_NAME: "llm.model_name"

      The name of the LLM model

    • LLM_OUTPUT_MESSAGES: "llm.output_messages"

      The messages received from the LLM for completions. Typically seen in OpenAI chat completions.

    • LLM_PROMPTS: "llm.prompts"

      The prompts sent to the LLM for completions. Typically seen in OpenAI legacy completions.

    • LLM_PROVIDER: "llm.provider"

      The provider of the inferences. E.g. the cloud provider

    • LLM_SYSTEM: "llm.system"

      The AI product as identified by the client or server

    • LLM_TOKEN_COUNT_COMPLETION: "llm.token_count.completion"

      Token count for the completion by the llm (in tokens)

    • LLM_TOKEN_COUNT_COMPLETION_DETAILS: "llm.token_count.completion_details"

      Key prefix for additional completion token count details. Each detail should be a separate attribute with this prefix, e.g. llm.token_count.completion_details.reasoning, llm.token_count.completion_details.audio. All values should be in tokens (integer count of tokens).

    • LLM_TOKEN_COUNT_COMPLETION_DETAILS_AUDIO: "llm.token_count.completion_details.audio"

      Token count for audio input generated by the model (in tokens)

    • LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING: "llm.token_count.completion_details.reasoning"

      Token count for the reasoning steps in the completion (in tokens)

    • LLM_TOKEN_COUNT_PROMPT: "llm.token_count.prompt"

      Token count for the prompt to the llm (in tokens)

    • LLM_TOKEN_COUNT_PROMPT_DETAILS: "llm.token_count.prompt_details"

      Key prefix for additional prompt token count details. Each detail should be a separate attribute with this prefix, e.g. llm.token_count.prompt_details.reasoning, llm.token_count.prompt_details.audio. All values should be in tokens (integer count of tokens).

    • LLM_TOKEN_COUNT_PROMPT_DETAILS_AUDIO: "llm.token_count.prompt_details.audio"

      Token count for audio input presented in the prompt (in tokens)

    • LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_INPUT: "llm.token_count.prompt_details.cache_input"

      Token count for the input tokens in the prompt that were cached (in tokens)

    • LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ: "llm.token_count.prompt_details.cache_read"

      Token count for the tokens retrieved from cache (in tokens)

    • LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE: "llm.token_count.prompt_details.cache_write"

      Token count for the tokens written to cache (in tokens)

    • LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total"

      Token count for the entire transaction with the llm (in tokens)

    • LLM_TOOLS: "llm.tools"

      List of tools that are advertised to the LLM to be able to call

    • MESSAGE_CONTENT: "message.content"

      The content of the message sent to the LLM

    • MESSAGE_CONTENT_IMAGE: "message_content.image"

      The image content of the message sent to the LLM

    • MESSAGE_CONTENT_TEXT: "message_content.text"

      The text content of the message sent to the LLM

    • MESSAGE_CONTENT_TYPE: "message_content.type"

      The type of content sent to the LLM

    • MESSAGE_CONTENTS: "message.contents"

      The array of contents for the message sent to the LLM. Each element of the array is a message_content object.

    • MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON: "message.function_call_arguments_json"

      The LLM function call function arguments in a json string

    • MESSAGE_FUNCTION_CALL_NAME: "message.function_call_name"

      The LLM function call function name

    • MESSAGE_NAME: "message.name"

      The name of the message. This is only used for role 'function' where the name of the function is captured in the name field and the parameters are captured in the content.

    • MESSAGE_ROLE: "message.role"

      The role that the LLM assumes the message is from during the LLM invocation

    • MESSAGE_TOOL_CALL_ID: "message.tool_call_id"

      The id of the tool call on a "tool" role message

    • MESSAGE_TOOL_CALLS: "message.tool_calls"

      The tool calls generated by the model, such as function calls.

    • METADATA: "metadata"

      Metadata for a span, used to store user-defined key-value pairs

    • Readonly OPENINFERENCE_SPAN_KIND: "openinference.span.kind"
    • OUTPUT_MIME_TYPE: "output.mime_type"
    • OUTPUT_VALUE: "output.value"

      The output of any span

    • PROMPT_ID: "prompt.id"

      A vendor-specific id used to locate the prompt

    • PROMPT_TEMPLATE_TEMPLATE: "llm.prompt_template.template"

      A prompt template

    • PROMPT_TEMPLATE_VARIABLES: "llm.prompt_template.variables"

      The JSON representation of the variables used in the prompt template

    • PROMPT_TEMPLATE_VERSION: "llm.prompt_template.version"

      A prompt template version

    • PROMPT_URL: "prompt.url"

      A vendor-specific URL used to locate the prompt

    • PROMPT_VENDOR: "prompt.vendor"

      The vendor or origin of the prompt, e.g. a prompt library, a specialized service, etc.

    • RERANKER_INPUT_DOCUMENTS: "reranker.input_documents"

      The documents used as input to the reranker

    • RERANKER_MODEL_NAME: "reranker.model_name"

      The model name for the reranker

    • RERANKER_OUTPUT_DOCUMENTS: "reranker.output_documents"

      The documents output by the reranker

    • RERANKER_QUERY: "reranker.query"

      The query string for the reranker

    • RERANKER_TOP_K: "reranker.top_k"

      The top k parameter for the reranker

    • RETRIEVAL_DOCUMENTS: "retrieval.documents"

      The retrieval documents list root

    • SESSION_ID: "session.id"

      The session id of a trace. Used to correlate spans in a single session.

    • TAG_TAGS: "tag.tags"

      The tags associated with a span

    • TOOL_CALL_FUNCTION_ARGUMENTS_JSON: "tool_call.function.arguments"

      tool_call.function.arguments (JSON string)

    • TOOL_CALL_FUNCTION_NAME: "tool_call.function.name"

      tool_call.function.name

    • TOOL_CALL_ID: "tool_call.id"

      The id of the tool call

    • TOOL_DESCRIPTION: "tool.description"

      The description of a tool

    • TOOL_JSON_SCHEMA: "tool.json_schema"

      The JSON schema of a tool input. It is RECOMMENDED that this be in the OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools

    • TOOL_NAME: "tool.name"

      The name of a tool

    • TOOL_PARAMETERS: "tool.parameters"

      The parameters of the tool represented as a JSON string

    • USER_ID: "user.id"

      The user id of a trace. Used to correlate spans for a single user.