Returns Promise<
| {
description?: string
| null;
id: string;
invocation_parameters:
| {
anthropic: {
extra_body?: { [key: string]: unknown };
max_tokens: number;
output_config?: {
effort?: "low" | "medium" | "high" | "xhigh" | "max";
};
stop_sequences?: string[];
temperature?: number;
thinking?: | { type: "disabled" }
| {
budget_tokens: number;
display?: "summarized" | "omitted";
type: "enabled";
}
| { display?: "summarized"
| "omitted"; type: "adaptive" };
top_p?: number;
};
type: "anthropic";
}
| {
aws: {
max_tokens?: number;
stop_sequences?: string[];
temperature?: number;
top_p?: number;
};
type: "aws";
}
| {
azure_openai: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "azure_openai";
}
| {
cerebras: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "cerebras";
}
| {
deepseek: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "deepseek";
}
| {
fireworks: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "fireworks";
}
| {
google: {
frequency_penalty?: number;
max_output_tokens?: number;
presence_penalty?: number;
stop_sequences?: string[];
temperature?: number;
thinking_config?: {
include_thoughts?: boolean;
thinking_budget?: number;
thinking_level?: "low"
| "medium"
| "high"
| "minimal";
};
top_k?: number;
top_p?: number;
};
type: "google";
}
| {
groq: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "groq";
}
| {
moonshot: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "moonshot";
}
| {
ollama: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "ollama";
}
| {
openai: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "openai";
}
| {
perplexity: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "perplexity";
}
| {
together: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
type: "together";
}
| {
type: "xai";
xai: {
extra_body?: { [key: string]: unknown };
frequency_penalty?: number;
max_completion_tokens?: number;
max_tokens?: number;
presence_penalty?: number;
reasoning_effort?:
| "low"
| "medium"
| "high"
| "xhigh"
| "none"
| "minimal";
seed?: number;
stop?: string[];
temperature?: number;
top_p?: number;
};
};
model_name: string;
model_provider: | "OPENAI"
| "AZURE_OPENAI"
| "ANTHROPIC"
| "GOOGLE"
| "DEEPSEEK"
| "XAI"
| "OLLAMA"
| "AWS"
| "CEREBRAS"
| "FIREWORKS"
| "GROQ"
| "MOONSHOT"
| "PERPLEXITY"
| "TOGETHER";
response_format?: | {
json_schema: {
description?: string;
name: string;
schema?: { [key: string]: unknown };
strict?: boolean;
};
type: "json_schema";
}
| null;
template: | {
messages: {
content: | string
| (
| { text: ...; type: ... }
| { tool_call: ...; tool_call_id: ...; type: ... }
| { tool_call_id: ...; tool_result: ...; type: ... }
)[];
role:
| "user"
| "assistant"
| "model"
| "ai"
| "tool"
| "system"
| "developer";
}[];
type: "chat";
}
| { template: string; type: "string" };
template_format: "NONE" | "MUSTACHE" | "F_STRING";
template_type: "STR" | "CHAT";
tools?:
| {
disable_parallel_tool_calls?: boolean;
tool_choice?: | { type: "none" }
| { type: "one_or_more" }
| { function_name: string; type: "specific_function" }
| { type: "zero_or_more" };
tools: {
function: {
description?: string;
name: string;
parameters?: { [key: string]: unknown };
strict?: boolean;
};
type: "function";
}[];
type: "tools";
}
| null;
}
| null,
>
The nearest prompt version that matches the selector, or null if no matching version exists.
Get a prompt from the Phoenix API.
If the input is a prompt ID, fetch the latest prompt version from the client. If the input is a prompt version ID, fetch that specific prompt version. If the input is a prompt name and tag, fetch the prompt version that has that name and tag. If the input is a prompt name alone, fetch the latest prompt version for that name.