File: agent-reference.md | Updated: 11/18/2025
Complete reference for all agent definition fields:
typescript
/**
* Codebuff Agent Type Definitions
*
* This file provides TypeScript type definitions for creating custom Codebuff agents.
* Import these types in your agent files to get full type safety and IntelliSense.
*
* Usage in .agents/your-agent.ts:
* import { AgentDefinition, ToolName, ModelName } from './types/agent-definition'
*
* const definition: AgentDefinition = {
* // ... your agent configuration with full type safety ...
* }
*
* export default definition
*/
// ============================================================================
// Agent Definition and Utility Types
// ============================================================================
export interface AgentDefinition {
/** Unique identifier for this agent. Must contain only lowercase letters, numbers, and hyphens, e.g. 'code-reviewer' */
id: string
/** Version string (if not provided, will default to '0.0.1' and be bumped on each publish) */
version?: string
/** Publisher ID for the agent. Must be provided if you want to publish the agent. */
publisher?: string
/** Human-readable name for the agent */
displayName: string
/** AI model to use for this agent. Can be any model in OpenRouter: https://openrouter.ai/models */
model: ModelName
/**
* https://openrouter.ai/docs/use-cases/reasoning-tokens
* One of `max_tokens` or `effort` is required.
* If `exclude` is true, reasoning will be removed from the response. Default is false.
*/
reasoningOptions?: {
enabled?: boolean
exclude?: boolean
} & (
| {
max\_tokens: number
}
| {
effort: 'high' | 'medium' | 'low'
}
)
/**
* Provider routing options for OpenRouter.
* Controls which providers to use and fallback behavior.
* See https://openrouter.ai/docs/features/provider-routing
*/
providerOptions?: {
/\*\*
\* List of provider slugs to try in order (e.g. \["anthropic", "openai"\])
\*/
order?: string\[\]
/\*\*
\* Whether to allow backup providers when primary is unavailable (default: true)
\*/
allow\_fallbacks?: boolean
/\*\*
\* Only use providers that support all parameters in your request (default: false)
\*/
require\_parameters?: boolean
/\*\*
\* Control whether to use providers that may store data
\*/
data\_collection?: 'allow' | 'deny'
/\*\*
\* List of provider slugs to allow for this request
\*/
only?: string\[\]
/\*\*
\* List of provider slugs to skip for this request
\*/
ignore?: string\[\]
/\*\*
\* List of quantization levels to filter by (e.g. \["int4", "int8"\])
\*/
quantizations?: Array<
| 'int4'
| 'int8'
| 'fp4'
| 'fp6'
| 'fp8'
| 'fp16'
| 'bf16'
| 'fp32'
| 'unknown'
\>
/\*\*
\* Sort providers by price, throughput, or latency
\*/
sort?: 'price' | 'throughput' | 'latency'
/\*\*
\* Maximum pricing you want to pay for this request
\*/
max\_price?: {
prompt?: number | string
completion?: number | string
image?: number | string
audio?: number | string
request?: number | string
}
}
// ============================================================================
// Tools and Subagents
// ============================================================================
/** MCP servers by name. Names cannot contain `/`. */
mcpServers?: Record<string, MCPConfig>
/**
* Tools this agent can use.
*
* By default, all tools are available from any specified MCP server. In
* order to limit the tools from a specific MCP server, add the tool name(s)
* in the format `'mcpServerName/toolName1'`, `'mcpServerName/toolName2'`,
* etc.
*/
toolNames?: (ToolName | (string & {}))[]
/** Other agents this agent can spawn, like 'codebuff/file-picker@0.0.1'.
*
* Use the fully qualified agent id from the agent store, including publisher and version: 'codebuff/file-picker@0.0.1'
* (publisher and version are required!)
*
* Or, use the agent id from a local agent file in your .agents directory: 'file-picker'.
*/
spawnableAgents?: string[]
// ============================================================================
// Input and Output
// ============================================================================
/** The input schema required to spawn the agent. Provide a prompt string and/or a params object or none.
* 80% of the time you want just a prompt string with a description:
* inputSchema: {
* prompt: { type: 'string', description: 'A description of what info would be helpful to the agent' }
* }
*/
inputSchema?: {
prompt?: { type: 'string'; description?: string }
params?: JsonObjectSchema
}
/** How the agent should output a response to its parent (defaults to 'last_message')
*
* last_message: The last message from the agent, typically after using tools.
*
* all_messages: All messages from the agent, including tool calls and results.
*
* structured_output: Make the agent output a JSON object. Can be used with outputSchema or without if you want freeform json output.
*/
outputMode?: 'last_message' | 'all_messages' | 'structured_output'
/** JSON schema for structured output (when outputMode is 'structured_output') */
outputSchema?: JsonObjectSchema
// ============================================================================
// Prompts
// ============================================================================
/** Prompt for when and why to spawn this agent. Include the main purpose and use cases.
*
* This field is key if the agent is intended to be spawned by other agents. */
spawnerPrompt?: string
/** Whether to include conversation history from the parent agent in context.
*
* Defaults to false.
* Use this when the agent needs to know all the previous messages in the conversation.
*/
includeMessageHistory?: boolean
/** Whether to inherit the parent agent's system prompt instead of using this agent's own systemPrompt.
*
* Defaults to false.
* Use this when you want to enable prompt caching by preserving the same system prompt prefix.
* Cannot be used together with the systemPrompt field.
*/
inheritParentSystemPrompt?: boolean
/** Background information for the agent. Fairly optional. Prefer using instructionsPrompt for agent instructions. */
systemPrompt?: string
/** Instructions for the agent.
*
* IMPORTANT: Updating this prompt is the best way to shape the agent's behavior.
* This prompt is inserted after each user input. */
instructionsPrompt?: string
/** Prompt inserted at each agent step.
*
* Powerful for changing the agent's behavior, but usually not necessary for smart models.
* Prefer instructionsPrompt for most instructions. */
stepPrompt?: string
// ============================================================================
// Handle Steps
// ============================================================================
/** Programmatically step the agent forward and run tools.
*
* You can either yield:
* - A tool call object with toolName and input properties.
* - 'STEP' to run agent's model and generate one assistant message.
* - 'STEP_ALL' to run the agent's model until it uses the end_turn tool or stops includes no tool calls in a message.
*
* Or use 'return' to end the turn.
*
* Example 1:
* function* handleSteps({ agentState, prompt, params, logger }) {
* logger.info('Starting file read process')
* const { toolResult } = yield {
* toolName: 'read_files',
* input: { paths: ['file1.txt', 'file2.txt'] }
* }
* yield 'STEP_ALL'
*
* // Optionally do a post-processing step here...
* logger.info('Files read successfully, setting output')
* yield {
* toolName: 'set_output',
* input: {
* output: 'The files were read successfully.',
* },
* }
* }
*
* Example 2:
* handleSteps: function* ({ agentState, prompt, params, logger }) {
* while (true) {
* logger.debug('Spawning thinker agent')
* yield {
* toolName: 'spawn_agents',
* input: {
* agents: [
* {
* agent_type: 'thinker',
* prompt: 'Think deeply about the user request',
* },
* ],
* },
* }
* const { stepsComplete } = yield 'STEP'
* if (stepsComplete) break
* }
* }
*/
handleSteps?: (context: AgentStepContext) => Generator<
ToolCall | 'STEP' | 'STEP\_ALL' | StepText | GenerateN,
void,
{
agentState: AgentState
toolResult: ToolResultOutput\[\] | undefined
stepsComplete: boolean
nResponses?: string\[\]
}
>
}
// ============================================================================
// Supporting Types
// ============================================================================
export interface AgentState {
  /** Identifier of this agent instance (presumably the AgentDefinition id — confirm against runtime docs). */
  agentId: string
  /** Identifier of the current run. */
  runId: string
  /** Id of the parent agent that spawned this one, or undefined for a top-level agent. */
  parentId: string | undefined
  /** The agent's conversation history: messages from the user and the assistant. */
  messageHistory: Message[]
  /** The last value set by the set_output tool. This is a plain object or undefined if not set. */
  output: Record<string, any> | undefined
}
/**
 * Context provided to handleSteps generator function
 */
export interface AgentStepContext {
  /** Current state of the agent, including its message history and last set_output value. */
  agentState: AgentState
  /** Prompt string the agent was spawned with (see AgentDefinition.inputSchema.prompt). */
  prompt?: string
  /** Params object the agent was spawned with (see AgentDefinition.inputSchema.params). */
  params?: Record<string, any>
  /** Logger for diagnostic output inside handleSteps (used as logger.info/debug in the examples). */
  logger: Logger
}
/** Yieldable from handleSteps; carries literal text. NOTE(review): exact runtime semantics (inserting assistant text without a model step) inferred from the name — confirm. */
export type StepText = { type: 'STEP_TEXT'; text: string }
/** Yieldable from handleSteps; requests `n` model responses, which appear to come back via `nResponses` in the generator's next value — confirm. */
export type GenerateN = { type: 'GENERATE_N'; n: number }
/**
* Tool call object for handleSteps generator
*/
export type ToolCall<T extends ToolName = ToolName> = {
[K in T]: {
toolName: K
input: GetToolParams<K\>
includeToolCall?: boolean
}
}[T]
// ============================================================================
// Available Tools
// ============================================================================
/**
 * File operation tools
 */
export type FileEditingTools = 'read_files' | 'write_file' | 'str_replace'
/**
 * Code analysis tools (note: 'read_files' also appears in FileEditingTools)
 */
export type CodeAnalysisTools = 'code_search' | 'find_files' | 'read_files'
/**
 * Terminal and system tools (note: 'code_search' also appears in CodeAnalysisTools)
 */
export type TerminalTools = 'run_terminal_command' | 'code_search'
/**
 * Web and browser tools
 */
export type WebTools = 'web_search' | 'read_docs'
/**
 * Agent management tools
 */
export type AgentTools = 'spawn_agents'
/**
 * Output and control tools
 */
export type OutputTools = 'set_output'
// ============================================================================
// Available Models (see: https://openrouter.ai/models)
// ============================================================================
/**
 * AI models available for agents. Pick from our selection of recommended models or choose any model in OpenRouter.
 *
 * See available models at https://openrouter.ai/models
 */
export type ModelName =
  // Recommended Models
  // OpenAI
  | 'openai/gpt-5.1'
  | 'openai/gpt-5.1-chat'
  | 'openai/gpt-5-mini'
  | 'openai/gpt-5-nano'
  // Anthropic
  | 'anthropic/claude-sonnet-4.5'
  | 'anthropic/claude-opus-4.1'
  // Gemini
  | 'google/gemini-2.5-pro'
  | 'google/gemini-2.5-flash'
  | 'google/gemini-2.5-flash-lite'
  | 'google/gemini-2.5-flash-preview-09-2025'
  | 'google/gemini-2.5-flash-lite-preview-09-2025'
  // X-AI
  | 'x-ai/grok-4-07-09'
  | 'x-ai/grok-4-fast'
  | 'x-ai/grok-code-fast-1'
  // Qwen
  | 'qwen/qwen3-max'
  | 'qwen/qwen3-coder-plus'
  | 'qwen/qwen3-coder'
  | 'qwen/qwen3-coder:nitro'
  | 'qwen/qwen3-coder-flash'
  | 'qwen/qwen3-235b-a22b-2507'
  | 'qwen/qwen3-235b-a22b-2507:nitro'
  | 'qwen/qwen3-235b-a22b-thinking-2507'
  | 'qwen/qwen3-235b-a22b-thinking-2507:nitro'
  | 'qwen/qwen3-30b-a3b'
  | 'qwen/qwen3-30b-a3b:nitro'
  // DeepSeek
  | 'deepseek/deepseek-chat-v3-0324'
  | 'deepseek/deepseek-chat-v3-0324:nitro'
  | 'deepseek/deepseek-r1-0528'
  | 'deepseek/deepseek-r1-0528:nitro'
  // Other open source models
  | 'moonshotai/kimi-k2'
  | 'moonshotai/kimi-k2:nitro'
  | 'z-ai/glm-4.6'
  | 'z-ai/glm-4.6:nitro'
  // Escape hatch: any other OpenRouter model id. `string & {}` keeps IDE
  // autocomplete working for the literal members above.
  | (string & {})
import type { ToolName, GetToolParams } from './tools'
import type {
Message,
ToolResultOutput,
JsonObjectSchema,
MCPConfig,
Logger,
} from './util-types'
export type { ToolName, GetToolParams }
id (string, required)Unique identifier for this agent. Must contain only lowercase letters, numbers, and hyphens.
json
"id": "code-reviewer"
displayName (string, required)Human-readable name for the agent.
json
"displayName": "Code Review Specialist"
spawnerPrompt (string, optional)Prompt for when and why to spawn this agent. Include the main purpose and use cases. This field is key if the agent is intended to be spawned by other agents.
json
"spawnerPrompt": "Spawn this agent for thorough code review, focusing on bugs, security issues, and best practices"
model (string, required) — The model to use, which can be any model string from OpenRouter.
json
"model": "anthropic/claude-4-sonnet-20250522"
reasoningOptions (object, optional) — Controls model reasoning behavior using OpenRouter-style settings.
Fields:
- enabled (boolean, default: false) — Turn reasoning mode on for supported models.
- exclude (boolean, default: false) — If true, omit model-revealed reasoning content from responses (when available), returning only the final answer.
- effort ("low" | "medium" | "high") — Increase or decrease how much the model "thinks" before answering. Higher effort typically improves quality for hard tasks at the cost of more reasoning tokens.
Example:
ts
// .agents/thinker.ts
const definition = {
id: 'thinker',
// ... other fields ...
reasoningOptions: {
enabled: true,
exclude: false,
effort: 'high',
},
}
outputMode (string, optional, default: "last_message")How the agent's output is handled.
Options:
"last_message" - Return only the final message (default)"all_messages" - Return all messages from the conversation"structured_output" - Return a structured JSON object (use with outputSchema)json
"outputMode": "last_message"
includeMessageHistory (boolean, optional, default: false)Whether to include conversation history from the parent agent when spawning this agent.
json
"includeMessageHistory": true
outputSchema (object, optional)JSON Schema for structured output (when outputMode is "structured_output"). Defines the expected shape of the JSON object the agent will return.
json
"outputMode": "structured_output",
"outputSchema": {
"type": "object",
"properties": {
"summary": { "type": "string" },
"issues": {
"type": "array",
"items": { "type": "string" }
},
"score": { "type": "number" }
},
"required": ["summary", "issues"]
}
toolNames (array, optional, default: ["end_turn"])List of tools the agent can use.
Available Tools:
- add_subgoal - Create subgoals for tracking progress
- browser_logs - Navigate web pages and get console logs
- code_search - Search for patterns in code files
- create_plan - Generate detailed plans for complex tasks
- end_turn - End the agent's turn
- find_files - Find relevant files in the codebase
- read_docs - Read documentation for libraries
- read_files - Read file contents
- run_file_change_hooks - Run configured file change hooks
- run_terminal_command - Execute terminal commands
- spawn_agents - Spawn other agents
- str_replace - Replace strings in files
- think_deeply - Perform deep analysis
- update_subgoal - Update existing subgoals
- web_search - Search the web
- write_file - Create or edit files
- set_output - Set an output JSON object

json
"toolNames": ["read_files", "write_file", "code_search", "end_turn"]
spawnableAgents (array, optional, default: [])Other agents this agent can spawn. Use the fully qualified agent ID from the agent store (publisher/name@version) or the agent ID from a local agent file.
⚠️ Important: When referencing built-in agents, you must specify both publisher and version (e.g.,
codebuff/reviewer@0.0.1). Omit publisher/version only for local agents defined in your .agents/ directory.
Referencing Agents:
"codebuff/file-picker@0.0.1" (publisher and version required!)"my-custom-agent" (just the agent ID)Available Built-in Agents:
codebuff/base - Main coding assistantcodebuff/reviewer - Code review agentcodebuff/thinker - Deep thinking agentcodebuff/researcher - Research and documentation agentcodebuff/planner - Planning and architecture agentcodebuff/file-picker - File discovery agentjson
"spawnableAgents": ["codebuff/researcher@0.0.1", "my-local-agent"]
All prompt fields support two formats:
json
"systemPrompt": "You are a helpful assistant..."
json
"systemPrompt": {
"path": "./my-system-prompt.md"
}
systemPrompt (string or object, optional)Background information for the agent. Fairly optional - prefer using instructionsPrompt for agent instructions.
instructionsPrompt (string or object, optional)Instructions for the agent. This is the best way to shape the agent's behavior and is inserted after each user input.
stepPrompt (string or object, optional)Prompt inserted at each agent step. Powerful for changing behavior but usually not necessary for smart models.
handleSteps (generator function, optional)🚀 This is what makes Codebuff agents truly powerful! Unlike traditional prompt-based agents, handleSteps lets you write actual code to control agent behavior.
Programmatically control the agent's execution using a TypeScript generator function. This enables:
What You Can Yield:
| Yield Value | What It Does |
| --- | --- |
| A tool call object (`toolName` + `input`) | Execute one tool and receive its result |
| `'STEP'` | Run the agent's model to generate one assistant message |
| `'STEP_ALL'` | Let agent run until completion |
| `return` | End the agent's turn immediately |
Tool Call Pattern:
typescript
const { toolResult } = yield {
toolName: 'read_files',
input: { paths: ['file.ts'] }
}
// Now you can use toolResult to make decisions!
Example:
typescript
handleSteps: function* ({ agentState, prompt, params }) {
  // First, read some files
  const { toolResult } = yield {
    toolName: 'read_files',
    input: { paths: ['src/index.ts', 'src/config.ts'] }
  }
  // Then spawn a thinker agent
  yield {
    toolName: 'spawn_agents',
    input: {
      agents: [{
        agent_type: 'thinker',
        prompt: 'Analyze this code structure'
      }]
    }
  }
  // Let the agent take over from here
  yield 'STEP_ALL'
}
inputSchema (object, optional)JSON Schema definitions for validating prompt and params when spawning the agent.
json
"inputSchema": {
"prompt": {
"type": "string",
"description": "What documentation to create"
},
"params": {
"type": "object",
"properties": {
"format": {
"type": "string",
"enum": \["markdown", "html"\]
}
Real-World Example:
typescript
// 1. Dynamically find relevant files
const { toolResult: searchResults } = yield {
  toolName: 'code_search',
  input: { pattern: params.searchPattern || 'TODO' }
}
// 2. Parse results and decide what to read
const files = JSON.parse(searchResults || '[]')
if (files.length > 0) {
  const { toolResult: fileContents } = yield {
    toolName: 'read_files',
    input: { paths: files.slice(0, 10) }
  }
  // 3. Conditionally spawn different agents based on content
  if (fileContents?.includes('security')) {
    yield {
      toolName: 'spawn_agents',
      input: {
        agents: [{
          agent_type: 'security-reviewer',
          prompt: `Review security implications in: ${files.join(', ')}`
        }]
      }
    }
  }
}
// 4. Let the LLM handle the rest with context
**Why This Matters:**
- Traditional agents rely solely on prompts and hope the LLM makes the right decisions
- With `handleSteps`, you have **deterministic control** over the agent's workflow
- You can implement complex logic that would be impossible with prompts alone
- Results from one tool directly inform the next action programmatically
### Agent Example
**.agents/documentation-writer.ts**
import { AgentDefinition } from './types/agent-definition'
const definition: AgentDefinition = {
id: "documentation-writer",
version: "1.0.0",
publisher: "mycompany",
displayName: "Documentation Writer",
spawnerPrompt: "Spawn this agent for creating comprehensive documentation, API docs, or user guides",
model: "anthropic/claude-4-sonnet-20250522",
outputMode: "last_message",
includeMessageHistory: true,
toolNames: [
"read_files",
"write_file",
"code_search",
"spawn_agents",
"end_turn"
],
spawnableAgents: ["codebuff/researcher@0.0.1"],
inputSchema: {
prompt: {
type: "string",
description: "What documentation to create or update"
}
},
systemPrompt: {
path: "./prompts/doc-writer-system.md"
},
instructionsPrompt: "Create comprehensive documentation based on the user's request. Research existing code first.",
stepPrompt: "Continue working on the documentation. Use end_turn when complete."
}
export default definition
Creating New Agents Troubleshooting Agent Customization
Toggle menu