301 changes: 301 additions & 0 deletions docs/config.toml
@@ -0,0 +1,301 @@
# Codex configuration template (config.toml)
#
# This file lists all keys Codex reads from config.toml, their default values,
# and concise explanations. Values here mirror the effective defaults compiled
# into the CLI as of this repository. Adjust as needed.
#
# Notes
# - Root keys must appear before tables in TOML.
# - Optional keys that default to "unset" are shown commented out with notes.
# - MCP servers, profiles, and model providers are examples; remove or edit.

################################################################################
# Core Model Selection
################################################################################

# Primary model used by Codex. The default differs by OS:
# Linux/macOS: "gpt-5-codex"; Windows: "gpt-5". The non-Windows default is shown here.
model = "gpt-5-codex"

# Model used by the /review feature (code reviews). Default: "gpt-5-codex".
review_model = "gpt-5-codex"

# Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai"

# Optional manual model metadata. When unset, Codex auto-detects it from the model.
# Uncomment to force values.
# model_context_window = 128000 # tokens; default: auto for model
# model_max_output_tokens = 8192 # tokens; default: auto for model
# model_auto_compact_token_limit = 0 # disable or override auto-compaction; default: model-family specific

################################################################################
# Reasoning & Verbosity (Responses API capable models)
################################################################################

# Reasoning effort: minimal | low | medium | high (default: medium)
model_reasoning_effort = "medium"

# Reasoning summary: auto | concise | detailed | none (default: auto)
model_reasoning_summary = "auto"

# Text verbosity for the GPT-5 family (Responses API): low | medium | high (default: medium)
model_verbosity = "medium"

# Force-enable reasoning summaries for current model (default: false)
model_supports_reasoning_summaries = false

# Force reasoning summary format: none | experimental (default: none)
model_reasoning_summary_format = "none"
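
# Illustrative preset (commented out): favor deeper reasoning at the cost of
# latency and tokens, using values from the enums documented above:
# model_reasoning_effort = "high"
# model_reasoning_summary = "detailed"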

################################################################################
# UI, Notifications, and Misc (root keys)
################################################################################

# Suppress internal reasoning events from output (default: false)
hide_agent_reasoning = false

# Show raw reasoning content when available (default: false)
show_raw_agent_reasoning = false

# Disable burst-paste detection in the TUI (default: false)
disable_paste_burst = false

# Track Windows onboarding acknowledgement (Windows only). Default: false
windows_wsl_setup_acknowledged = false

# External notifier program (argv array). When unset: disabled.
# Example: notify = ["notify-send", "Codex"]
# notify = []

# URI scheme for clickable citations: vscode (default) | vscode-insiders | windsurf | cursor | none
file_opener = "vscode"

# Base URL for the ChatGPT auth flow (not the OpenAI API).
chatgpt_base_url = "https://chatgpt.com/backend-api/"

# Optional: legacy system-instructions override (currently ignored; prefer
# `experimental_instructions_file` or AGENTS.md). Kept because Codex still
# accepts/deserializes the key. Default: unset.
# instructions = ""

# Preferred store for MCP OAuth credentials: auto (default) | file | keyring
mcp_oauth_credentials_store = "auto"

# Active profile name (see [profiles] below). When unset, no profile is applied.
# profile = "default"

################################################################################
# Project Documentation Controls
################################################################################

# Max bytes from AGENTS.md to embed into first-turn instructions. Default: 32768
project_doc_max_bytes = 32768

# Ordered fallback filenames checked when AGENTS.md is missing at a directory level. Default: []
project_doc_fallback_filenames = []

################################################################################
# Experimental toggles (off by default)
################################################################################

# Use the experimental exec command tool (streamable shell). Default: false
experimental_use_exec_command_tool = false

# Use the experimental unified exec tool. Default: false
experimental_use_unified_exec_tool = false

# Use the experimental Rust MCP client (enables OAuth for HTTP MCP). Default: false
experimental_use_rmcp_client = false

# Include apply_patch via the freeform editing path (affects the default tool set). Default: false
experimental_use_freeform_apply_patch = false

# Override the built-in base instructions with a file path. When unset: disabled.
# experimental_instructions_file = "/absolute/or/relative/path/to/instructions.txt"

################################################################################
# Approval & Sandbox
################################################################################

# When to ask for command approval:
# - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-failure: auto-run in sandbox; prompt only on failure for escalation
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
approval_policy = "on-request"

# Filesystem/network sandbox policy for tool calls:
# - read-only (default)
# - workspace-write
# - danger-full-access (no sandbox; extremely risky)
sandbox_mode = "read-only"
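
# Illustrative pairing (commented out) for local development: allow edits
# inside the workspace and prompt only when a sandboxed command fails; adjust
# to your own risk tolerance:
# approval_policy = "on-failure"
# sandbox_mode = "workspace-write"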

# Extra settings used only when sandbox_mode = "workspace-write".
[sandbox_workspace_write]
# Additional writable roots beyond the workspace (cwd). Default: []
writable_roots = []
# Allow outbound network access inside the sandbox. Default: false
network_access = false
# Exclude $TMPDIR from writable roots. Default: false
exclude_tmpdir_env_var = false
# Exclude /tmp from writable roots. Default: false
exclude_slash_tmp = false
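
# Illustrative (commented out): one extra writable root plus network access,
# e.g. for package installs; the path is a placeholder:
# writable_roots = ["/opt/scratch"]
# network_access = true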

################################################################################
# Shell Environment Policy for spawned processes
################################################################################

[shell_environment_policy]
# inherit: all (default) | core | none
inherit = "all"
# Set true to skip the default excludes (names containing KEY or TOKEN, case-insensitive). Default: false
ignore_default_excludes = false
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
exclude = []
# Explicit key/value overrides (always win). Default: {}
set = {}
# Whitelist; if non-empty, keep only matching vars. Default: []
include_only = []
# Experimental: run commands via the user's shell profile. Default: false
experimental_use_profile = false
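
# Illustrative hardened policy (commented out): inherit only core vars, scrub
# cloud credentials, and pin one extra variable (CI here is a placeholder):
# inherit = "core"
# exclude = ["AWS_*", "AZURE_*"]
# set = { CI = "1" }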

################################################################################
# History
################################################################################

[history]
# save-all (default) | none
persistence = "save-all"
# Maximum bytes for the history file (currently not enforced). Example:
# max_bytes = 5242880

################################################################################
# TUI
################################################################################

[tui]
# Desktop notifications from the TUI: boolean or filtered list. Default: false
# Examples: true | ["agent-turn-complete", "approval-requested"]
notifications = false
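
# Illustrative filtered form (commented out), using the event names above:
# notifications = ["agent-turn-complete", "approval-requested"]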

################################################################################
# Tools (feature toggles)
################################################################################

[tools]
# Enable web search tool (alias: web_search_request). Default: false
web_search = false

# Enable the view_image tool so the agent can attach local images. Default: true
view_image = true

# (Alias accepted) You can also write:
# web_search_request = false

################################################################################
# MCP (Model Context Protocol) servers
################################################################################

# Define MCP servers under this table. Leave empty to disable.
[mcp_servers]

# --- Example: STDIO transport ---
# [mcp_servers.docs]
# command = "docs-server" # required
# args = ["--port", "4000"] # optional
# env = { "API_KEY" = "value" } # optional; a default whitelist of env vars is inherited
# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
# tool_timeout_sec = 60.0 # optional; default 60.0 seconds

# --- Example: Streamable HTTP transport ---
# [mcp_servers.github]
# url = "https://github-mcp.example.com/mcp" # required
# bearer_token_env_var = "GITHUB_TOKEN" # optional; header Authorization: Bearer <token>
# startup_timeout_sec = 10.0 # optional
# tool_timeout_sec = 60.0 # optional

################################################################################
# Model Providers (extend/override built-ins)
################################################################################

# Built-ins include:
# - openai (Responses API; requires login or OPENAI_API_KEY via auth flow)
# - oss (Chat Completions API; defaults to http://localhost:11434/v1)

[model_providers]

# --- Example: override OpenAI with explicit base URL or headers ---
# [model_providers.openai]
# name = "OpenAI"
# base_url = "https://api.openai.com/v1" # default if unset
# wire_api = "responses" # "responses" | "chat" (default varies)
# # requires_openai_auth = true # built-in OpenAI defaults to true
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# # http_headers = { "X-Example" = "value" }
# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }

# --- Example: Azure (Chat/Responses depending on endpoint) ---
# [model_providers.azure]
# name = "Azure"
# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
# wire_api = "responses" # or "chat" per endpoint
# query_params = { api-version = "2025-04-01-preview" }
# env_key = "AZURE_OPENAI_API_KEY"
# # env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "chat"
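
# To route requests to a custom provider, point the root keys at it. Those are
# root keys, so set them at the top of this file, before any table.
# Illustrative (the model name is a placeholder):
# model = "llama3"
# model_provider = "ollama"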

################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5-codex"
# model_provider = "openai"
# approval_policy = "on-request"
# model_reasoning_effort = "medium"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
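
# The active profile is chosen by the root key `profile = "default"` near the
# top of this file; keys set in the active profile override root-level values.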

################################################################################
# Projects (trust levels)
################################################################################

# Mark specific worktrees as trusted. Only "trusted" is recognized.
[projects]
# [projects."/absolute/path/to/project"]
# trust_level = "trusted"

################################################################################
# OpenTelemetry (OTEL) – disabled by default
################################################################################

[otel]
# Include user prompt text in logs. Default: false
log_user_prompt = false
# Environment label applied to telemetry. Default: "dev"
environment = "dev"
# Exporter: none (default) | otlp-http | otlp-grpc
exporter = "none"

# Example OTLP/HTTP exporter configuration. TOML inline tables must fit on a
# single line, so the nested-table form below is the valid way to spell
# exporter = { otlp-http = { ... } }. Replace the exporter = "none" line above
# when uncommenting:
# [otel.exporter.otlp-http]
# endpoint = "https://otel.example.com/v1/logs"
# protocol = "binary" # "binary" | "json"
# headers = { "x-otlp-api-key" = "${OTLP_TOKEN}" }

# Example OTLP/gRPC exporter configuration:
# [otel.exporter.otlp-grpc]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }