-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.yaml
More file actions
38 lines (30 loc) · 1.13 KB
/
config.yaml
File metadata and controls
38 lines (30 loc) · 1.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# Configuration for LLM Mathematics Research
---
execution:
  timeout: 1800  # seconds (30 minutes)
  output_limit: 100000  # characters to keep from execution output (effectively unlimited)

compilation:
  timeout: 30  # seconds for LaTeX compilation
  error_limit: 500  # characters to show from compilation errors

api:
  # LLM Provider - Default provider to use
  # Options: anthropic, openai, google, xai, moonshot
  # Can be overridden via CLI: --provider openai
  provider: google

  # Optional: Override provider defaults (see provider_defaults.py for full list)
  # If not specified, uses sensible defaults for each provider
  # Uncomment to customize:
  # model: claude-opus-4-20250514
  # max_tokens: 32000
  # thinking_budget: 16000
  # costs:
  #   input_per_million: 3.0
  #   output_per_million: 15.0

  # Rate limiting
  rate_limit_wait: 20  # seconds to wait when rate limited

output:
  figure_dpi: 300  # DPI for saved figures

research:
  max_iterations: 20  # default maximum iterations for research sessions

modal:
  timeout: 1800  # seconds (half an hour) for GPU training tasks
  gpu: "L4"  # default GPU type (T4, A10G, A100, etc.)