Skip to content

Commit 6c00c56

Browse files
committed
feat: Add MaxTokens option for AI model output control
1 parent 6d7585c commit 6c00c56

File tree

22 files changed

+812
-18
lines changed

22 files changed

+812
-18
lines changed
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
### PR [#1747](https://github.com/danielmiessler/Fabric/pull/1747) by [2b3pro](https://github.com/2b3pro): feat: Add MaxTokens option for AI model output control
2+
3+
- Add MaxTokens configuration option allowing users to specify the maximum number of tokens to generate in AI model responses
4+
- Integrate MaxTokens support across multiple AI providers including Anthropic, Gemini, and Ollama with updated CLI flags and example configuration
5+
- Enhance ParseFileChanges function to support both JSON format and markdown format for better compatibility with different AI model outputs
6+
- Support max_completion_tokens for GPT-5 models with conditional logic to map MaxTokens to the appropriate parameter for OpenAI API requests
7+
- Add test case to validate proper parameter mapping for GPT-5 models according to their specific API requirements

internal/cli/example.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@ topp: 0.67
1717
temperature: 0.88
1818
seed: 42
1919

20+
# Maximum number of tokens to generate
21+
maxTokens: 1000
22+
2023
stream: true
2124
raw: false
2225

internal/cli/flags.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ type Flags struct {
103103
Notification bool `long:"notification" yaml:"notification" description:"Send desktop notification when command completes"`
104104
NotificationCommand string `long:"notification-command" yaml:"notificationCommand" description:"Custom command to run for notifications (overrides built-in notifications)"`
105105
Thinking domain.ThinkingLevel `long:"thinking" yaml:"thinking" description:"Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)"`
106+
MaxTokens int `long:"max-tokens" yaml:"maxTokens" description:"Maximum number of tokens to generate (provider-specific limits apply)"`
106107
Debug int `long:"debug" description:"Set debug level (0=off, 1=basic, 2=detailed, 3=trace)" default:"0"`
107108
}
108109

@@ -464,6 +465,7 @@ func (o *Flags) BuildChatOptions() (ret *domain.ChatOptions, err error) {
464465
Raw: o.Raw,
465466
Seed: o.Seed,
466467
Thinking: o.Thinking,
468+
MaxTokens: o.MaxTokens,
467469
ModelContextLength: o.ModelContextLength,
468470
Search: o.Search,
469471
SearchLocation: o.SearchLocation,

internal/cli/help.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@ var flagDescriptionMap = map[string]string{
9191
"notification": "send_desktop_notification",
9292
"notification-command": "custom_notification_command",
9393
"thinking": "set_reasoning_thinking_level",
94+
"max-tokens": "maximum_tokens_to_generate",
9495
"debug": "set_debug_level",
9596
}
9697

0 commit comments

Comments (0)