Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions codex-rs/app-server/src/models.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ pub async fn supported_models(
.list_models(config)
.await
.into_iter()
.filter(|preset| preset.show_in_picker)
.map(model_from_preset)
.collect()
}
Expand Down
2 changes: 1 addition & 1 deletion codex-rs/app-server/tests/suite/v2/model_list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning for complex problems".to_string(),
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
Expand Down
34 changes: 20 additions & 14 deletions codex-rs/core/src/models_manager/manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,14 +149,14 @@ impl ModelsManager {
// if codex-auto-balanced exists & signed in with chatgpt mode, return it, otherwise return the default model
let auth_mode = self.auth_manager.get_auth_mode();
let remote_models = self.remote_models(config).await;
if auth_mode == Some(AuthMode::ChatGPT)
&& self
if auth_mode == Some(AuthMode::ChatGPT) {
let has_auto_balanced = self
.build_available_models(remote_models)
.iter()
.any(|m| m.model == CODEX_AUTO_BALANCED_MODEL)
{
return CODEX_AUTO_BALANCED_MODEL.to_string();
} else if auth_mode == Some(AuthMode::ChatGPT) {
.any(|model| model.model == CODEX_AUTO_BALANCED_MODEL && model.show_in_picker);
if has_auto_balanced {
return CODEX_AUTO_BALANCED_MODEL.to_string();
}
return OPENAI_DEFAULT_CHATGPT_MODEL.to_string();
}
OPENAI_DEFAULT_API_MODEL.to_string()
Expand Down Expand Up @@ -247,10 +247,15 @@ impl ModelsManager {
merged_presets = self.filter_visible_models(merged_presets);

let has_default = merged_presets.iter().any(|preset| preset.is_default);
if let Some(default) = merged_presets.first_mut()
&& !has_default
{
default.is_default = true;
if !has_default {
if let Some(default) = merged_presets
.iter_mut()
.find(|preset| preset.show_in_picker)
{
default.is_default = true;
} else if let Some(default) = merged_presets.first_mut() {
default.is_default = true;
}
}

merged_presets
Expand All @@ -260,7 +265,7 @@ impl ModelsManager {
let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
models
.into_iter()
.filter(|model| model.show_in_picker && (chatgpt_mode || model.supported_in_api))
.filter(|model| chatgpt_mode || model.supported_in_api)
.collect()
}

Expand Down Expand Up @@ -654,12 +659,13 @@ mod tests {
let hidden_model = remote_model_with_visibility("hidden", "Hidden", 0, "hide");
let visible_model = remote_model_with_visibility("visible", "Visible", 1, "list");

let mut expected = ModelPreset::from(visible_model.clone());
expected.is_default = true;
let expected_hidden = ModelPreset::from(hidden_model.clone());
let mut expected_visible = ModelPreset::from(visible_model.clone());
expected_visible.is_default = true;

let available = manager.build_available_models(vec![hidden_model, visible_model]);

assert_eq!(available, vec![expected]);
assert_eq!(available, vec![expected_hidden, expected_visible]);
}

#[test]
Expand Down
10 changes: 3 additions & 7 deletions codex-rs/core/src/models_manager/model_presets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning for complex problems".to_string(),
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
is_default: false,
Expand Down Expand Up @@ -170,7 +170,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning for complex problems".to_string(),
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
is_default: false,
Expand Down Expand Up @@ -322,11 +322,7 @@ fn gpt_52_codex_upgrade() -> ModelUpgrade {
}

/// Returns every built-in model preset, including ones hidden from the picker
/// (`show_in_picker == false`). Visibility filtering is applied by callers
/// (e.g. `ModelsManager::filter_visible_models`), not here.
///
/// `_auth_mode` is currently unused; the full preset list is the same for all
/// auth modes.
pub(super) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
    // `PRESETS` is a lazily-initialized Vec; `to_vec` clones it in one step
    // instead of iter().cloned().collect().
    PRESETS.to_vec()
}

#[cfg(any(test, feature = "test-support"))]
Expand Down
226 changes: 224 additions & 2 deletions codex-rs/core/tests/suite/list_models.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,18 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
}

/// Test fixture: the full set of presets expected from `list_models` when
/// authenticated with an API key. Hidden presets are included because listing
/// no longer filters on `show_in_picker` — NOTE(review): keep this in sync
/// with the builtin preset list.
fn expected_models_for_api_key() -> Vec<ModelPreset> {
    vec![
        gpt_5_1_codex_max(),
        gpt_5_1_codex_mini(),
        gpt_5_2(),
        bengalfox(),
        boomslang(),
        gpt_5_codex(),
        gpt_5_codex_mini(),
        gpt_5_1_codex(),
        gpt_5(),
        gpt_5_1(),
    ]
}

fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
Expand All @@ -53,6 +64,13 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
gpt_5_1_codex_max,
gpt_5_1_codex_mini(),
gpt_5_2(),
bengalfox(),
boomslang(),
gpt_5_codex(),
gpt_5_codex_mini(),
gpt_5_1_codex(),
gpt_5(),
gpt_5_1(),
]
}

Expand Down Expand Up @@ -168,7 +186,7 @@ fn gpt_5_2() -> ModelPreset {
),
effort(
ReasoningEffort::XHigh,
"Extra high reasoning for complex problems",
"Extra high reasoning depth for complex problems",
),
],
is_default: false,
Expand All @@ -178,6 +196,210 @@ fn gpt_5_2() -> ModelPreset {
}
}

/// Test fixture: the hidden `bengalfox` preset (API-supported, not shown in
/// the picker).
fn bengalfox() -> ModelPreset {
    // Build the effort list first so the struct literal below stays flat.
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Fast responses with lighter reasoning",
        ),
        effort(
            ReasoningEffort::Medium,
            "Balances speed and reasoning depth for everyday tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Greater reasoning depth for complex problems",
        ),
        effort(
            ReasoningEffort::XHigh,
            "Extra high reasoning depth for complex problems",
        ),
    ];
    ModelPreset {
        id: "bengalfox".to_string(),
        model: "bengalfox".to_string(),
        display_name: "bengalfox".to_string(),
        description: "bengalfox".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: None,
        show_in_picker: false,
        supported_in_api: true,
    }
}

/// Test fixture: the hidden `boomslang` preset (API-supported, not shown in
/// the picker).
fn boomslang() -> ModelPreset {
    // Effort options extracted into a local to keep the literal readable.
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
        effort(
            ReasoningEffort::XHigh,
            "Extra high reasoning depth for complex problems",
        ),
    ];
    ModelPreset {
        id: "boomslang".to_string(),
        model: "boomslang".to_string(),
        display_name: "boomslang".to_string(),
        description: "boomslang".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: None,
        show_in_picker: false,
        supported_in_api: true,
    }
}

/// Test fixture: the `gpt-5-codex` preset, hidden from the picker and carrying
/// an upgrade pointer to gpt-5.2-codex.
fn gpt_5_codex() -> ModelPreset {
    let name = "gpt-5-codex".to_string();
    ModelPreset {
        id: name.clone(),
        model: name.clone(),
        display_name: name,
        description: "Optimized for codex.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fastest responses with limited reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}

/// Test fixture: the `gpt-5-codex-mini` preset, hidden from the picker and
/// carrying an upgrade pointer to gpt-5.2-codex.
fn gpt_5_codex_mini() -> ModelPreset {
    let name = "gpt-5-codex-mini".to_string();
    ModelPreset {
        id: name.clone(),
        model: name.clone(),
        display_name: name,
        description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}

/// Test fixture: the `gpt-5.1-codex` preset, hidden from the picker and
/// carrying an upgrade pointer to gpt-5.2-codex.
fn gpt_5_1_codex() -> ModelPreset {
    let name = "gpt-5.1-codex".to_string();
    ModelPreset {
        id: name.clone(),
        model: name.clone(),
        display_name: name,
        description: "Optimized for codex.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: vec![
            effort(
                ReasoningEffort::Low,
                "Fastest responses with limited reasoning",
            ),
            effort(
                ReasoningEffort::Medium,
                "Dynamically adjusts reasoning based on the task",
            ),
            effort(
                ReasoningEffort::High,
                "Maximizes reasoning depth for complex or ambiguous problems",
            ),
        ],
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}

/// Test fixture: the `gpt-5` preset, hidden from the picker and carrying an
/// upgrade pointer to gpt-5.2-codex. Only preset here that offers Minimal
/// effort.
fn gpt_5() -> ModelPreset {
    // Effort options extracted into a local to keep the literal readable.
    let efforts = vec![
        effort(
            ReasoningEffort::Minimal,
            "Fastest responses with little reasoning",
        ),
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: "gpt-5".to_string(),
        model: "gpt-5".to_string(),
        display_name: "gpt-5".to_string(),
        description: "Broad world knowledge with strong general reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}

/// Test fixture: the `gpt-5.1` preset, hidden from the picker and carrying an
/// upgrade pointer to gpt-5.2-codex.
fn gpt_5_1() -> ModelPreset {
    // Effort options extracted into a local to keep the literal readable.
    let efforts = vec![
        effort(
            ReasoningEffort::Low,
            "Balances speed with some reasoning; useful for straightforward queries and short explanations",
        ),
        effort(
            ReasoningEffort::Medium,
            "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
        ),
        effort(
            ReasoningEffort::High,
            "Maximizes reasoning depth for complex or ambiguous problems",
        ),
    ];
    ModelPreset {
        id: "gpt-5.1".to_string(),
        model: "gpt-5.1".to_string(),
        display_name: "gpt-5.1".to_string(),
        description: "Broad world knowledge with strong general reasoning.".to_string(),
        default_reasoning_effort: ReasoningEffort::Medium,
        supported_reasoning_efforts: efforts,
        is_default: false,
        upgrade: Some(gpt52_codex_upgrade()),
        show_in_picker: false,
        supported_in_api: true,
    }
}

fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
codex_protocol::openai_models::ModelUpgrade {
id: "gpt-5.2-codex".to_string(),
Expand Down
11 changes: 5 additions & 6 deletions codex-rs/core/tests/suite/remote_models.rs
Original file line number Diff line number Diff line change
Expand Up @@ -381,12 +381,11 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
assert_eq!(selected, "gpt-5.2-codex");

let available = manager.list_models(&config).await;
assert!(
available
.iter()
.all(|model| model.model != "codex-auto-balanced"),
"hidden models should not appear in the picker list"
);
let hidden = available
.iter()
.find(|model| model.model == "codex-auto-balanced")
.expect("hidden remote model should be listed");
assert!(!hidden.show_in_picker, "hidden models should remain hidden");

Ok(())
}
Expand Down
Loading
Loading