diff --git a/codex-rs/app-server/src/models.rs b/codex-rs/app-server/src/models.rs
index 21411603547..29a9c996361 100644
--- a/codex-rs/app-server/src/models.rs
+++ b/codex-rs/app-server/src/models.rs
@@ -15,6 +15,7 @@ pub async fn supported_models(
         .list_models(config)
         .await
         .into_iter()
+        .filter(|preset| preset.show_in_picker)
         .map(model_from_preset)
         .collect()
 }
diff --git a/codex-rs/app-server/tests/suite/v2/model_list.rs b/codex-rs/app-server/tests/suite/v2/model_list.rs
index e9fe70dbee5..4cdb306205c 100644
--- a/codex-rs/app-server/tests/suite/v2/model_list.rs
+++ b/codex-rs/app-server/tests/suite/v2/model_list.rs
@@ -74,7 +74,7 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
             },
             ReasoningEffortOption {
                 reasoning_effort: ReasoningEffort::XHigh,
-                description: "Extra high reasoning for complex problems".to_string(),
+                description: "Extra high reasoning depth for complex problems".to_string(),
             },
         ],
         default_reasoning_effort: ReasoningEffort::Medium,
diff --git a/codex-rs/core/src/models_manager/manager.rs b/codex-rs/core/src/models_manager/manager.rs
index 060f4a5c278..29be88735be 100644
--- a/codex-rs/core/src/models_manager/manager.rs
+++ b/codex-rs/core/src/models_manager/manager.rs
@@ -149,14 +149,14 @@ impl ModelsManager {
         // if codex-auto-balanced exists & signed in with chatgpt mode, return it, otherwise return the default model
         let auth_mode = self.auth_manager.get_auth_mode();
         let remote_models = self.remote_models(config).await;
-        if auth_mode == Some(AuthMode::ChatGPT)
-            && self
+        if auth_mode == Some(AuthMode::ChatGPT) {
+            let has_auto_balanced = self
                 .build_available_models(remote_models)
                 .iter()
-                .any(|m| m.model == CODEX_AUTO_BALANCED_MODEL)
-        {
-            return CODEX_AUTO_BALANCED_MODEL.to_string();
-        } else if auth_mode == Some(AuthMode::ChatGPT) {
+                .any(|model| model.model == CODEX_AUTO_BALANCED_MODEL && model.show_in_picker);
+            if has_auto_balanced {
+                return CODEX_AUTO_BALANCED_MODEL.to_string();
+            }
             return OPENAI_DEFAULT_CHATGPT_MODEL.to_string();
         }
         OPENAI_DEFAULT_API_MODEL.to_string()
@@ -247,10 +247,15 @@ impl ModelsManager {
         merged_presets = self.filter_visible_models(merged_presets);
 
         let has_default = merged_presets.iter().any(|preset| preset.is_default);
-        if let Some(default) = merged_presets.first_mut()
-            && !has_default
-        {
-            default.is_default = true;
+        if !has_default {
+            if let Some(default) = merged_presets
+                .iter_mut()
+                .find(|preset| preset.show_in_picker)
+            {
+                default.is_default = true;
+            } else if let Some(default) = merged_presets.first_mut() {
+                default.is_default = true;
+            }
         }
 
         merged_presets
@@ -260,7 +265,7 @@ impl ModelsManager {
         let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
         models
             .into_iter()
-            .filter(|model| model.show_in_picker && (chatgpt_mode || model.supported_in_api))
+            .filter(|model| chatgpt_mode || model.supported_in_api)
             .collect()
     }
 
@@ -654,12 +659,13 @@ mod tests {
         let hidden_model = remote_model_with_visibility("hidden", "Hidden", 0, "hide");
         let visible_model = remote_model_with_visibility("visible", "Visible", 1, "list");
 
-        let mut expected = ModelPreset::from(visible_model.clone());
-        expected.is_default = true;
+        let expected_hidden = ModelPreset::from(hidden_model.clone());
+        let mut expected_visible = ModelPreset::from(visible_model.clone());
+        expected_visible.is_default = true;
 
         let available = manager.build_available_models(vec![hidden_model, visible_model]);
 
-        assert_eq!(available, vec![expected]);
+        assert_eq!(available, vec![expected_hidden, expected_visible]);
     }
 
     #[test]
diff --git a/codex-rs/core/src/models_manager/model_presets.rs b/codex-rs/core/src/models_manager/model_presets.rs
index 0a7e7857843..080c44433bc 100644
--- a/codex-rs/core/src/models_manager/model_presets.rs
+++ b/codex-rs/core/src/models_manager/model_presets.rs
@@ -112,7 +112,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
             },
             ReasoningEffortPreset {
                 effort: ReasoningEffort::XHigh,
-                description: "Extra high reasoning for complex problems".to_string(),
+                description: "Extra high reasoning depth for complex problems".to_string(),
             },
         ],
         is_default: false,
@@ -170,7 +170,7 @@
             },
             ReasoningEffortPreset {
                 effort: ReasoningEffort::XHigh,
-                description: "Extra high reasoning for complex problems".to_string(),
+                description: "Extra high reasoning depth for complex problems".to_string(),
             },
         ],
         is_default: false,
@@ -322,11 +322,7 @@ fn gpt_52_codex_upgrade() -> ModelUpgrade {
 }
 
 pub(super) fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> {
-    PRESETS
-        .iter()
-        .filter(|preset| preset.show_in_picker)
-        .cloned()
-        .collect()
+    PRESETS.iter().cloned().collect()
 }
 
 #[cfg(any(test, feature = "test-support"))]
diff --git a/codex-rs/core/tests/suite/list_models.rs b/codex-rs/core/tests/suite/list_models.rs
index 565b978faa2..30593a8f603 100644
--- a/codex-rs/core/tests/suite/list_models.rs
+++ b/codex-rs/core/tests/suite/list_models.rs
@@ -42,7 +42,18 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {
 }
 
 fn expected_models_for_api_key() -> Vec<ModelPreset> {
-    vec![gpt_5_1_codex_max(), gpt_5_1_codex_mini(), gpt_5_2()]
+    vec![
+        gpt_5_1_codex_max(),
+        gpt_5_1_codex_mini(),
+        gpt_5_2(),
+        bengalfox(),
+        boomslang(),
+        gpt_5_codex(),
+        gpt_5_codex_mini(),
+        gpt_5_1_codex(),
+        gpt_5(),
+        gpt_5_1(),
+    ]
 }
 
 fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
@@ -53,6 +64,13 @@ fn expected_models_for_chatgpt() -> Vec<ModelPreset> {
         gpt_5_1_codex_max,
         gpt_5_1_codex_mini(),
         gpt_5_2(),
+        bengalfox(),
+        boomslang(),
+        gpt_5_codex(),
+        gpt_5_codex_mini(),
+        gpt_5_1_codex(),
+        gpt_5(),
+        gpt_5_1(),
     ]
 }
 
@@ -168,7 +186,7 @@ fn gpt_5_2() -> ModelPreset {
             ),
             effort(
                 ReasoningEffort::XHigh,
-                "Extra high reasoning for complex problems",
+                "Extra high reasoning depth for complex problems",
             ),
         ],
         is_default: false,
@@ -178,6 +196,210 @@
     }
 }
 
+fn bengalfox() -> ModelPreset {
+    ModelPreset {
+        id: "bengalfox".to_string(),
+        model: "bengalfox".to_string(),
+        display_name: "bengalfox".to_string(),
+        description: "bengalfox".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Low,
+                "Fast responses with lighter reasoning",
+            ),
+            effort(
+                ReasoningEffort::Medium,
+                "Balances speed and reasoning depth for everyday tasks",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Greater reasoning depth for complex problems",
+            ),
+            effort(
+                ReasoningEffort::XHigh,
+                "Extra high reasoning depth for complex problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: None,
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
+fn boomslang() -> ModelPreset {
+    ModelPreset {
+        id: "boomslang".to_string(),
+        model: "boomslang".to_string(),
+        display_name: "boomslang".to_string(),
+        description: "boomslang".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Low,
+                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
+            ),
+            effort(
+                ReasoningEffort::Medium,
+                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Maximizes reasoning depth for complex or ambiguous problems",
+            ),
+            effort(
+                ReasoningEffort::XHigh,
+                "Extra high reasoning depth for complex problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: None,
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
+fn gpt_5_codex() -> ModelPreset {
+    ModelPreset {
+        id: "gpt-5-codex".to_string(),
+        model: "gpt-5-codex".to_string(),
+        display_name: "gpt-5-codex".to_string(),
+        description: "Optimized for codex.".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Low,
+                "Fastest responses with limited reasoning",
+            ),
+            effort(
+                ReasoningEffort::Medium,
+                "Dynamically adjusts reasoning based on the task",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Maximizes reasoning depth for complex or ambiguous problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: Some(gpt52_codex_upgrade()),
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
+fn gpt_5_codex_mini() -> ModelPreset {
+    ModelPreset {
+        id: "gpt-5-codex-mini".to_string(),
+        model: "gpt-5-codex-mini".to_string(),
+        display_name: "gpt-5-codex-mini".to_string(),
+        description: "Optimized for codex. Cheaper, faster, but less capable.".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Medium,
+                "Dynamically adjusts reasoning based on the task",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Maximizes reasoning depth for complex or ambiguous problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: Some(gpt52_codex_upgrade()),
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
+fn gpt_5_1_codex() -> ModelPreset {
+    ModelPreset {
+        id: "gpt-5.1-codex".to_string(),
+        model: "gpt-5.1-codex".to_string(),
+        display_name: "gpt-5.1-codex".to_string(),
+        description: "Optimized for codex.".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Low,
+                "Fastest responses with limited reasoning",
+            ),
+            effort(
+                ReasoningEffort::Medium,
+                "Dynamically adjusts reasoning based on the task",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Maximizes reasoning depth for complex or ambiguous problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: Some(gpt52_codex_upgrade()),
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
+fn gpt_5() -> ModelPreset {
+    ModelPreset {
+        id: "gpt-5".to_string(),
+        model: "gpt-5".to_string(),
+        display_name: "gpt-5".to_string(),
+        description: "Broad world knowledge with strong general reasoning.".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Minimal,
+                "Fastest responses with little reasoning",
+            ),
+            effort(
+                ReasoningEffort::Low,
+                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
+            ),
+            effort(
+                ReasoningEffort::Medium,
+                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Maximizes reasoning depth for complex or ambiguous problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: Some(gpt52_codex_upgrade()),
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
+fn gpt_5_1() -> ModelPreset {
+    ModelPreset {
+        id: "gpt-5.1".to_string(),
+        model: "gpt-5.1".to_string(),
+        display_name: "gpt-5.1".to_string(),
+        description: "Broad world knowledge with strong general reasoning.".to_string(),
+        default_reasoning_effort: ReasoningEffort::Medium,
+        supported_reasoning_efforts: vec![
+            effort(
+                ReasoningEffort::Low,
+                "Balances speed with some reasoning; useful for straightforward queries and short explanations",
+            ),
+            effort(
+                ReasoningEffort::Medium,
+                "Provides a solid balance of reasoning depth and latency for general-purpose tasks",
+            ),
+            effort(
+                ReasoningEffort::High,
+                "Maximizes reasoning depth for complex or ambiguous problems",
+            ),
+        ],
+        is_default: false,
+        upgrade: Some(gpt52_codex_upgrade()),
+        show_in_picker: false,
+        supported_in_api: true,
+    }
+}
+
 fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade {
     codex_protocol::openai_models::ModelUpgrade {
         id: "gpt-5.2-codex".to_string(),
diff --git a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs
index e4db774ddbc..94641f1a4cf 100644
--- a/codex-rs/core/tests/suite/remote_models.rs
+++ b/codex-rs/core/tests/suite/remote_models.rs
@@ -381,12 +381,11 @@ async fn remote_models_hide_picker_only_models() -> Result<()> {
     assert_eq!(selected, "gpt-5.2-codex");
 
     let available = manager.list_models(&config).await;
-    assert!(
-        available
-            .iter()
-            .all(|model| model.model != "codex-auto-balanced"),
-        "hidden models should not appear in the picker list"
-    );
+    let hidden = available
+        .iter()
+        .find(|model| model.model == "codex-auto-balanced")
+        .expect("hidden remote model should be listed");
+    assert!(!hidden.show_in_picker, "hidden models should remain hidden");
 
     Ok(())
 }
diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs
index e0af51e7ebb..d76a5b40392 100644
--- a/codex-rs/tui/src/app.rs
+++ b/codex-rs/tui/src/app.rs
@@ -1334,16 +1334,19 @@ mod tests {
     use codex_core::AuthManager;
     use codex_core::CodexAuth;
     use codex_core::ConversationManager;
+    use codex_core::config::ConfigBuilder;
     use codex_core::protocol::AskForApproval;
     use codex_core::protocol::Event;
     use codex_core::protocol::EventMsg;
     use codex_core::protocol::SandboxPolicy;
     use codex_core::protocol::SessionConfiguredEvent;
     use codex_protocol::ConversationId;
+    use insta::assert_snapshot;
     use ratatui::prelude::Line;
     use std::path::PathBuf;
     use std::sync::Arc;
     use std::sync::atomic::AtomicBool;
+    use tempfile::tempdir;
 
     async fn make_test_app() -> App {
         let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
@@ -1427,6 +1430,24 @@ mod tests {
         codex_core::models_manager::model_presets::all_model_presets().clone()
     }
 
+    fn model_migration_copy_to_plain_text(
+        copy: &crate::model_migration::ModelMigrationCopy,
+    ) -> String {
+        let mut s = String::new();
+        for span in &copy.heading {
+            s.push_str(&span.content);
+        }
+        s.push('\n');
+        s.push('\n');
+        for line in &copy.content {
+            for span in &line.spans {
+                s.push_str(&span.content);
+            }
+            s.push('\n');
+        }
+        s
+    }
+
     #[tokio::test]
     async fn model_migration_prompt_only_shows_for_deprecated_models() {
         let seen = BTreeMap::new();
@@ -1508,6 +1529,59 @@ mod tests {
         assert!(target_preset_for_upgrade(&available, "missing-target").is_none());
     }
 
+    #[tokio::test]
+    async fn model_migration_prompt_shows_for_hidden_model() {
+        let codex_home = tempdir().expect("temp codex home");
+        let config = ConfigBuilder::default()
+            .codex_home(codex_home.path().to_path_buf())
+            .build()
+            .await
+            .expect("config");
+
+        let available_models = all_model_presets();
+        let current = available_models
+            .iter()
+            .find(|preset| preset.model == "gpt-5.1-codex")
+            .cloned()
+            .expect("gpt-5.1-codex preset present");
+        assert!(
+            !current.show_in_picker,
+            "expected gpt-5.1-codex to be hidden from picker for this test"
+        );
+
+        let upgrade = current.upgrade.as_ref().expect("upgrade configured");
+        assert!(
+            should_show_model_migration_prompt(
+                &current.model,
+                &upgrade.id,
+                &config.notices.model_migrations,
+                &available_models,
+            ),
+            "expected migration prompt to be eligible for hidden model"
+        );
+
+        let target = target_preset_for_upgrade(&available_models, &upgrade.id)
+            .expect("upgrade target present");
+        let target_description =
+            (!target.description.is_empty()).then(|| target.description.clone());
+        let can_opt_out = true;
+        let copy = migration_copy_for_models(
+            &current.model,
+            &upgrade.id,
+            upgrade.model_link.clone(),
+            upgrade.upgrade_copy.clone(),
+            target.display_name.clone(),
+            target_description,
+            can_opt_out,
+        );
+
+        // Snapshot the copy we would show; rendering is covered by model_migration snapshots.
+        assert_snapshot!(
+            "model_migration_prompt_shows_for_hidden_model",
+            model_migration_copy_to_plain_text(&copy)
+        );
+    }
+
     #[tokio::test]
     async fn update_reasoning_effort_updates_config() {
         let mut app = make_test_app().await;
diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs
index 152724261ac..3535a1d0551 100644
--- a/codex-rs/tui/src/chatwidget.rs
+++ b/codex-rs/tui/src/chatwidget.rs
@@ -2310,7 +2310,7 @@ impl ChatWidget {
         let models = self.models_manager.try_list_models(&self.config).ok()?;
         models
             .iter()
-            .find(|preset| preset.model == NUDGE_MODEL_SLUG)
+            .find(|preset| preset.show_in_picker && preset.model == NUDGE_MODEL_SLUG)
             .cloned()
     }
 
@@ -2426,6 +2426,14 @@ impl ChatWidget {
                 return;
             }
         };
+        self.open_model_popup_with_presets(presets);
+    }
+
+    pub(crate) fn open_model_popup_with_presets(&mut self, presets: Vec<ModelPreset>) {
+        let presets: Vec<ModelPreset> = presets
+            .into_iter()
+            .filter(|preset| preset.show_in_picker)
+            .collect();
 
         let current_label = presets
             .iter()
diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_picker_filters_hidden_models.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_picker_filters_hidden_models.snap
new file mode 100644
index 00000000000..56dff7b5f0c
--- /dev/null
+++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_picker_filters_hidden_models.snap
@@ -0,0 +1,11 @@
+---
+source: tui/src/chatwidget/tests.rs
+assertion_line: 1989
+expression: popup
+---
+ Select Model and Effort
+ Access legacy models by running codex -m <model> or in your config.toml
+
+› 1. test-visible-model (current) test-visible-model description
+
+ Press enter to select reasoning effort, or esc to dismiss.
diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs
index 52cfcb8db02..f7af95b32fa 100644
--- a/codex-rs/tui/src/chatwidget/tests.rs
+++ b/codex-rs/tui/src/chatwidget/tests.rs
@@ -11,6 +11,7 @@ use codex_core::config::Config;
 use codex_core::config::ConfigBuilder;
 use codex_core::config::Constrained;
 use codex_core::config::ConstraintError;
+use codex_core::features::Feature;
 use codex_core::models_manager::manager::ModelsManager;
 use codex_core::protocol::AgentMessageDeltaEvent;
 use codex_core::protocol::AgentMessageEvent;
@@ -1961,6 +1962,41 @@ async fn model_selection_popup_snapshot() {
     assert_snapshot!("model_selection_popup", popup);
 }
 
+#[tokio::test]
+async fn model_picker_hides_show_in_picker_false_models_from_cache() {
+    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("test-visible-model")).await;
+    let preset = |slug: &str, show_in_picker: bool| ModelPreset {
+        id: slug.to_string(),
+        model: slug.to_string(),
+        display_name: slug.to_string(),
+        description: format!("{slug} description"),
+        default_reasoning_effort: ReasoningEffortConfig::Medium,
+        supported_reasoning_efforts: vec![ReasoningEffortPreset {
+            effort: ReasoningEffortConfig::Medium,
+            description: "medium".to_string(),
+        }],
+        is_default: false,
+        upgrade: None,
+        show_in_picker,
+        supported_in_api: true,
+    };
+
+    chat.open_model_popup_with_presets(vec![
+        preset("test-visible-model", true),
+        preset("test-hidden-model", false),
+    ]);
+    let popup = render_bottom_popup(&chat, 80);
+    assert_snapshot!("model_picker_filters_hidden_models", popup);
+    assert!(
+        popup.contains("test-visible-model"),
+        "expected visible model to appear in picker:\n{popup}"
+    );
+    assert!(
+        !popup.contains("test-hidden-model"),
+        "expected hidden model to be excluded from picker:\n{popup}"
+    );
+}
+
 #[tokio::test]
 async fn approvals_selection_popup_snapshot() {
     let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
diff --git a/codex-rs/tui/src/snapshots/codex_tui__app__tests__model_migration_prompt_shows_for_hidden_model.snap b/codex-rs/tui/src/snapshots/codex_tui__app__tests__model_migration_prompt_shows_for_hidden_model.snap
new file mode 100644
index 00000000000..9016aebea84
--- /dev/null
+++ b/codex-rs/tui/src/snapshots/codex_tui__app__tests__model_migration_prompt_shows_for_hidden_model.snap
@@ -0,0 +1,10 @@
+---
+source: tui/src/app.rs
+assertion_line: 1579
+expression: model_migration_copy_to_plain_text(&copy)
+---
+Codex just got an upgrade. Introducing gpt-5.2-codex.
+
+Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work. Learn more about gpt-5.2-codex at https://openai.com/index/introducing-gpt-5-2-codex
+
+You can continue using gpt-5.1-codex if you prefer.
diff --git a/codex-rs/tui2/src/app.rs b/codex-rs/tui2/src/app.rs
index 1f7cad57746..49bb005a7a0 100644
--- a/codex-rs/tui2/src/app.rs
+++ b/codex-rs/tui2/src/app.rs
@@ -2088,17 +2088,20 @@ mod tests {
     use codex_core::AuthManager;
     use codex_core::CodexAuth;
     use codex_core::ConversationManager;
+    use codex_core::config::ConfigBuilder;
     use codex_core::protocol::AskForApproval;
     use codex_core::protocol::Event;
     use codex_core::protocol::EventMsg;
     use codex_core::protocol::SandboxPolicy;
     use codex_core::protocol::SessionConfiguredEvent;
     use codex_protocol::ConversationId;
+    use insta::assert_snapshot;
     use pretty_assertions::assert_eq;
     use ratatui::prelude::Line;
     use std::path::PathBuf;
     use std::sync::Arc;
     use std::sync::atomic::AtomicBool;
+    use tempfile::tempdir;
 
     async fn make_test_app() -> App {
         let (chat_widget, app_event_tx, _rx, _op_rx) = make_chatwidget_manual_with_sender().await;
@@ -2208,6 +2211,24 @@ mod tests {
         codex_core::models_manager::model_presets::all_model_presets().clone()
     }
 
+    fn model_migration_copy_to_plain_text(
+        copy: &crate::model_migration::ModelMigrationCopy,
+    ) -> String {
+        let mut s = String::new();
+        for span in &copy.heading {
+            s.push_str(&span.content);
+        }
+        s.push('\n');
+        s.push('\n');
+        for line in &copy.content {
+            for span in &line.spans {
+                s.push_str(&span.content);
+            }
+            s.push('\n');
+        }
+        s
+    }
+
     #[tokio::test]
     async fn model_migration_prompt_only_shows_for_deprecated_models() {
         let seen = BTreeMap::new();
@@ -2243,6 +2264,59 @@ mod tests {
         ));
     }
 
+    #[tokio::test]
+    async fn model_migration_prompt_shows_for_hidden_model() {
+        let codex_home = tempdir().expect("temp codex home");
+        let config = ConfigBuilder::default()
+            .codex_home(codex_home.path().to_path_buf())
+            .build()
+            .await
+            .expect("config");
+
+        let available_models = all_model_presets();
+        let current = available_models
+            .iter()
+            .find(|preset| preset.model == "gpt-5.1-codex")
+            .cloned()
+            .expect("gpt-5.1-codex preset present");
+        assert!(
+            !current.show_in_picker,
+            "expected gpt-5.1-codex to be hidden from picker for this test"
+        );
+
+        let upgrade = current.upgrade.as_ref().expect("upgrade configured");
+        assert!(
+            should_show_model_migration_prompt(
+                &current.model,
+                &upgrade.id,
+                &config.notices.model_migrations,
+                &available_models,
+            ),
+            "expected migration prompt to be eligible for hidden model"
+        );
+
+        let target = available_models
+            .iter()
+            .find(|preset| preset.model == upgrade.id)
+            .cloned()
+            .expect("upgrade target present");
+        let target_description =
+            (!target.description.is_empty()).then(|| target.description.clone());
+        let can_opt_out = true;
+        let copy = migration_copy_for_models(
+            &current.model,
+            &upgrade.id,
+            target.display_name,
+            target_description,
+            can_opt_out,
+        );
+
+        assert_snapshot!(
+            "model_migration_prompt_shows_for_hidden_model",
+            model_migration_copy_to_plain_text(&copy)
+        );
+    }
+
     #[tokio::test]
     async fn transcript_selection_copy_includes_offscreen_lines() {
         let mut app = make_test_app().await;
diff --git a/codex-rs/tui2/src/chatwidget.rs b/codex-rs/tui2/src/chatwidget.rs
index 7700c534249..200d66cf3e3 100644
--- a/codex-rs/tui2/src/chatwidget.rs
+++ b/codex-rs/tui2/src/chatwidget.rs
@@ -2106,7 +2106,7 @@ impl ChatWidget {
         let models = self.models_manager.try_list_models(&self.config).ok()?;
         models
             .iter()
-            .find(|preset| preset.model == NUDGE_MODEL_SLUG)
+            .find(|preset| preset.show_in_picker && preset.model == NUDGE_MODEL_SLUG)
             .cloned()
     }
 
@@ -2222,6 +2222,14 @@ impl ChatWidget {
                 return;
             }
         };
+        self.open_model_popup_with_presets(presets);
+    }
+
+    pub(crate) fn open_model_popup_with_presets(&mut self, presets: Vec<ModelPreset>) {
+        let presets: Vec<ModelPreset> = presets
+            .into_iter()
+            .filter(|preset| preset.show_in_picker)
+            .collect();
 
         let current_label = presets
             .iter()
diff --git a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_picker_filters_hidden_models.snap b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_picker_filters_hidden_models.snap
new file mode 100644
index 00000000000..a03d434905f
--- /dev/null
+++ b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_picker_filters_hidden_models.snap
@@ -0,0 +1,11 @@
+---
+source: tui2/src/chatwidget/tests.rs
+assertion_line: 1758
+expression: popup
+---
+ Select Model and Effort
+ Access legacy models by running codex -m <model> or in your config.toml
+
+› 1. test-visible-model (current) test-visible-model description
+
+ Press enter to select reasoning effort, or esc to dismiss.
diff --git a/codex-rs/tui2/src/chatwidget/tests.rs b/codex-rs/tui2/src/chatwidget/tests.rs
index df41f0dc696..cbece2dbaec 100644
--- a/codex-rs/tui2/src/chatwidget/tests.rs
+++ b/codex-rs/tui2/src/chatwidget/tests.rs
@@ -1731,6 +1731,41 @@ async fn model_selection_popup_snapshot() {
     assert_snapshot!("model_selection_popup", popup);
 }
 
+#[tokio::test]
+async fn model_picker_hides_show_in_picker_false_models_from_cache() {
+    let (mut chat, _rx, _op_rx) = make_chatwidget_manual(Some("test-visible-model")).await;
+    let preset = |slug: &str, show_in_picker: bool| ModelPreset {
+        id: slug.to_string(),
+        model: slug.to_string(),
+        display_name: slug.to_string(),
+        description: format!("{slug} description"),
+        default_reasoning_effort: ReasoningEffortConfig::Medium,
+        supported_reasoning_efforts: vec![ReasoningEffortPreset {
+            effort: ReasoningEffortConfig::Medium,
+            description: "medium".to_string(),
+        }],
+        is_default: false,
+        upgrade: None,
+        show_in_picker,
+        supported_in_api: true,
+    };
+
+    chat.open_model_popup_with_presets(vec![
+        preset("test-visible-model", true),
+        preset("test-hidden-model", false),
+    ]);
+    let popup = render_bottom_popup(&chat, 80);
+    assert_snapshot!("model_picker_filters_hidden_models", popup);
+    assert!(
+        popup.contains("test-visible-model"),
+        "expected visible model to appear in picker:\n{popup}"
+    );
+    assert!(
+        !popup.contains("test-hidden-model"),
+        "expected hidden model to be excluded from picker:\n{popup}"
+    );
+}
+
 #[tokio::test]
 async fn approvals_selection_popup_snapshot() {
     let (mut chat, _rx, _op_rx) = make_chatwidget_manual(None).await;
diff --git a/codex-rs/tui2/src/snapshots/codex_tui2__app__tests__model_migration_prompt_shows_for_hidden_model.snap b/codex-rs/tui2/src/snapshots/codex_tui2__app__tests__model_migration_prompt_shows_for_hidden_model.snap
new file mode 100644
index 00000000000..acfdedc1007
--- /dev/null
+++ b/codex-rs/tui2/src/snapshots/codex_tui2__app__tests__model_migration_prompt_shows_for_hidden_model.snap
@@ -0,0 +1,12 @@
+---
+source: tui2/src/app.rs
+assertion_line: 2314
+expression: model_migration_copy_to_plain_text(&copy)
+---
+Try gpt-5.2-codex
+
+We recommend switching from gpt-5.1-codex to gpt-5.2-codex.
+
+Latest frontier agentic coding model.
+
+You can continue using gpt-5.1-codex if you prefer.