diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb
index 9e5196748e3..8def0f2504b 100644
--- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb
+++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk.ipynb
@@ -905,7 +905,7 @@
 " # Basic configuration\n",
 " system_instruction=SYSTEM_INSTRUCTION_TEMPLATE, # System instructions for the target model. String.\n",
 " prompt_template=PROMPT_TEMPLATE, # Template for prompts, String.\n",
-" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
+" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-2.0-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
 " optimization_mode=\"instruction\", # Optimization mode. String. Supported modes: \"instruction\", \"demonstration\", \"instruction_and_demo\"\n",
 " eval_metrics_types=[\n",
 " \"question_answering_correctness\",\n",
diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb
index f33a7a08226..52d1225e242 100644
--- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb
+++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_custom_metric.ipynb
@@ -1146,7 +1146,7 @@
 " # Basic configuration\n",
 " system_instruction=SYSTEM_INSTRUCTION_TEMPLATE, # System instructions for the target model. String.\n",
 " prompt_template=PROMPT_TEMPLATE, # Template for prompts, String.\n",
-" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
+" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-2.0-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
 " optimization_mode=\"instruction\", # Optimization mode. String. Supported modes: \"instruction\", \"demonstration\", \"instruction_and_demo\"\n",
 " custom_metric_name=\"custom_engagement_personalization_score\", # Metric name, as defined by the key that corresponds in the dictionary returned from Cloud function. String.\n",
 " custom_metric_cloud_function_name=\"custom_engagement_personalization_metric\", # Cloud Run function name you previously deployed. String.\n",
diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_long_prompt_optimization.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_long_prompt_optimization.ipynb
index 8b1c1b53e64..f8acd952f77 100644
--- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_long_prompt_optimization.ipynb
+++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_long_prompt_optimization.ipynb
@@ -811,7 +811,7 @@
 " # Basic configuration\n",
 " system_instruction=SYSTEM_INSTRUCTION_TEMPLATE, # System instructions for the target model. String.\n",
 " prompt_template=PROMPT_TEMPLATE, # Template for prompts, String.\n",
-" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
+" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-2.0-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
 " optimization_mode=\"instruction\", # Optimization mode. String. Supported modes: \"instruction\", \"demonstration\", \"instruction_and_demo\"\n",
 " eval_metrics_types=[\n",
 " \"question_answering_correctness\",\n",
diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_tool_calling.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_tool_calling.ipynb
index fe66ef6e262..4793ac65071 100644
--- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_tool_calling.ipynb
+++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_sdk_tool_calling.ipynb
@@ -1123,7 +1123,7 @@
 " # Basic configuration\n",
 " system_instruction=SYSTEM_INSTRUCTION_TEMPLATE, # System instructions for the target model. String.\n",
 " prompt_template=PROMPT_TEMPLATE, # Template for prompts, String.\n",
-" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
+" target_model=\"gemini-1.5-flash-001\", # Target model for optimization. String. Supported models: \"gemini-2.0-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-pro-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.0-ultra-001\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"\n",
 " optimization_mode=\"instruction\", # Optimization mode. String. Supported modes: \"instruction\", \"demonstration\", \"instruction_and_demo\"\n",
 " tools=vapo_tools,\n",
 " tool_config=vapo_tool_config,\n",
diff --git a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb
index 6d5bd9fab9f..3e96e13d3b7 100644
--- a/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb
+++ b/gemini/prompts/prompt_optimizer/vertex_ai_prompt_optimizer_ui.ipynb
@@ -247,7 +247,7 @@
 "outputs": [],
 "source": [
 "SOURCE_MODEL = \"\" # @param [\"\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-001\", \"gemini-1.5-pro-002\", \"gemini-1.0-ultra-001\", \"gemini-experimental\", \"gemini-flash-experimental\", \"gemini-pro-experimental\", \"text-bison@001\", \"text-bison@002\", \"text-bison32k@002\", \"text-unicorn@001\"]\n",
-"TARGET_MODEL = \"gemini-1.5-flash-001\" # @param [\"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-001\", \"gemini-1.5-pro-002\", \"gemini-1.0-ultra-001\", \"gemini-experimental\", \"gemini-flash-experimental\", \"gemini-pro-experimental\"]\n",
+"TARGET_MODEL = \"gemini-1.5-flash-001\" # @param [\"gemini-2.0-flash-001\", \"gemini-1.0-pro-001\", \"gemini-1.0-pro-002\", \"gemini-1.5-flash-001\", \"gemini-1.5-flash-002\", \"gemini-1.5-pro-001\", \"gemini-1.5-pro-002\", \"gemini-1.0-ultra-001\", \"gemini-experimental\", \"gemini-flash-experimental\", \"gemini-pro-experimental\"]\n",
 "OPTIMIZATION_MODE = \"instruction_and_demo\" # @param [\"instruction\", \"demonstration\", \"instruction_and_demo\"]\n",
 "EVAL_METRIC = \"question_answering_correctness\" # @param [\"bleu\", \"coherence\", \"comet\", \"exact_match\", \"fluency\", \"groundedness\", \"metricx\", \"text_quality\", \"verbosity\", \"rouge_1\", \"rouge_2\", \"rouge_l\", \"rouge_l_sum\", \"safety\", \"question_answering_correctness\", \"question_answering_quality\", \"summarization_quality\", \"tool_name_match\", \"tool_parameter_key_match\", \"tool_parameter_kv_match\", \"tool_call_valid\"] {type:\"string\"}\n",
 "TRANSLATION_SOURCE_FIELD_NAME = \"\" # @param {type:\"string\"}"