From 689f05783a6eeef3ce323d5a9b6171858a7b2a23 Mon Sep 17 00:00:00 2001
From: Meng Lan
Date: Thu, 7 Dec 2023 12:27:17 +0800
Subject: [PATCH 1/2] change max_tokens default value to 512

---
 src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml b/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml
index d88778e8e0d..0fc68b0acb5 100644
--- a/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml
+++ b/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml
@@ -32,7 +32,7 @@ promptflow.tools.openai_gpt4v.OpenAI.chat:
       type:
       - double
     max_tokens:
-      default: ""
+      default: 512
       type:
       - int
     stop:

From dea9a7284a73fb188ec67f4d88cb6ec7ba15fdaa Mon Sep 17 00:00:00 2001
From: Meng Lan
Date: Thu, 7 Dec 2023 13:14:30 +0800
Subject: [PATCH 2/2] change reference doc

---
 docs/reference/tools-reference/openai-gpt-4v-tool.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/tools-reference/openai-gpt-4v-tool.md b/docs/reference/tools-reference/openai-gpt-4v-tool.md
index f5dee3b0d13..49721f42480 100644
--- a/docs/reference/tools-reference/openai-gpt-4v-tool.md
+++ b/docs/reference/tools-reference/openai-gpt-4v-tool.md
@@ -29,7 +29,7 @@ Setup connections to provisioned resources in prompt flow.
 | connection | OpenAI | the OpenAI connection to be used in the tool | Yes |
 | model | string | the language model to use, currently only support gpt-4-vision-preview | Yes |
 | prompt | string | The text prompt that the language model will use to generate it's response. | Yes |
-| max\_tokens | integer | the maximum number of tokens to generate in the response. Default is a low value decided by [OpenAI API](https://platform.openai.com/docs/guides/vision). | No |
+| max\_tokens | integer | the maximum number of tokens to generate in the response. Default is 512. | No |
 | temperature | float | the randomness of the generated text. Default is 1. | No |
 | stop | list | the stopping sequence for the generated text. Default is null. | No |
 | top_p | float | the probability of using the top choice from the generated tokens. Default is 1. | No |