diff --git a/openapi.yaml b/openapi.yaml index d35693f3..14335596 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -302,7 +302,7 @@ paths: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"?"},"finish_reason":null}]} {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"finish_reason":"stop"}]} - - title: Function calling + - title: Functions request: curl: | curl https://api.openai.com/v1/chat/completions \ @@ -316,55 +316,61 @@ paths: "content": "What is the weather like in Boston?" } ], - "functions": [ + "tools": [ { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA" + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"] - } - }, - "required": ["location"] + "required": ["location"] + } } } ], - "function_call": "auto" + "tool_choice": "auto" }' python: | from openai import OpenAI client = OpenAI() - functions = [ + tools = [ { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { "type": "object", "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, }, "required": ["location"], - }, + }, + } } ] messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] completion = client.chat.completions.create( model="VAR_model_id", messages=messages, - functions=functions, - function_call="auto" + tools=tools, + tool_choice="auto" ) print(completion) @@ -375,29 +381,32 @@ paths: async function main() { const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; - const functions = [ + const tools = [ { + "type": "function", + "function": { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", }, - "required": ["location"], + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], }, + } } ]; const response = await openai.chat.completions.create({ - model: "gpt-3.5-turbo", - messages: messages, - functions: functions, - function_call: "auto", // auto is default, but we'll be explicit + model: "gpt-3.5-turbo", + messages: messages, + tools: tools, + tool_choice: "auto", }); console.log(response); @@ -406,30 +415,37 @@ paths: main(); response: &chat_completion_function_example | { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-3.5-turbo-0613", "choices": [ { - "finish_reason": "function_call", "index": 0, "message": { + "role": "assistant", "content": null, - "function_call": { - "arguments": "{\n \"location\": \"Boston, MA\"\n}", - "name": "get_current_weather" - }, - "role": "assistant" - } + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "finish_reason": "tool_calls", } ], - "created": 1694028367, - "model": "gpt-3.5-turbo-0613", - "system_fingerprint": "fp_44709d6fcb", - "object": "chat.completion", "usage": { - "completion_tokens": 18, "prompt_tokens": 82, - "total_tokens": 100 + "completion_tokens": 17, + "total_tokens": 99 } } + /completions: post: operationId: createCompletion @@ -1103,15 +1119,20 @@ paths: file=audio_file ) node: | - const { Configuration, OpenAIApi } = require("openai"); - const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, - }); - const openai = new OpenAIApi(configuration); - const resp = await openai.createTranslation( - fs.createReadStream("audio.mp3"), - "whisper-1" - ); + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const translation = await openai.audio.translations.create({ + 
file: fs.createReadStream("speech.mp3"), + model: "whisper-1", + }); + + console.log(translation.text); + } + main(); response: | { "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" @@ -1191,9 +1212,9 @@ paths: tags: - Files summary: | - Upload a file that can be used across various endpoints/features. The size of all the files uploaded by one organization can be up to 100 GB. + Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB. - The size of individual files for can be a maximum of 512MB. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. + The size of individual files can be a maximum of 512 MB. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
requestBody: @@ -1245,7 +1266,7 @@ paths: main(); response: | { - "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "id": "file-abc123", "object": "file", "bytes": 120000, "created_at": 1677610602, @@ -1285,7 +1306,7 @@ paths: from openai import OpenAI client = OpenAI() - client.files.delete("file-oaG6vwLtV3v3mWpvxexWDKxq") + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; @@ -1329,20 +1350,20 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/files/file-BK7bzQj3FfZFXr7DbL6xJwfo \ + curl https://api.openai.com/v1/files/file-abc123 \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | from openai import OpenAI client = OpenAI() - client.files.retrieve("file-BK7bzQj3FfZFXr7DbL6xJwfo") + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const file = await openai.files.retrieve("file-BK7bzQj3FfZFXr7DbL6xJwfo"); + const file = await openai.files.retrieve("file-abc123"); console.log(file); } @@ -1350,7 +1371,7 @@ paths: main(); response: | { - "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "id": "file-abc123", "object": "file", "bytes": 120000, "created_at": 1677610602, @@ -1383,20 +1404,20 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/files/file-BK7bzQj3FfZFXr7DbL6xJwfo/content \ + curl https://api.openai.com/v1/files/file-abc123/content \ -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl python: | from openai import OpenAI client = OpenAI() - content = client.files.retrieve_content("file-BK7bzQj3FfZFXr7DbL6xJwfo") + content = client.files.retrieve_content("file-abc123") node.js: | import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const file = await openai.files.retrieveContent("file-BK7bzQj3FfZFXr7DbL6xJwfo"); + const file = await openai.files.retrieveContent("file-abc123"); console.log(file); } @@ -1431,7 +1452,7 @@ paths: name: Create fine-tuning job returns: A 
[fine-tuning.job](/docs/api-reference/fine-tuning/object) object. examples: - - title: No hyperparameters + - title: Default request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ @@ -1476,7 +1497,7 @@ "validation_file": null, "training_file": "file-abc123", } - - title: Hyperparameters + - title: Epochs request: curl: | curl https://api.openai.com/v1/fine_tuning/jobs \ @@ -1912,7 +1933,14 @@ "training_file": "file-abc123" }' python: | - # deprecated + from openai import OpenAI + client = OpenAI() + + fine_tune = client.fine_tunes.create( + training_file="file-abc123", + model="davinci" + ) + print(fine_tune) node.js: | import OpenAI from "openai"; @@ -1987,7 +2015,11 @@ curl https://api.openai.com/v1/fine-tunes \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - # deprecated + from openai import OpenAI + client = OpenAI() + + models = client.fine_tunes.list() + print(models) node.js: |- import OpenAI from "openai"; @@ -2056,17 +2088,21 @@ examples: request: curl: | - curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + curl https://api.openai.com/v1/fine-tunes/ft-abc123 \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - # deprecated + from openai import OpenAI + client = OpenAI() + + fine_tune = client.fine_tunes.retrieve("ft-abc123") + print(fine_tune) node.js: |- import OpenAI from "openai"; const openai = new OpenAI(); async function main() { - const fineTune = await openai.fineTunes.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + const fineTune = await openai.fineTunes.retrieve("ft-abc123"); console.log(fineTune); } @@ -2074,7 +2110,7 @@ main(); response: &fine_tune_example | { - "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "id": "ft-abc123", "object": "fine-tune", "model": "curie", "created_at": 1614807352, @@ -2175,7 +2211,7 @@ curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - # deprecated + from 
openai import OpenAI + client = OpenAI() + + fine_tune = client.fine_tunes.cancel("ft-abc123") + print(fine_tune) node.js: |- import OpenAI from "openai"; @@ -2260,7 +2300,11 @@ paths: curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ -H "Authorization: Bearer $OPENAI_API_KEY" python: | - # deprecated + from openai import OpenAI + client = OpenAI() + + fine_tune = client.fine_tunes.list_events("ft-abc123") + print(fine_tune) node.js: |- import OpenAI from "openai"; @@ -2563,7 +2607,6 @@ paths: ] } - # Assistants /assistants: get: operationId: listAssistants @@ -2718,7 +2761,7 @@ paths: -H "OpenAI-Beta: assistants=v1" \ -d '{ "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", - "name": "Math Tutor" + "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], "model": "gpt-4" }' @@ -2906,7 +2949,7 @@ paths: post: operationId: modifyAssistant tags: - - Assistant + - Assistants summary: Modifies an assistant. 
parameters: - in: path @@ -3039,7 +3082,7 @@ paths: from openai import OpenAI client = OpenAI() - response = client.beta.assistants.delete("asst_QLoItBbqwyAJEzlTy4y9kOMM") + response = client.beta.assistants.delete("asst_abc123") print(response) node.js: |- import OpenAI from "openai"; @@ -3047,7 +3090,7 @@ paths: const openai = new OpenAI(); async function main() { - const response = await openai.beta.assistants.del("asst_QLoItBbqwyAJEzlTy4y9kOMM"); + const response = await openai.beta.assistants.del("asst_abc123"); console.log(response); } @@ -3436,7 +3479,7 @@ paths: from openai import OpenAI client = OpenAI() - thread_messages = client.beta.threads.messages.list("thread_1OWaSqVIxJdy3KYnJLbXEWhy") + thread_messages = client.beta.threads.messages.list("thread_abc123") print(thread_messages.data) node.js: |- import OpenAI from "openai"; @@ -3445,7 +3488,7 @@ paths: async function main() { const threadMessages = await openai.beta.threads.messages.list( - "thread_1OWaSqVIxJdy3KYnJLbXEWhy" + "thread_abc123" ); console.log(threadMessages.data); @@ -3801,11 +3844,11 @@ paths: request: curl: | curl https://api.openai.com/v1/threads/runs \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ -d '{ - "assistant_id": "asst_IgmpQTah3ZfPHCVZjTqAY8Kv", + "assistant_id": "asst_abc123", "thread": { "messages": [ {"role": "user", "content": "Explain deep learning to a 5 year old."} @@ -3817,7 +3860,7 @@ paths: client = OpenAI() run = client.beta.threads.create_and_run( - assistant_id="asst_IgmpQTah3ZfPHCVZjTqAY8Kv", + assistant_id="asst_abc123", thread={ "messages": [ {"role": "user", "content": "Explain deep learning to a 5 year old."} @@ -3831,7 +3874,7 @@ paths: async function main() { const run = await openai.beta.threads.createAndRun({ - assistant_id: "asst_IgmpQTah3ZfPHCVZjTqAY8Kv", 
+ assistant_id: "asst_abc123", thread: { messages: [ { role: "user", content: "Explain deep learning to a 5 year old." }, @@ -3845,11 +3888,11 @@ paths: main(); response: | { - "id": "run_3Qudf05GGhCleEg9ggwfJQih", + "id": "run_abc123", "object": "thread.run", "created_at": 1699076792, - "assistant_id": "asst_IgmpQTah3ZfPHCVZjTqAY8Kv", - "thread_id": "thread_Ec3eKZcWI00WDZRC7FZci8hP", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "queued", "started_at": null, "expires_at": 1699077392, @@ -3915,16 +3958,16 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_BDDwIqM4KgHibXX3mqmN3Lgs/runs \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() runs = client.beta.threads.runs.list( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs" + "thread_abc123" ) print(runs) node.js: | @@ -3934,7 +3977,7 @@ paths: async function main() { const runs = await openai.beta.threads.runs.list( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs" + "thread_abc123" ); console.log(runs); @@ -3946,11 +3989,11 @@ paths: "object": "list", "data": [ { - "id": "run_5pyUEwhaPk11vCKiDneUWXXY", + "id": "run_abc123", "object": "thread.run", "created_at": 1699075072, - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": "thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "completed", "started_at": 1699075072, "expires_at": null, @@ -3966,17 +4009,17 @@ paths: } ], "file_ids": [ - "file-9F1ex49ipEnKzyLUNnCA0Yzx", - "file-dEWwUbt2UGHp3v0e0DpCzemP" + "file-abc123", + "file-abc456" ], "metadata": {} }, { - "id": "run_UWvV94U0FQYiT2rlbBrdEVmC", + "id": "run_abc456", "object": "thread.run", "created_at": 
1699063290, - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": "thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "completed", "started_at": 1699063290, "expires_at": null, @@ -3992,14 +4035,14 @@ paths: } ], "file_ids": [ - "file-9F1ex49ipEnKzyLUNnCA0Yzx", - "file-dEWwUbt2UGHp3v0e0DpCzemP" + "file-abc123", + "file-abc456" ], "metadata": {} } ], - "first_id": "run_5pyUEwhaPk11vCKiDneUWXXY", - "last_id": "run_UWvV94U0FQYiT2rlbBrdEVmC", + "first_id": "run_abc123", + "last_id": "run_abc456", "has_more": false } post: @@ -4034,20 +4077,20 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_BDDwIqM4KgHibXX3mqmN3Lgs/runs \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' \ + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ -d '{ - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ" + "assistant_id": "asst_abc123" }' python: | from openai import OpenAI client = OpenAI() run = client.beta.threads.runs.create( - thread_id="thread_BDDwIqM4KgHibXX3mqmN3Lgs", - assistant_id="asst_nGl00s4xa9zmVY6Fvuvz9wwQ" + thread_id="thread_abc123", + assistant_id="asst_abc123" ) print(run) node.js: | @@ -4057,8 +4100,8 @@ paths: async function main() { const run = await openai.beta.threads.runs.create( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs", - { assistant_id: "asst_nGl00s4xa9zmVY6Fvuvz9wwQ" } + "thread_abc123", + { assistant_id: "asst_abc123" } ); console.log(run); @@ -4067,11 +4110,11 @@ paths: main(); response: &run_object_example | { - "id": "run_UWvV94U0FQYiT2rlbBrdEVmC", + "id": "run_abc123", "object": "thread.run", "created_at": 1699063290, - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": "thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "assistant_id": "asst_abc123", + 
"thread_id": "thread_abc123", "status": "queued", "started_at": 1699063290, "expires_at": null, @@ -4087,8 +4130,8 @@ paths: } ], "file_ids": [ - "file-9F1ex49ipEnKzyLUNnCA0Yzx", - "file-dEWwUbt2UGHp3v0e0DpCzemP" + "file-abc123", + "file-abc456" ], "metadata": {} } @@ -4126,16 +4169,16 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_BDDwIqM4KgHibXX3mqmN3Lgs/runs/run_5pyUEwhaPk11vCKiDneUWXXY \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() run = client.beta.threads.runs.retrieve( - thread_id="thread_BDDwIqM4KgHibXX3mqmN3Lgs", - run_id="run_5pyUEwhaPk11vCKiDneUWXXY" + thread_id="thread_abc123", + run_id="run_abc123" ) print(run) node.js: | @@ -4145,8 +4188,8 @@ paths: async function main() { const run = await openai.beta.threads.runs.retrieve( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs", - "run_5pyUEwhaPk11vCKiDneUWXXY" + "thread_abc123", + "run_abc123" ); console.log(run); @@ -4155,11 +4198,11 @@ paths: main(); response: | { - "id": "run_5pyUEwhaPk11vCKiDneUWXXY", + "id": "run_abc123", "object": "thread.run", "created_at": 1699075072, - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": "thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "completed", "started_at": 1699075072, "expires_at": null, @@ -4175,8 +4218,8 @@ paths: } ], "file_ids": [ - "file-9F1ex49ipEnKzyLUNnCA0Yzx", - "file-dEWwUbt2UGHp3v0e0DpCzemP" + "file-abc123", + "file-abc456" ], "metadata": {} } @@ -4218,13 +4261,13 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_BDDwIqM4KgHibXX3mqmN3Lgs/runs/run_5pyUEwhaPk11vCKiDneUWXXY \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: 
assistants=v1' \ + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ -d '{ "metadata": { - "user_id": "user_zmVY6FvuBDDwIqM4KgH" + "user_id": "user_abc123" } }' python: | @@ -4232,9 +4275,9 @@ paths: client = OpenAI() run = client.beta.threads.runs.update( - thread_id="thread_BDDwIqM4KgHibXX3mqmN3Lgs", - run_id="run_5pyUEwhaPk11vCKiDneUWXXY", - metadata={"user_id": "user_zmVY6FvuBDDwIqM4KgH"}, + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, ) print(run) node.js: | @@ -4244,11 +4287,11 @@ paths: async function main() { const run = await openai.beta.threads.runs.update( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs", - "run_5pyUEwhaPk11vCKiDneUWXXY", + "thread_abc123", + "run_abc123", { metadata: { - user_id: "user_zmVY6FvuBDDwIqM4KgH", + user_id: "user_abc123", }, } ); @@ -4259,11 +4302,11 @@ paths: main(); response: | { - "id": "run_5pyUEwhaPk11vCKiDneUWXXY", + "id": "run_abc123", "object": "thread.run", "created_at": 1699075072, - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": "thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "completed", "started_at": 1699075072, "expires_at": null, @@ -4279,11 +4322,11 @@ paths: } ], "file_ids": [ - "file-9F1ex49ipEnKzyLUNnCA0Yzx", - "file-dEWwUbt2UGHp3v0e0DpCzemP" + "file-abc123", + "file-abc456" ], "metadata": { - "user_id": "user_zmVY6FvuBDDwIqM4KgH" + "user_id": "user_abc123" } } @@ -4327,14 +4370,14 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_EdR8UvCDJ035LFEJZMt3AxCd/runs/run_PHLyHQYIQn4F7JrSXslEYWwh/submit_tool_outputs \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' \ + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/submit_tool_outputs \ + 
-H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ -d '{ "tool_outputs": [ { - "tool_call_id": "call_MbELIQcB72cq35Yzo2MRw5qs", + "tool_call_id": "call_abc123", "output": "28C" } ] @@ -4344,11 +4387,11 @@ paths: client = OpenAI() run = client.beta.threads.runs.submit_tool_outputs( - thread_id="thread_EdR8UvCDJ035LFEJZMt3AxCd", - run_id="run_PHLyHQYIQn4F7JrSXslEYWwh", + thread_id="thread_abc123", + run_id="run_abc123", tool_outputs=[ { - "tool_call_id": "call_MbELIQcB72cq35Yzo2MRw5qs", + "tool_call_id": "call_abc123", "output": "28C" } ] @@ -4361,12 +4404,12 @@ paths: async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( - "thread_EdR8UvCDJ035LFEJZMt3AxCd", - "run_PHLyHQYIQn4F7JrSXslEYWwh", + "thread_abc123", + "run_abc123", { tool_outputs: [ { - tool_call_id: "call_MbELIQcB72cq35Yzo2MRw5qs", + tool_call_id: "call_abc123", output: "28C", }, ], @@ -4379,11 +4422,11 @@ paths: main(); response: | { - "id": "run_PHLyHQYIQn4F7JrSXslEYWwh", + "id": "run_abc123", "object": "thread.run", "created_at": 1699075592, - "assistant_id": "asst_IgmpQTah3ZfPHCVZjTqAY8Kv", - "thread_id": "thread_EdR8UvCDJ035LFEJZMt3AxCd", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "queued", "started_at": 1699075592, "expires_at": 1699076192, @@ -4458,17 +4501,17 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_1cjnJPXj8MFiqTx58jU9TivC/runs/run_BeRGmpGt2wb1VI22ZRniOkrR/cancel \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'OpenAI-Beta: assistants=v1' \ + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v1" \ -X POST python: | from openai import OpenAI client = OpenAI() run = client.beta.threads.runs.cancel( - thread_id="thread_1cjnJPXj8MFiqTx58jU9TivC", - run_id="run_BeRGmpGt2wb1VI22ZRniOkrR" + thread_id="thread_abc123", + 
run_id="run_abc123" ) print(run) node.js: | @@ -4478,8 +4521,8 @@ paths: async function main() { const run = await openai.beta.threads.runs.cancel( - "thread_1cjnJPXj8MFiqTx58jU9TivC", - "run_BeRGmpGt2wb1VI22ZRniOkrR" + "thread_abc123", + "run_abc123" ); console.log(run); @@ -4488,11 +4531,11 @@ paths: main(); response: | { - "id": "run_BeRGmpGt2wb1VI22ZRniOkrR", + "id": "run_abc123", "object": "thread.run", "created_at": 1699076126, - "assistant_id": "asst_IgmpQTah3ZfPHCVZjTqAY8Kv", - "thread_id": "thread_1cjnJPXj8MFiqTx58jU9TivC", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "cancelling", "started_at": 1699076126, "expires_at": 1699076726, @@ -4568,17 +4611,17 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_BDDwIqM4KgHibXX3mqmN3Lgs/runs/run_UWvV94U0FQYiT2rlbBrdEVmC/steps \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() run_steps = client.beta.threads.runs.steps.list( - thread_id="thread_BDDwIqM4KgHibXX3mqmN3Lgs", - run_id="run_UWvV94U0FQYiT2rlbBrdEVmC" + thread_id="thread_abc123", + run_id="run_abc123" ) print(run_steps) node.js: | @@ -4587,8 +4630,8 @@ paths: async function main() { const runStep = await openai.beta.threads.runs.steps.list( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs", - "run_UWvV94U0FQYiT2rlbBrdEVmC" + "thread_abc123", + "run_abc123" ); console.log(runStep); } @@ -4599,12 +4642,12 @@ paths: "object": "list", "data": [ { - "id": "step_QyjyrsVsysd7F4K894BZHG97", + "id": "step_abc123", "object": "thread.run.step", "created_at": 1699063291, - "run_id": "run_UWvV94U0FQYiT2rlbBrdEVmC", - "assistant_id": "asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": 
"thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "type": "message_creation", "status": "completed", "cancelled_at": null, @@ -4615,13 +4658,13 @@ paths: "step_details": { "type": "message_creation", "message_creation": { - "message_id": "msg_6YmiCRmMbbE6FALYNePPHqwm" + "message_id": "msg_abc123" } } } ], - "first_id": "step_QyjyrsVsysd7F4K894BZHG97", - "last_id": "step_QyjyrsVsysd7F4K894BZHG97", + "first_id": "step_abc123", + "last_id": "step_abc456", "has_more": false } @@ -4664,18 +4707,18 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_BDDwIqM4KgHibXX3mqmN3Lgs/runs/run_UWvV94U0FQYiT2rlbBrdEVmC/steps/step_QyjyrsVsysd7F4K894BZHG97 \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() run_step = client.beta.threads.runs.steps.retrieve( - thread_id="thread_BDDwIqM4KgHibXX3mqmN3Lgs", - run_id="run_UWvV94U0FQYiT2rlbBrdEVmC", - step_id="step_QyjyrsVsysd7F4K894BZHG97" + thread_id="thread_abc123", + run_id="run_abc123", + step_id="step_abc123" ) print(run_step) node.js: | @@ -4684,9 +4727,9 @@ paths: async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( - "thread_BDDwIqM4KgHibXX3mqmN3Lgs", - "run_UWvV94U0FQYiT2rlbBrdEVmC", - "step_QyjyrsVsysd7F4K894BZHG97" + "thread_abc123", + "run_abc123", + "step_abc123" ); console.log(runStep); } @@ -4694,12 +4737,12 @@ paths: main(); response: &run_step_object_example | { - "id": "step_QyjyrsVsysd7F4K894BZHG97", + "id": "step_abc123", "object": "thread.run.step", "created_at": 1699063291, - "run_id": "run_UWvV94U0FQYiT2rlbBrdEVmC", - "assistant_id": 
"asst_nGl00s4xa9zmVY6Fvuvz9wwQ", - "thread_id": "thread_BDDwIqM4KgHibXX3mqmN3Lgs", + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "type": "message_creation", "status": "completed", "cancelled_at": null, @@ -4710,7 +4753,7 @@ paths: "step_details": { "type": "message_creation", "message_creation": { - "message_id": "msg_6YmiCRmMbbE6FALYNePPHqwm" + "message_id": "msg_abc123" } } } @@ -4766,16 +4809,16 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/assistants/asst_DUGk5I7sK0FpKeijvrO30z9J/files \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/assistants/asst_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() assistant_files = client.beta.assistants.files.list( - assistant_id="asst_DUGk5I7sK0FpKeijvrO30z9J" + assistant_id="asst_abc123" ) print(assistant_files) node.js: | @@ -4784,7 +4827,7 @@ paths: async function main() { const assistantFiles = await openai.beta.assistants.files.list( - "asst_FBOFvAOHhwEWMghbMGseaPGQ" + "asst_abc123" ); console.log(assistantFiles); } @@ -4795,20 +4838,20 @@ paths: "object": "list", "data": [ { - "id": "file-dEWwUbt2UGHp3v0e0DpCzemP", + "id": "file-abc123", "object": "assistant.file", "created_at": 1699060412, - "assistant_id": "asst_DUGk5I7sK0FpKeijvrO30z9J" + "assistant_id": "asst_abc123" }, { - "id": "file-9F1ex49ipEnKzyLUNnCA0Yzx", + "id": "file-abc456", "object": "assistant.file", "created_at": 1699060412, - "assistant_id": "asst_DUGk5I7sK0FpKeijvrO30z9J" + "assistant_id": "asst_abc123" } ], - "first_id": "file-dEWwUbt2UGHp3v0e0DpCzemP", - "last_id": "file-9F1ex49ipEnKzyLUNnCA0Yzx", + "first_id": "file-abc123", + "last_id": "file-abc456", "has_more": false } post: @@ -4822,7 +4865,7 @@ paths: required: true schema: type: 
string - example: file-AF1WoRqd3aJAHsqc9NY7iL8F + example: file-abc123 description: | The ID of the assistant for which to create a File. requestBody: @@ -4845,20 +4888,20 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/assistants/asst_FBOFvAOHhwEWMghbMGseaPGQ/files \ + curl https://api.openai.com/v1/assistants/asst_abc123/files \ -H 'Authorization: Bearer $OPENAI_API_KEY"' \ -H 'Content-Type: application/json' \ -H 'OpenAI-Beta: assistants=v1' \ -d '{ - "file_id": "file-wB6RM6wHdA49HfS2DJ9fEyrH" + "file_id": "file-abc123" }' python: | from openai import OpenAI client = OpenAI() assistant_file = client.beta.assistants.files.create( - assistant_id="asst_FBOFvAOHhwEWMghbMGseaPGQ", - file_id="file-wB6RM6wHdA49HfS2DJ9fEyrH" + assistant_id="asst_abc123", + file_id="file-abc123" ) print(assistant_file) node.js: | @@ -4867,9 +4910,9 @@ paths: async function main() { const myAssistantFile = await openai.beta.assistants.files.create( - "asst_FBOFvAOHhwEWMghbMGseaPGQ", + "asst_abc123", { - file_id: "file-wB6RM6wHdA49HfS2DJ9fEyrH" + file_id: "file-abc123" } ); console.log(myAssistantFile); @@ -4878,10 +4921,10 @@ paths: main(); response: &assistant_file_object | { - "id": "file-wB6RM6wHdA49HfS2DJ9fEyrH", + "id": "file-abc123", "object": "assistant.file", "created_at": 1699055364, - "assistant_id": "asst_FBOFvAOHhwEWMghbMGseaPGQ" + "assistant_id": "asst_abc123" } /assistants/{assistant_id}/files/{file_id}: @@ -4917,7 +4960,7 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/assistants/asst_FBOFvAOHhwEWMghbMGseaPGQ/files/file-wB6RM6wHdA49HfS2DJ9fEyrH \ + curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \ -H 'Authorization: Bearer $OPENAI_API_KEY"' \ -H 'Content-Type: application/json' \ -H 'OpenAI-Beta: assistants=v1' @@ -4926,8 +4969,8 @@ paths: client = OpenAI() assistant_file = client.beta.assistants.files.retrieve( - assistant_id="asst_FBOFvAOHhwEWMghbMGseaPGQ", - file_id="file-wB6RM6wHdA49HfS2DJ9fEyrH" + 
assistant_id="asst_abc123", + file_id="file-abc123" ) print(assistant_file) node.js: | @@ -4936,8 +4979,8 @@ paths: async function main() { const myAssistantFile = await openai.beta.assistants.files.retrieve( - "asst_FBOFvAOHhwEWMghbMGseaPGQ", - "file-wB6RM6wHdA49HfS2DJ9fEyrH" + "asst_abc123", + "file-abc123" ); console.log(myAssistantFile); } @@ -4976,18 +5019,18 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/assistants/asst_DUGk5I7sK0FpKeijvrO30z9J/files/file-9F1ex49ipEnKzyLUNnCA0Yzx \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' \ + curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ -X DELETE python: | from openai import OpenAI client = OpenAI() deleted_assistant_file = client.beta.assistants.files.delete( - assistant_id="asst_DUGk5I7sK0FpKeijvrO30z9J", - file_id="file-dEWwUbt2UGHp3v0e0DpCzemP" + assistant_id="asst_abc123", + file_id="file-abc123" ) print(deleted_assistant_file) node.js: | @@ -4996,8 +5039,8 @@ paths: async function main() { const deletedAssistantFile = await openai.beta.assistants.files.del( - "asst_FBOFvAOHhwEWMghbMGseaPGQ", - "file-wB6RM6wHdA49HfS2DJ9fEyrH" + "asst_abc123", + "file-abc123" ); console.log(deletedAssistantFile); } @@ -5005,7 +5048,7 @@ paths: main(); response: | { - id: "file-BK7bzQj3FfZFXr7DbL6xJwfo", + id: "file-abc123", object: "assistant.file.deleted", deleted: true } @@ -5067,17 +5110,17 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_RGUhOuO9b2nrktrmsQ2uSR6I/messages/msg_q3XhbGmMzsqEFa81gMLBDAVU/files \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123/files \ + -H "Authorization: Bearer 
$OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() message_files = client.beta.threads.messages.files.list( - thread_id="thread_RGUhOuO9b2nrktrmsQ2uSR6I", - message_id="msg_q3XhbGmMzsqEFa81gMLBDAVU" + thread_id="thread_abc123", + message_id="msg_abc123" ) print(message_files) node.js: | @@ -5086,8 +5129,8 @@ paths: async function main() { const messageFiles = await openai.beta.threads.messages.files.list( - "thread_RGUhOuO9b2nrktrmsQ2uSR6I", - "msg_q3XhbGmMzsqEFa81gMLBDAVU" + "thread_abc123", + "msg_abc123" ); console.log(messageFiles); } @@ -5098,20 +5141,20 @@ paths: "object": "list", "data": [ { - "id": "file-dEWwUbt2UGHp3v0e0DpCzemP", + "id": "file-abc123", "object": "thread.message.file", "created_at": 1699061776, - "message_id": "msg_q3XhbGmMzsqEFa81gMLBDAVU" + "message_id": "msg_abc123" }, { - "id": "file-dEWwUbt2UGHp3v0e0DpCzemP", + "id": "file-abc123", "object": "thread.message.file", "created_at": 1699061776, - "message_id": "msg_q3XhbGmMzsqEFa81gMLBDAVU" + "message_id": "msg_abc123" } ], - "first_id": "file-dEWwUbt2UGHp3v0e0DpCzemP", - "last_id": "file-dEWwUbt2UGHp3v0e0DpCzemP", + "first_id": "file-abc123", + "last_id": "file-abc123", "has_more": false } @@ -5127,21 +5170,21 @@ paths: required: true schema: type: string - example: thread_AF1WoRqd3aJAHsqc9NY7iL8F + example: thread_abc123 description: The ID of the thread to which the message and File belong. - in: path name: message_id required: true schema: type: string - example: msg_AF1WoRqd3aJAHsqc9NY7iL8F + example: msg_abc123 description: The ID of the message the file belongs to. - in: path name: file_id required: true schema: type: string - example: file-AF1WoRqd3aJAHsqc9NY7iL8F + example: file-abc123 description: The ID of the file being retrieved. 
responses: "200": @@ -5157,18 +5200,18 @@ paths: examples: request: curl: | - curl https://api.openai.com/v1/threads/thread_RGUhOuO9b2nrktrmsQ2uSR6I/messages/msg_q3XhbGmMzsqEFa81gMLBDAVU/files/file-dEWwUbt2UGHp3v0e0DpCzemP \ - -H 'Authorization: Bearer $OPENAI_API_KEY' \ - -H 'Content-Type: application/json' \ - -H 'OpenAI-Beta: assistants=v1' + curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" python: | from openai import OpenAI client = OpenAI() message_files = client.beta.threads.messages.files.retrieve( - thread_id="thread_RGUhOuO9b2nrktrmsQ2uSR6I", - message_id="msg_q3XhbGmMzsqEFa81gMLBDAVU", - file_id="file-dEWwUbt2UGHp3v0e0DpCzemP" + thread_id="thread_abc123", + message_id="msg_abc123", + file_id="file-abc123" ) print(message_files) node.js: | @@ -5177,9 +5220,9 @@ paths: async function main() { const messageFile = await openai.beta.threads.messages.files.retrieve( - "thread_RGUhOuO9b2nrktrmsQ2uSR6I", - "msg_q3XhbGmMzsqEFa81gMLBDAVU", - "file-dEWwUbt2UGHp3v0e0DpCzemP" + "thread_abc123", + "msg_abc123", + "file-abc123" ); console.log(messageFile); } @@ -5187,10 +5230,10 @@ paths: main(); response: | { - "id": "file-dEWwUbt2UGHp3v0e0DpCzemP", + "id": "file-abc123", "object": "thread.message.file", "created_at": 1699061776, - "message_id": "msg_q3XhbGmMzsqEFa81gMLBDAVU" + "message_id": "msg_abc123" } components: @@ -5334,7 +5377,7 @@ components: description: &completions_frequency_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
- [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) logit_bias: &completions_logit_bias type: object x-oaiTypeLabel: map @@ -5388,7 +5431,7 @@ components: description: &completions_presence_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) seed: &completions_seed_param type: integer minimum: -9223372036854775808 @@ -5581,7 +5624,7 @@ components: format: uri detail: type: string - description: Specifies the detail level of the image. + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). enum: ["auto", "low", "high"] default: "auto" required: @@ -5626,6 +5669,9 @@ components: type: string enum: ["system"] description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. required: - content - role @@ -5648,10 +5694,14 @@ components: items: $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart" minItems: 1 + x-oaiExpandable: true role: type: string enum: ["user"] description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
required: - content - role @@ -5669,6 +5719,9 @@ components: type: string enum: ["assistant"] description: The role of the messages author, in this case `assistant`. + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. tool_calls: $ref: "#/components/schemas/ChatCompletionMessageToolCalls" function_call: @@ -5718,21 +5771,20 @@ components: type: string enum: ["function"] description: The role of the messages author, in this case `function`. - content: + arguments: type: string - nullable: true - description: The return value from the function call, to return to the model. + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. name: type: string description: The name of the function to call. required: - role - - name + - arguments - content FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nTo describe a function that accepts no parameters, provide the value `{\"type\": \"object\", \"properties\": {}}`." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nTo describe a function that accepts no parameters, provide the value `{\"type\": \"object\", \"properties\": {}}`." 
additionalProperties: true ChatCompletionFunctions: @@ -5979,11 +6031,11 @@ components: "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", - "gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k-0613", ] x-oaiTypeLabel: string @@ -6020,7 +6072,7 @@ components: default: 1 example: 1 nullable: true - description: How many chat completion choices to generate for each input message. + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: type: number default: 0 @@ -6035,7 +6087,7 @@ components: Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in increased latency and appearance of a "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
properties: type: type: string @@ -6393,8 +6445,8 @@ components: CreateEditResponse: type: object - title: Edit deprecated: true + title: Edit properties: choices: type: array @@ -7099,31 +7151,43 @@ components: properties: input: description: | - Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. example: "The quick brown fox jumped over the lazy dog" oneOf: - type: string + title: string + description: The string that will be turned into an embedding. default: "" example: "This is a test." - type: array + title: array + description: The array of strings that will be turned into an embedding. minItems: 1 + maxItems: 2048 items: type: string default: "" - example: "This is a test." + example: "['This is a test.']" - type: array + title: array + description: The array of integers that will be turned into an embedding. minItems: 1 + maxItems: 2048 items: type: integer example: "[1212, 318, 257, 1332, 13]" - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. 
minItems: 1 + maxItems: 2048 items: type: array minItems: 1 items: type: integer example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true model: description: *model_description example: "text-embedding-ada-002" @@ -7296,7 +7360,7 @@ components: description: The text to generate audio for. The maximum length is 4096 characters. maxLength: 4096 voice: - description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. + description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). type: string enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] response_format: @@ -7392,7 +7456,7 @@ components: name: The File object example: | { - "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "id": "file-abc123", "object": "file", "bytes": 120000, "created_at": 1677610602, @@ -7940,10 +8004,10 @@ components: $ref: "#/components/schemas/AssistantObject" first_id: type: string - example: "asst_hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "asst_abc123" last_id: type: string - example: "asst_QLoItBbqwyAJEzlTy4y9kOMM" + example: "asst_abc456" has_more: type: boolean example: false @@ -8138,11 +8202,11 @@ components: beta: true example: | { - "id": "run_example123", + "id": "run_abc123", "object": "thread.run", "created_at": 1698107661, - "assistant_id": "asst_gZ1aOomboBuYWPcXJx4vAYB0", - "thread_id": "thread_adOpf7Jbb5Abymz0QbwxAh3c", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", "status": "completed", "started_at": 1699073476, "expires_at": null, @@ -8188,6 +8252,7 @@ components: x-oaiTypeLabel: map nullable: true required: + - thread_id - assistant_id ListRunsResponse: type: object @@ -8201,10 +8266,10 @@ components: $ref: "#/components/schemas/RunObject" first_id: type: string - example: 
"run_hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "run_abc123" last_id: type: string - example: "run_QLoItBbqwyAJEzlTy4y9kOMM" + example: "run_abc456" has_more: type: boolean example: false @@ -8241,6 +8306,7 @@ components: description: The output of the tool call to be submitted to continue the run. required: - tool_outputs + RunToolCallObject: type: object description: Tool call objects @@ -8269,6 +8335,7 @@ components: - id - type - function + CreateThreadAndRunRequest: type: object additionalProperties: false @@ -8303,6 +8370,7 @@ components: x-oaiTypeLabel: map nullable: true required: + - thread_id - assistant_id ThreadObject: @@ -8392,10 +8460,10 @@ components: $ref: "#/components/schemas/ThreadObject" first_id: type: string - example: "asst_hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "asst_abc123" last_id: type: string - example: "asst_QLoItBbqwyAJEzlTy4y9kOMM" + example: "asst_abc456" has_more: type: boolean example: false @@ -8472,10 +8540,10 @@ components: beta: true example: | { - "id": "msg_dKYDWyQvtjDBi3tudL1yWKDa", + "id": "msg_abc123", "object": "thread.message", "created_at": 1698983503, - "thread_id": "thread_RGUhOuO9b2nrktrmsQ2uSR6I", + "thread_id": "thread_abc123", "role": "assistant", "content": [ { @@ -8487,8 +8555,8 @@ components: } ], "file_ids": [], - "assistant_id": "asst_ToSF7Gb04YMj8AMMm50ZLLtY", - "run_id": "run_BjylUJgDqYK9bOhy4yjAiMrn", + "assistant_id": "asst_abc123", + "run_id": "run_abc123", "metadata": {} } @@ -8558,10 +8626,10 @@ components: $ref: "#/components/schemas/MessageObject" first_id: type: string - example: "msg_hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "msg_abc123" last_id: type: string - example: "msg_QLoItBbqwyAJEzlTy4y9kOMM" + example: "msg_abc123" has_more: type: boolean example: false @@ -8800,10 +8868,10 @@ components: $ref: "#/components/schemas/RunStepObject" first_id: type: string - example: "step_hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "step_abc123" last_id: type: string - example: "step_QLoItBbqwyAJEzlTy4y9kOMM" + example: 
"step_abc456" has_more: type: boolean example: false @@ -9013,10 +9081,10 @@ components: beta: true example: | { - "id": "file-wB6RM6wHdA49HfS2DJ9fEyrH", + "id": "file-abc123", "object": "assistant.file", "created_at": 1699055364, - "assistant_id": "asst_FBOFvAOHhwEWMghbMGseaPGQ" + "assistant_id": "asst_abc123" } CreateAssistantFileRequest: @@ -9055,10 +9123,10 @@ components: $ref: "#/components/schemas/AssistantFileObject" first_id: type: string - example: "file-hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "file-abc123" last_id: type: string - example: "file-QLoItBbqwyAJEzlTy4y9kOMM" + example: "file-abc456" has_more: type: boolean example: false @@ -9098,11 +9166,11 @@ components: beta: true example: | { - "id": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "id": "file-abc123", "object": "thread.message.file", "created_at": 1698107661, "message_id": "message_QLoItBbqwyAJEzlTy4y9kOMM", - "file_id": "file-BK7bzQj3FfZFXr7DbL6xJwfo" + "file_id": "file-abc123" } ListMessageFilesResponse: @@ -9116,10 +9184,10 @@ components: $ref: "#/components/schemas/MessageFileObject" first_id: type: string - example: "file-hLBK7PXBv5Lr2NQT7KLY0ag1" + example: "file-abc123" last_id: type: string - example: "file-QLoItBbqwyAJEzlTy4y9kOMM" + example: "file-abc456" has_more: type: boolean example: false @@ -9177,7 +9245,7 @@ x-oaiMeta: description: | Given a list of messages comprising a conversation, the model will return a response. - Related guide: [Chat Completions](/docs/guides/gpt) + Related guide: [Chat Completions](/docs/guides/text-generation) sections: - type: object key: CreateChatCompletionResponse @@ -9188,20 +9256,6 @@ x-oaiMeta: - type: endpoint key: createChatCompletion path: create - - id: completions - title: Completions - legacy: true - description: | - Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. We recommend most users use our Chat Completions API. 
[Learn more](/docs/deprecations/2023-07-06-gpt-and-embeddings) - - Related guide: [Legacy Completions](/docs/guides/gpt/completions-api) - sections: - - type: object - key: CreateCompletionResponse - path: object - - type: endpoint - key: createCompletion - path: create - id: embeddings title: Embeddings description: | @@ -9452,13 +9506,37 @@ x-oaiMeta: - type: endpoint key: listRunSteps path: listRunSteps + - id: completions + title: Completions + legacy: true + description: | + Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developers should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models. Most models that support the legacy Completions endpoint [will be shut off on January 4th, 2024](/docs/deprecations/2023-07-06-gpt-and-embeddings). + sections: + - type: object + key: CreateCompletionResponse + path: object + - type: endpoint + key: createCompletion + path: create + - id: edits + title: Edits + deprecated: true + description: | + Given a prompt and an instruction, the model will return an edited version of the prompt. The Edits endpoint is deprecated and will be [shut off on January 4th, 2024](/docs/deprecations/edit-models-endpoint). + sections: + - type: object + key: CreateEditResponse + path: object + - type: endpoint + key: createEdit + path: create - id: fine-tunes title: Fine-tunes deprecated: true description: | - Manage legacy fine-tuning jobs to tailor a model to your specific training data. + Manage fine-tuning jobs to tailor a model to your specific training data. The [updated Fine-tuning endpoint](/docs/guides/fine-tuning) offers more capabilities and newer models. - We recommend transitioning to the updating [fine-tuning API](/docs/guides/fine-tuning) + The Fine-tunes endpoint will be [shut off on January 4th, 2024](/docs/deprecations/2023-08-22-fine-tunes-endpoint). 
sections: - type: object key: FineTune @@ -9481,15 +9559,3 @@ x-oaiMeta: - type: endpoint key: listFineTuneEvents path: list-events - - id: edits - title: Edits - deprecated: true - description: | - Given a prompt and an instruction, the model will return an edited version of the prompt. - sections: - - type: object - key: CreateEditResponse - path: object - - type: endpoint - key: createEdit - path: create