5 changes: 5 additions & 0 deletions lib/req_llm/providers/azure.ex
@@ -246,6 +246,11 @@ defmodule ReqLLM.Providers.Azure do
max_completion_tokens: [
type: :any,
doc: "Maximum completion tokens (OpenAI reasoning models)"
],
verbosity: [
type: {:or, [:atom, :string]},
doc:
"Constrains the verbosity of the model's response. Supported values: 'low', 'medium', 'high'. Defaults to 'medium'. (OpenAI models only)"
]
]

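For anyone exercising the new option end to end, a minimal usage sketch. The model string and prompt are illustrative, and the ReqLLM.generate_text/3 entry point is assumed; the point is that verbosity rides in provider_options like the provider's other pass-through options:

    # Hypothetical call; assumes the standard ReqLLM.generate_text/3 entry point.
    {:ok, response} =
      ReqLLM.generate_text(
        "openai:gpt-4o",
        "Summarize this ticket in one paragraph.",
        provider_options: [verbosity: :low]
      )
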
10 changes: 10 additions & 0 deletions lib/req_llm/providers/azure/openai.ex
@@ -173,6 +173,7 @@ defmodule ReqLLM.Providers.Azure.OpenAI do
|> maybe_put(:n, opts[:n])
|> maybe_put(:reasoning_effort, provider_opts[:reasoning_effort])
|> maybe_put(:service_tier, provider_opts[:service_tier])
|> add_verbosity(provider_opts)
|> add_stream_options(opts)
|> AdapterHelpers.add_parallel_tool_calls(opts, provider_opts)
|> AdapterHelpers.translate_tool_choice_format()
@@ -205,6 +206,15 @@ defmodule ReqLLM.Providers.Azure.OpenAI do
end
end

defp add_verbosity(body, provider_opts) do
verbosity = provider_opts[:verbosity]
maybe_put(body, :verbosity, normalize_verbosity(verbosity))
end

defp normalize_verbosity(nil), do: nil
defp normalize_verbosity(v) when is_atom(v), do: Atom.to_string(v)
defp normalize_verbosity(v) when is_binary(v), do: v

@doc """
Formats an embedding request for Azure OpenAI.

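A note on normalize_verbosity/1 above: nil is an atom in Elixir, so the dedicated nil clause must stay ahead of the is_atom/1 guard. Without it, is_atom(nil) would match first, Atom.to_string(nil) would produce "nil", and maybe_put/3 would serialize that bogus value instead of omitting the field, which is exactly what the "omit verbosity when not provided" test below guards against. Expected behavior of the (private) helper:

    normalize_verbosity(nil)    #=> nil   (maybe_put/3 then drops :verbosity)
    normalize_verbosity(:low)   #=> "low"
    normalize_verbosity("high") #=> "high"
    # Without the nil clause: is_atom(nil) is true, so nil would become "nil"
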
5 changes: 5 additions & 0 deletions lib/req_llm/providers/openai.ex
@@ -155,6 +155,11 @@ defmodule ReqLLM.Providers.OpenAI do
service_tier: [
type: {:or, [:atom, :string]},
doc: "Service tier for request prioritization ('auto', 'default', 'flex' or 'priority')"
],
verbosity: [
type: {:or, [:atom, :string]},
doc:
"Constrains the verbosity of the model's response. Supported values: 'low', 'medium', 'high'. Defaults to 'medium'."
]
]

11 changes: 11 additions & 0 deletions lib/req_llm/providers/openai/chat_api.ex
@@ -117,6 +117,7 @@ defmodule ReqLLM.Providers.OpenAI.ChatAPI do
|> add_stream_options(opts_map)
|> add_reasoning_effort(opts_map)
|> add_service_tier(opts_map)
|> add_verbosity(opts_map)
|> add_response_format(opts_map)
|> add_parallel_tool_calls(opts_map)
|> translate_tool_choice_format()
@@ -207,6 +208,16 @@ defmodule ReqLLM.Providers.OpenAI.ChatAPI do
maybe_put(body, :service_tier, service_tier)
end

defp add_verbosity(body, request_options) do
provider_opts = request_options[:provider_options] || []
verbosity = provider_opts[:verbosity]
maybe_put(body, :verbosity, normalize_verbosity(verbosity))
end

defp normalize_verbosity(nil), do: nil
defp normalize_verbosity(v) when is_atom(v), do: Atom.to_string(v)
defp normalize_verbosity(v) when is_binary(v), do: v

defp translate_tool_choice_format(body) do
{tool_choice, body_key} =
cond do
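As the unit tests further down confirm, the Chat Completions encoder places the option at the top level of the request body, whereas the Responses API change below nests it under the "text" object. A sketch of the encoded body for provider_options: [verbosity: :low], with unrelated fields elided and the message shape assumed to follow the usual OpenAI chat format:

    %{
      "model" => "gpt-4o",
      "messages" => [%{"role" => "user", "content" => "Hello"}],
      "verbosity" => "low"
    }
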
53 changes: 33 additions & 20 deletions lib/req_llm/providers/openai/responses_api.ex
@@ -304,7 +304,7 @@ defmodule ReqLLM.Providers.OpenAI.ResponsesAPI do
reasoning = encode_reasoning_effort(opts_map[:reasoning_effort])
service_tier = opts_map[:service_tier] || provider_opts[:service_tier]

-    text_format = encode_text_format(provider_opts[:response_format])
+    text_format = encode_text_format(provider_opts[:response_format], provider_opts[:verbosity])

final_input =
if previous_response_id == nil and reasoning_items != [] do
@@ -768,34 +768,47 @@ defmodule ReqLLM.Providers.OpenAI.ResponsesAPI do
defp encode_reasoning_effort(_), do: nil

@doc false
-  def encode_text_format(nil), do: nil
-
-  def encode_text_format(response_format) when is_map(response_format) do
-    # Extract type - could be atom or string key
+  def encode_text_format(response_format, verbosity \\ nil)
+
+  def encode_text_format(nil, nil), do: nil
+
+  def encode_text_format(nil, verbosity) do
+    %{"verbosity" => normalize_verbosity(verbosity)}
+  end
+
+  def encode_text_format(response_format, verbosity) when is_map(response_format) do
     type = response_format[:type] || response_format["type"]

-    case type do
-      "json_schema" ->
-        json_schema = response_format[:json_schema] || response_format["json_schema"]
-        # Schema.to_json handles both keyword lists (converts) and maps (pass-through)
-        schema = ReqLLM.Schema.to_json(json_schema[:schema] || json_schema["schema"])
-
-        # ResponsesAPI expects a flattened structure:
-        # text.format.{type, name, strict, schema} instead of text.format.json_schema.{name, strict, schema}
-        %{
-          "format" => %{
-            "type" => "json_schema",
-            "name" => json_schema[:name] || json_schema["name"],
-            "strict" => json_schema[:strict] || json_schema["strict"],
-            "schema" => schema
-          }
-        }
-
-      _ ->
-        nil
-    end
+    base =
+      case type do
+        "json_schema" ->
+          json_schema = response_format[:json_schema] || response_format["json_schema"]
+          schema = ReqLLM.Schema.to_json(json_schema[:schema] || json_schema["schema"])
+
+          %{
+            "format" => %{
+              "type" => "json_schema",
+              "name" => json_schema[:name] || json_schema["name"],
+              "strict" => json_schema[:strict] || json_schema["strict"],
+              "schema" => schema
+            }
+          }
+
+        _ ->
+          %{}
+      end
+
+    case {base, verbosity} do
+      {b, nil} when map_size(b) == 0 -> nil
+      {b, v} when map_size(b) == 0 -> %{"verbosity" => normalize_verbosity(v)}
+      {b, nil} -> b
+      {b, v} -> Map.put(b, "verbosity", normalize_verbosity(v))
+    end
   end
+
+  defp normalize_verbosity(v) when is_atom(v), do: Atom.to_string(v)
+  defp normalize_verbosity(v) when is_binary(v), do: v

defp decode_responses_success({req, resp}) do
body = ReqLLM.Provider.Utils.ensure_parsed_body(resp.body)

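Reading encode_text_format/2's final case expression off directly, the four {response_format, verbosity} combinations behave as follows (format_map stands for any json_schema-style response_format):

    encode_text_format(nil, nil)            #=> nil (no "text" object is emitted)
    encode_text_format(nil, :low)           #=> %{"verbosity" => "low"}
    encode_text_format(format_map, nil)     #=> %{"format" => %{...}}
    encode_text_format(format_map, "high")  #=> %{"format" => %{...}, "verbosity" => "high"}
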
39 changes: 39 additions & 0 deletions test/provider/azure/azure_test.exs
@@ -698,6 +698,45 @@ defmodule ReqLLM.Providers.AzureTest do
end
end

describe "verbosity option" do
test "OpenAI models include verbosity when provided as atom" do
context = ReqLLM.Context.new([ReqLLM.Context.user("Hello")])
opts = [stream: false, provider_options: [verbosity: :low]]

body = Azure.OpenAI.format_request("gpt-4o", context, opts)

assert body[:verbosity] == "low"
end

test "OpenAI models include verbosity when provided as string" do
context = ReqLLM.Context.new([ReqLLM.Context.user("Hello")])
opts = [stream: false, provider_options: [verbosity: "high"]]

body = Azure.OpenAI.format_request("gpt-4o", context, opts)

assert body[:verbosity] == "high"
end

test "OpenAI models omit verbosity when not provided" do
context = ReqLLM.Context.new([ReqLLM.Context.user("Hello")])
opts = [stream: false]

body = Azure.OpenAI.format_request("gpt-4o", context, opts)

refute Map.has_key?(body, :verbosity)
end

test "verbosity works with reasoning models" do
context = ReqLLM.Context.new([ReqLLM.Context.user("Hello")])
opts = [stream: false, provider_options: [verbosity: :medium, reasoning_effort: "high"]]

body = Azure.OpenAI.format_request("o3-mini", context, opts)

assert body[:verbosity] == "medium"
assert body[:reasoning_effort] == "high"
end
end

describe "reasoning model features" do
import ExUnit.CaptureLog

54 changes: 54 additions & 0 deletions test/provider/openai/responses_api_unit_test.exs
@@ -308,6 +308,60 @@ defmodule Provider.OpenAI.ResponsesAPIUnitTest do
assert body["text"]["format"]["strict"] == true
assert body["text"]["format"]["schema"] == json_schema
end

test "encodes verbosity when provided as atom" do
request = build_request(provider_options: [verbosity: :low])

encoded = ResponsesAPI.encode_body(request)
body = Jason.decode!(encoded.body)

assert body["text"]["verbosity"] == "low"
end

test "encodes verbosity when provided as string" do
request = build_request(provider_options: [verbosity: "high"])

encoded = ResponsesAPI.encode_body(request)
body = Jason.decode!(encoded.body)

assert body["text"]["verbosity"] == "high"
end

test "omits text field when no verbosity or response_format" do
request = build_request(provider_options: [])

encoded = ResponsesAPI.encode_body(request)
body = Jason.decode!(encoded.body)

refute Map.has_key?(body, "text")
end

test "encodes verbosity alongside response_format in text object" do
json_schema = %{
"type" => "object",
"properties" => %{"name" => %{"type" => "string"}},
"required" => ["name"]
}

response_format = %{
type: "json_schema",
json_schema: %{
name: "test_schema",
strict: true,
schema: json_schema
}
}

request =
build_request(provider_options: [response_format: response_format, verbosity: :medium])

encoded = ResponsesAPI.encode_body(request)
body = Jason.decode!(encoded.body)

assert body["text"]["format"]["type"] == "json_schema"
assert body["text"]["format"]["name"] == "test_schema"
assert body["text"]["verbosity"] == "medium"
end
end

describe "decode_response/1" do
53 changes: 53 additions & 0 deletions test/providers/openai_test.exs
@@ -500,6 +500,59 @@ defmodule ReqLLM.Providers.OpenAITest do

assert decoded["service_tier"] == "flex"
end

test "encode_body includes verbosity when provided as atom" do
{:ok, model} = ReqLLM.model("openai:gpt-4o")
context = context_fixture()

mock_request = %Req.Request{
options: [
context: context,
model: model.model,
provider_options: [verbosity: :low]
]
}

updated_request = OpenAI.encode_body(mock_request)
decoded = Jason.decode!(updated_request.body)

assert decoded["verbosity"] == "low"
end

test "encode_body includes verbosity when provided as string" do
{:ok, model} = ReqLLM.model("openai:gpt-4o")
context = context_fixture()

mock_request = %Req.Request{
options: [
context: context,
model: model.model,
provider_options: [verbosity: "high"]
]
}

updated_request = OpenAI.encode_body(mock_request)
decoded = Jason.decode!(updated_request.body)

assert decoded["verbosity"] == "high"
end

test "encode_body omits verbosity when not provided" do
{:ok, model} = ReqLLM.model("openai:gpt-4o")
context = context_fixture()

mock_request = %Req.Request{
options: [
context: context,
model: model.model
]
}

updated_request = OpenAI.encode_body(mock_request)
decoded = Jason.decode!(updated_request.body)

refute Map.has_key?(decoded, "verbosity")
end
end

describe "response decoding" do