# -*- coding: utf-8 -*-
"""MiniMax Chat Model."""

import os
import re
from typing import Any, AsyncGenerator, Callable, Dict, Literal, Type

from pydantic import BaseModel

from openjudge.models.openai_chat_model import OpenAIChatModel
from openjudge.models.schema.oai.message import ChatMessage
from openjudge.models.schema.oai.response import ChatResponse

# MiniMax-supported models with 204K context window
MINIMAX_MODELS = [
    "MiniMax-M2.7",
    "MiniMax-M2.7-highspeed",
    "MiniMax-M2.5",
    "MiniMax-M2.5-highspeed",
]
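
# A minimal sketch (not enforced anywhere in this module): callers could use
# MINIMAX_MODELS to validate a model name before constructing the client, e.g.
#
#     if model_name not in MINIMAX_MODELS:
#         raise ValueError(f"Unsupported MiniMax model: {model_name}")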

_THINK_TAG_RE = re.compile(r"<think>.*?</think>", re.DOTALL)


def _strip_think_tags(text: str) -> str:
    """Remove <think>...</think> reasoning blocks from MiniMax model output."""
    return _THINK_TAG_RE.sub("", text).strip()
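
# Illustrative behaviour of the helper above (the literal strings are made-up
# examples, not real model output):
#
#     _strip_think_tags("<think>Consider Paris...</think>Paris is the capital.")
#     # -> "Paris is the capital."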


class MiniMaxChatModel(OpenAIChatModel):
    """MiniMax chat model, using the OpenAI-compatible API at api.minimax.io.

    Supported models (all with 204K context window):
    - ``MiniMax-M2.7`` — latest generation, best quality
    - ``MiniMax-M2.7-highspeed`` — latest generation, faster inference
    - ``MiniMax-M2.5`` — previous generation
    - ``MiniMax-M2.5-highspeed`` — previous generation, faster inference

    .. note::
        MiniMax requires ``temperature`` to be in the range ``(0.0, 1.0]``.
        Values outside this range are automatically clamped.
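        For example, ``temperature=1.7`` is stored as ``1.0`` and
        ``temperature=0.0`` is raised to a small positive value.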

    Example:
        >>> import asyncio, os
        >>> from openjudge.models import MiniMaxChatModel
        >>> from openjudge.graders.common.correctness import CorrectnessGrader
        >>>
        >>> model = MiniMaxChatModel(model="MiniMax-M2.7")
        >>> grader = CorrectnessGrader(model=model)
        >>> result = asyncio.run(grader.aevaluate(
        ...     query="What is the capital of France?",
        ...     response="Paris is the capital of France.",
        ...     reference_response="The capital of France is Paris.",
        ... ))
        >>> print(result.score)
    """

    MINIMAX_BASE_URL = "https://api.minimax.io/v1"

    def __init__(
        self,
        model: str = "MiniMax-M2.7",
        api_key: str | None = None,
        base_url: str | None = None,
        stream: bool = False,
        client_args: Dict[str, Any] | None = None,
        max_retries: int | None = None,
        timeout: float | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the MiniMax chat model.

        Args:
            model: MiniMax model name. Defaults to ``"MiniMax-M2.7"``.
                Available models: ``MiniMax-M2.7``, ``MiniMax-M2.7-highspeed``,
                ``MiniMax-M2.5``, ``MiniMax-M2.5-highspeed``.
            api_key: MiniMax API key. Falls back to the ``MINIMAX_API_KEY``
                environment variable.
            base_url: API base URL. Defaults to ``https://api.minimax.io/v1``.
            stream: Whether to use streaming output. Defaults to ``False``.
            client_args: Extra keyword arguments forwarded to :class:`openai.AsyncOpenAI`.
            max_retries: Number of retry attempts on transient errors.
            timeout: Request timeout in seconds.
            **kwargs: Extra keyword arguments forwarded to each API call
                (e.g. ``max_tokens``). ``temperature`` is clamped to ``(0.0, 1.0]``
                automatically.
        """
        resolved_api_key = api_key or os.getenv("MINIMAX_API_KEY")
        resolved_base_url = base_url or self.MINIMAX_BASE_URL

        # Clamp temperature to MiniMax's allowed range (0.0, 1.0]
        if "temperature" in kwargs:
            raw_temp = float(kwargs["temperature"])
            kwargs["temperature"] = max(1e-6, min(raw_temp, 1.0))
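            # e.g. a hypothetical temperature=1.7 becomes 1.0 and 0.0 becomes
            # 1e-6, keeping the value strictly inside the (0.0, 1.0] range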

        super().__init__(
            model=model,
            api_key=resolved_api_key,
            base_url=resolved_base_url,
            stream=stream,
            client_args=client_args,
            max_retries=max_retries,
            timeout=timeout,
            **kwargs,
        )

    async def achat(
        self,
        messages: list[dict | ChatMessage],
        tools: list[dict] | None = None,
        tool_choice: Literal["auto", "none", "any", "required"] | str | None = None,
        structured_model: Type[BaseModel] | None = None,
        callback: Callable | None = None,
        **kwargs: Any,
    ) -> ChatResponse | AsyncGenerator[ChatResponse, None]:
        """Chat with a MiniMax model.

        Wraps :meth:`OpenAIChatModel.achat` with MiniMax-specific adjustments:

        * ``temperature`` values outside ``(0.0, 1.0]`` are clamped.
        * ``<think>…</think>`` reasoning blocks produced by M2.5/M2.7 models
          are stripped from non-streaming responses so downstream graders always
          receive clean text.

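        Streaming responses are passed through unchanged, so callers that
        stream should strip ``<think>`` blocks themselves if needed.
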
        Args:
            messages: Conversation history as a list of message dicts or
                :class:`ChatMessage` objects.
            tools: Tool/function schemas available to the model.
            tool_choice: Tool selection strategy.
            structured_model: Pydantic model for structured output.
            callback: Optional callback invoked on the final response.
            **kwargs: Additional keyword arguments forwarded to the API.
                ``temperature`` is clamped to ``(0.0, 1.0]``.

        Returns:
            A :class:`ChatResponse` or an async generator thereof (streaming).
        """
        # Clamp temperature supplied at call-time as well
        if "temperature" in kwargs:
            raw_temp = float(kwargs["temperature"])
            kwargs["temperature"] = max(1e-6, min(raw_temp, 1.0))

        result = await super().achat(
            messages=messages,
            tools=tools,
            tool_choice=tool_choice,
            structured_model=structured_model,
            callback=callback,
            **kwargs,
        )

        # Strip <think>…</think> blocks from non-streaming responses
        if isinstance(result, ChatResponse):
            if result.content and isinstance(result.content, str):
                result.content = _strip_think_tags(result.content)

        return result


__all__ = ["MiniMaxChatModel", "MINIMAX_MODELS"]
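

# A minimal direct-usage sketch (assumes MINIMAX_API_KEY is set in the
# environment; the prompt text and temperature below are illustrative only):
#
#     import asyncio
#
#     model = MiniMaxChatModel(model="MiniMax-M2.7", temperature=0.2)
#     response = asyncio.run(model.achat(
#         messages=[{"role": "user", "content": "Say hello in one word."}],
#     ))
#     print(response.content)  # <think> blocks have already been stripped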