|
| 1 | +""" |
| 2 | +MiniMax API support for text-based LLM evaluation. |
| 3 | +OpenAI-compatible chat completions endpoint. |
| 4 | +Set MINIMAX_API_KEY or pass key=... |
| 5 | +
|
| 6 | +Models: MiniMax-M2.7, MiniMax-M2.5, MiniMax-M2.5-highspeed |
| 7 | +API docs: https://platform.minimaxi.com/document/guides/chat-model/text-generation |
| 8 | +""" |
| 9 | +import json |
| 10 | +import os |
| 11 | + |
| 12 | +import requests |
| 13 | + |
| 14 | +from ..smp import get_logger |
| 15 | +from .base import BaseAPI |
| 16 | + |
# Default OpenAI-compatible chat-completions endpoint. Can be overridden via
# the MINIMAX_API_BASE environment variable or the api_base constructor arg.
MINIMAX_API_BASE = "https://api.minimax.io/v1/chat/completions"
| 19 | + |
class MiniMaxAPI(BaseAPI):
    """Text LLM API wrapper for MiniMax's OpenAI-compatible chat endpoint.

    Sends chat-completion requests to ``api_base`` with a Bearer token and
    returns plain-text answers. Intended for text-only evaluation: only
    ``type == "text"`` content segments are forwarded; other segment types
    (e.g. images) are silently dropped.
    """

    is_api: bool = True  # marks this model as API-backed for the framework

    def __init__(
        self,
        model: str = "MiniMax-M2.7",
        key: str = None,
        api_base: str = None,
        retry: int = 10,
        wait: int = 1,
        system_prompt: str = None,
        verbose: bool = True,
        temperature: float = 0,
        max_tokens: int = 2048,
        timeout: int = 300,
        **kwargs,
    ):
        """Configure the client.

        Args:
            model: MiniMax model identifier sent in the request payload.
            key: API key; falls back to the ``MINIMAX_API_KEY`` env var.
            api_base: Endpoint URL; falls back to the ``MINIMAX_API_BASE``
                env var, then the module-level default.
            retry: Retry count forwarded to ``BaseAPI``.
            wait: Retry wait (seconds) forwarded to ``BaseAPI``.
            system_prompt: Optional system message prepended to every request.
            verbose: Forwarded to ``BaseAPI``; also gates error logging here.
            temperature: Default sampling temperature.
            max_tokens: Default completion-length cap.
            timeout: Per-request timeout budget in seconds.
            **kwargs: Extra options forwarded to ``BaseAPI``.

        Raises:
            ValueError: If no API key is available from either source.
        """
        self.model = model
        self.key = key or os.environ.get("MINIMAX_API_KEY")
        self.api_base = api_base or os.environ.get("MINIMAX_API_BASE", MINIMAX_API_BASE)
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.timeout = timeout

        # Fail fast: a missing key would otherwise surface later as
        # opaque HTTP 401 responses on every request.
        if not self.key:
            raise ValueError(
                "MiniMax API key is required. Set MINIMAX_API_KEY or pass key=..."
            )

        super().__init__(
            retry=retry,
            wait=wait,
            system_prompt=system_prompt,
            verbose=verbose,
            **kwargs,
        )

        self.logger.info(f"MiniMaxAPI: model={self.model}, api_base={self.api_base}")

    def _prepare_messages(self, inputs):
        """Build OpenAI-style ``messages`` from the VLMEvalKit input format.

        Multiple text segments within one turn are joined with newlines.
        Malformed segments missing ``type``/``value`` keys are skipped
        rather than raising ``KeyError``.

        Args:
            inputs: Either a multi-turn list of ``{"role": ..., "content":
                [...]}`` dicts, or a single-turn flat list of content
                segments (``{"type": ..., "value": ...}`` dicts).

        Returns:
            A list of ``{"role": ..., "content": ...}`` dicts, with the
            configured system prompt (if any) first.
        """
        messages = []
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})

        # Multi-turn chat format: each item carries its own role.
        if inputs and isinstance(inputs[0], dict) and "role" in inputs[0]:
            for item in inputs:
                content_parts = item.get("content", [])
                # .get() guards against malformed segments missing keys.
                text = "\n".join(
                    x.get("value", "")
                    for x in content_parts
                    if x.get("type") == "text"
                )
                messages.append({"role": item["role"], "content": text or ""})
        else:
            # Single-turn: inputs is a flat list of content segments.
            text = "\n".join(
                x.get("value", "") for x in inputs if x.get("type") == "text"
            )
            messages.append({"role": "user", "content": text or ""})

        return messages

    def generate_inner(self, inputs, **kwargs):
        """Issue one chat-completion request and extract the reply.

        Args:
            inputs: VLMEvalKit-format messages (see ``_prepare_messages``).
            **kwargs: May override ``temperature`` / ``max_tokens``.

        Returns:
            ``(ret_code, answer, log)`` where ``ret_code`` is 0 on HTTP 2xx,
            the HTTP status code on a non-2xx response, or -1 on a transport
            failure; ``answer`` is the stripped model reply or
            ``self.fail_msg``; ``log`` is the raw ``requests.Response`` (or
            the error string on transport failure).
        """
        temperature = kwargs.pop("temperature", self.temperature)
        max_tokens = kwargs.pop("max_tokens", self.max_tokens)

        payload = {
            "model": self.model,
            "messages": self._prepare_messages(inputs),
            "temperature": temperature,
            "max_tokens": max_tokens,
        }

        try:
            response = requests.post(
                self.api_base,
                headers={
                    "Authorization": f"Bearer {self.key}",
                    "Content-Type": "application/json",
                },
                data=json.dumps(payload),
                # Small grace margin over the nominal budget so a server-side
                # timeout can surface before the client aborts.
                timeout=self.timeout * 1.1,
            )
        except requests.RequestException as err:
            # Narrowed from bare Exception: only transport-level failures
            # (connection errors, timeouts) should be treated as retryable;
            # programming errors must propagate.
            if self.verbose:
                self.logger.error(f"{type(err).__name__}: {err}")
            return -1, self.fail_msg, str(err)

        ret_code = response.status_code
        ret_code = 0 if (200 <= ret_code < 300) else ret_code
        answer = self.fail_msg

        try:
            data = response.json()
            content = data["choices"][0]["message"]["content"]
            # content can be null for refusals/empty completions; calling
            # .strip() on None would raise and spam the error log.
            if isinstance(content, str):
                answer = content.strip()
        except Exception as err:
            if self.verbose:
                self.logger.error(f"{type(err).__name__}: {err}")
                self.logger.error(response.text)

        return ret_code, answer, response