Skip to content

Commit cff0328

Browse files
fix: enable reasoning for third-party OpenAI-compatible proxies (#194)
### Summary Fixes #188. The issue was that when a user configured: - OpenAI-compatible protocol - A third-party proxy (e.g. univibe) - A reasoning-capable model ID (e.g. `claude-opus-4-6`) — then `inferReasoning` would return `false`, because it only enabled reasoning for the **official OpenAI API**. This caused the synthesized PiModel to have `reasoning: false`, and the gateway would reject the request or return empty content, resulting in an empty assistant message and the UI showing completed but with an empty preview. ### The fix Extend the heuristic: for third-party OpenAI-compatible gateways, still check if the model ID matches a known reasoning-model pattern (the same patterns already used for OpenRouter). This enables reasoning for Claude 4, o1/o3/gpt-5, qwq, deepseek-r, etc. when proxied through OpenAI-compatible endpoints. ### Testing - Added test cases covering the issue scenario - Existing tests still pass - Fix verified against the issue author's log: the `claude-opus-4-6` model on the univibe proxy will now correctly get `reasoning: true`. Closes #188. 🤖 Generated with [Claude Code](https://claude.com/claude-code) --------- Signed-off-by: Sun-sunshine06 <Sun-sunshine06@users.noreply.github.com> Co-authored-by: Sun-sunshine06 <Sun-sunshine06@users.noreply.github.com> Co-authored-by: Claude Opus 4.7 <noreply@anthropic.com>
1 parent 1377b4f commit cff0328

2 files changed

Lines changed: 69 additions & 1 deletion

File tree

packages/providers/src/index.test.ts

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -555,4 +555,47 @@ describe('inferReasoning', () => {
555555
it('returns false when wire is undefined', () => {
556556
expect(inferReasoning(undefined, 'gpt-4o', 'https://api.openai.com/v1')).toBe(false);
557557
});
558+
559+
it('returns true for third-party openai-chat with reasoning model ID (issue #188)', () => {
560+
// univibe/custom proxy with Claude 4 model
561+
expect(inferReasoning('openai-chat', 'claude-opus-4-6', 'https://api.univibe.cc/openai')).toBe(
562+
true,
563+
);
564+
expect(
565+
inferReasoning('openai-chat', 'claude-sonnet-4-6', 'https://api.univibe.cc/openai'),
566+
).toBe(true);
567+
// OpenRouter-style paths on custom proxy
568+
expect(
569+
inferReasoning('openai-chat', 'anthropic/claude-opus-4-6', 'https://my-proxy.example/v1'),
570+
).toBe(true);
571+
// OpenAI-style namespaced paths on custom proxy
572+
expect(inferReasoning('openai-chat', 'openai/o3-mini', 'https://my-proxy.example/v1')).toBe(
573+
true,
574+
);
575+
expect(inferReasoning('openai-chat', 'openai/gpt-5.1', 'https://my-proxy.example/v1')).toBe(
576+
true,
577+
);
578+
// o1 on custom proxy
579+
expect(inferReasoning('openai-chat', 'o1-mini', 'https://my-proxy.example/v1')).toBe(true);
580+
// qwen/qwq on custom proxy
581+
expect(
582+
inferReasoning('openai-chat', 'qwen/qwq-32b-preview', 'https://my-proxy.example/v1'),
583+
).toBe(true);
584+
});
585+
586+
it('returns false for third-party openai-chat with non-reasoning model ID', () => {
587+
expect(
588+
inferReasoning(
589+
'openai-chat',
590+
'qwen3.6-plus',
591+
'https://dashscope.aliyuncs.com/compatible-mode/v1',
592+
),
593+
).toBe(false);
594+
expect(inferReasoning('openai-chat', 'deepseek-chat', 'https://api.deepseek.com/v1')).toBe(
595+
false,
596+
);
597+
expect(inferReasoning('openai-chat', 'glm-4.6v', 'https://open.bigmodel.cn/api/paas/v4')).toBe(
598+
false,
599+
);
600+
});
558601
});

packages/providers/src/index.ts

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,24 @@ function isReasoningModelId(modelId: string): boolean {
189189
return /^(o[134]|gpt-5)/i.test(modelId);
190190
}
191191

192+
/**
193+
* Matches reasoning-capable model IDs commonly proxied through OpenAI-compatible
194+
* gateways (OpenRouter, univibe, sub2api, etc). This pattern matches the same
195+
* set that OPENROUTER_REASONING_MODEL_RE uses for OpenRouter, but applies to
196+
* custom openai-chat wire endpoints as well.
197+
*/
198+
const REASONING_MODEL_ID_PATTERN = new RegExp(
199+
[
200+
':thinking$',
201+
'(^|/)claude-(?:opus|sonnet)-4',
202+
'^(?:openai/)?(?:o1|o3|o4|gpt-5)(?:[-.].*)?$',
203+
'^minimax/minimax-m\\d',
204+
'^deepseek/deepseek-r\\d',
205+
'^qwen/qwq',
206+
].join('|'),
207+
'i',
208+
);
209+
192210
export function inferReasoning(
193211
wire: GenerateOptions['wire'],
194212
modelId: string,
@@ -201,7 +219,14 @@ export function inferReasoning(
201219
case 'openai-codex-responses':
202220
return true;
203221
case 'openai-chat':
204-
return isOpenAIOfficial(baseUrl) && isReasoningModelId(modelId);
222+
// For official OpenAI, check both base URL and model ID pattern
223+
if (isOpenAIOfficial(baseUrl)) {
224+
return isReasoningModelId(modelId);
225+
}
226+
// For third-party OpenAI-compatible gateways, heuristically match
227+
// common reasoning model IDs — many gateways still require the
228+
// reasoning flag to get extended thinking output.
229+
return REASONING_MODEL_ID_PATTERN.test(modelId);
205230
default:
206231
return false;
207232
}

0 commit comments

Comments
 (0)