diff --git a/src/services/apis/openai-token-params.mjs b/src/services/apis/openai-token-params.mjs
index d5193376..c9cf9bfc 100644
--- a/src/services/apis/openai-token-params.mjs
+++ b/src/services/apis/openai-token-params.mjs
@@ -1,4 +1,4 @@
-const GPT5_CHAT_COMPLETIONS_MODEL_PATTERN = /(^|\/)gpt-5([.-]|$)/
+const GPT5_CHAT_COMPLETIONS_MODEL_PATTERN = /^gpt-5([.-]|$)/
 
 function shouldUseMaxCompletionTokens(provider, model) {
   const normalizedProvider = String(provider || '').toLowerCase()
diff --git a/tests/unit/services/apis/openai-token-params.test.mjs b/tests/unit/services/apis/openai-token-params.test.mjs
index fd64f893..8f7e7c43 100644
--- a/tests/unit/services/apis/openai-token-params.test.mjs
+++ b/tests/unit/services/apis/openai-token-params.test.mjs
@@ -8,9 +8,9 @@ test('uses max_completion_tokens for gpt-5.x chat models', () => {
   })
 })
 
-test('uses max_completion_tokens for provider-prefixed gpt-5.x models', () => {
+test('uses max_tokens for provider-prefixed gpt-5.x model names', () => {
   assert.deepEqual(getChatCompletionsTokenParams('openai', 'openai/gpt-5.2', 2048), {
-    max_completion_tokens: 2048,
+    max_tokens: 2048,
   })
 })
 