From c2d809b35803bf93c5b24bcebf0c83bafa178233 Mon Sep 17 00:00:00 2001
From: Roo Code
Date: Tue, 14 Apr 2026 06:44:35 +0000
Subject: [PATCH] feat: add GPT-5.4 Codex Spark fast models for Codex provider

Add gpt-5.4-codex-spark and gpt-5.4-mini-codex-spark model variants to
the OpenAI Codex provider, following the existing gpt-5.3-codex-spark
pattern. These are fast, text-only coding models with 128k context and
8192 max output tokens, accessible via ChatGPT subscription.

Closes #12112
---
 packages/types/src/providers/openai-codex.ts | 28 +++++++++++
 .../providers/__tests__/openai-codex.spec.ts | 50 ++++++++++++++-----
 2 files changed, 66 insertions(+), 12 deletions(-)

diff --git a/packages/types/src/providers/openai-codex.ts b/packages/types/src/providers/openai-codex.ts
index 47809723761..2c3cc13718a 100644
--- a/packages/types/src/providers/openai-codex.ts
+++ b/packages/types/src/providers/openai-codex.ts
@@ -202,6 +202,34 @@ export const openAiCodexModels = {
 		supportsTemperature: false,
 		description: "GPT-5.4 Mini: Lower-cost GPT-5.4 model via ChatGPT subscription",
 	},
+	"gpt-5.4-codex-spark": {
+		maxTokens: 8192,
+		contextWindow: 128000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 0,
+		outputPrice: 0,
+		supportsTemperature: false,
+		description: "GPT-5.4 Codex Spark: Fast, text-only coding model via ChatGPT subscription",
+	},
+	"gpt-5.4-mini-codex-spark": {
+		maxTokens: 8192,
+		contextWindow: 128000,
+		includedTools: ["apply_patch"],
+		excludedTools: ["apply_diff", "write_to_file"],
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["low", "medium", "high", "xhigh"],
+		reasoningEffort: "medium",
+		inputPrice: 0,
+		outputPrice: 0,
+		supportsTemperature: false,
+		description: "GPT-5.4 Mini Codex Spark: Fast, text-only coding model via ChatGPT subscription",
+	},
 	"gpt-5.2": {
 		maxTokens: 128000,
 		contextWindow: 400000,
diff --git a/src/api/providers/__tests__/openai-codex.spec.ts b/src/api/providers/__tests__/openai-codex.spec.ts
index dcc0c4d0357..03e053bb836 100644
--- a/src/api/providers/__tests__/openai-codex.spec.ts
+++ b/src/api/providers/__tests__/openai-codex.spec.ts
@@ -3,18 +3,24 @@ import { OpenAiCodexHandler } from "../openai-codex"
 
 describe("OpenAiCodexHandler.getModel", () => {
-	it.each(["gpt-5.1", "gpt-5", "gpt-5.1-codex", "gpt-5-codex", "gpt-5-codex-mini", "gpt-5.3-codex-spark"])(
-		"should return specified model when a valid model id is provided: %s",
-		(apiModelId) => {
-			const handler = new OpenAiCodexHandler({ apiModelId })
-			const model = handler.getModel()
-
-			expect(model.id).toBe(apiModelId)
-			expect(model.info).toBeDefined()
-			// Default reasoning effort for GPT-5 family
-			expect(model.info.reasoningEffort).toBe("medium")
-		},
-	)
+	it.each([
+		"gpt-5.1",
+		"gpt-5",
+		"gpt-5.1-codex",
+		"gpt-5-codex",
+		"gpt-5-codex-mini",
+		"gpt-5.3-codex-spark",
+		"gpt-5.4-codex-spark",
+		"gpt-5.4-mini-codex-spark",
+	])("should return specified model when a valid model id is provided: %s", (apiModelId) => {
+		const handler = new OpenAiCodexHandler({ apiModelId })
+		const model = handler.getModel()
+
+		expect(model.id).toBe(apiModelId)
+		expect(model.info).toBeDefined()
+		// Default reasoning effort for GPT-5 family
+		expect(model.info.reasoningEffort).toBe("medium")
+	})
 
 	it("should fall back to default model when an invalid model id is provided", () => {
 		const handler = new OpenAiCodexHandler({ apiModelId: "not-a-real-model" })
 		const model = handler.getModel()
@@ -34,6 +40,26 @@ describe("OpenAiCodexHandler.getModel", () => {
 		expect(model.info.supportsImages).toBe(false)
 	})
 
+	it("should use GPT-5.4 Spark-specific limits and capabilities", () => {
+		const handler = new OpenAiCodexHandler({ apiModelId: "gpt-5.4-codex-spark" })
+		const model = handler.getModel()
+
+		expect(model.id).toBe("gpt-5.4-codex-spark")
+		expect(model.info.contextWindow).toBe(128000)
+		expect(model.info.maxTokens).toBe(8192)
+		expect(model.info.supportsImages).toBe(false)
+	})
+
+	it("should use GPT-5.4 Mini Spark-specific limits and capabilities", () => {
+		const handler = new OpenAiCodexHandler({ apiModelId: "gpt-5.4-mini-codex-spark" })
+		const model = handler.getModel()
+
+		expect(model.id).toBe("gpt-5.4-mini-codex-spark")
+		expect(model.info.contextWindow).toBe(128000)
+		expect(model.info.maxTokens).toBe(8192)
+		expect(model.info.supportsImages).toBe(false)
+	})
+
 	it("should use GPT-5.4 Mini capabilities when selected", () => {
 		const handler = new OpenAiCodexHandler({ apiModelId: "gpt-5.4-mini" })
 		const model = handler.getModel()