Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
67958c9
feat: add script to fetch models from OpenRouter API
AlemTuzlak Apr 8, 2026
eaaa9f1
feat: add script to sync OpenRouter models into provider packages
AlemTuzlak Apr 8, 2026
a7f2841
fix: harden regex and replacement patterns in sync script
AlemTuzlak Apr 8, 2026
0be4eee
feat: add generate:models:fetch and generate:models:sync scripts
AlemTuzlak Apr 8, 2026
6e636be
ci: add daily model sync workflow
AlemTuzlak Apr 8, 2026
07e63c2
fix: include openrouter.models.ts in CI commit
AlemTuzlak Apr 8, 2026
c5f891d
fix: use actual input modalities, exclude non-chat models, reduce CI …
AlemTuzlak Apr 8, 2026
a7911d9
ci: apply automated fixes
autofix-ci[bot] Apr 8, 2026
6954915
fix: fix YAML parsing in sync-models workflow
AlemTuzlak Apr 9, 2026
ca7db2e
Merge branch 'main' into worktree-mellow-meandering-torvalds
AlemTuzlak Apr 9, 2026
4c00a3e
fix: address PR review feedback
AlemTuzlak Apr 9, 2026
eea0168
ci: apply automated fixes
autofix-ci[bot] Apr 9, 2026
137f440
refactor: rename scripts and move changeset creation into sync script
AlemTuzlak Apr 10, 2026
cfa3ebe
chore: sync model metadata from OpenRouter API
AlemTuzlak Apr 10, 2026
499c756
ci: apply automated fixes
autofix-ci[bot] Apr 10, 2026
e94c1c6
feat: add model age filtering and provider skip patterns
AlemTuzlak Apr 10, 2026
0e53447
ci: apply automated fixes
autofix-ci[bot] Apr 10, 2026
100672d
Merge branch 'main' into worktree-mellow-meandering-torvalds
AlemTuzlak Apr 10, 2026
b7c7ba2
fix: reuse existing sync-models changeset instead of creating duplicates
AlemTuzlak Apr 10, 2026
19d6552
ci: apply automated fixes
autofix-ci[bot] Apr 10, 2026
94dac91
chore: sync model metadata from OpenRouter API
AlemTuzlak Apr 10, 2026
1ec4c40
ci: apply automated fixes
autofix-ci[bot] Apr 10, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .changeset/sync-models.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
'@tanstack/ai-anthropic': patch
'@tanstack/ai-gemini': patch
'@tanstack/ai-grok': patch
'@tanstack/ai-openai': patch
'@tanstack/ai-openrouter': patch
---

Update model metadata from OpenRouter API
76 changes: 76 additions & 0 deletions .github/workflows/sync-models.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
# Daily job that pulls the latest model list from the OpenRouter API,
# regenerates per-provider model metadata, and opens (or refreshes) an
# automated pull request with the changes.
name: Sync Model Metadata

on:
  schedule:
    - cron: '0 6 * * *' # once a day at 06:00 UTC
  workflow_dispatch: # allow manual runs from the Actions tab

# At most one sync per ref at a time; a newer run cancels an in-flight one.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# contents: write  -> push the automated branch
# pull-requests: write -> create the PR via gh
permissions:
  contents: write
  pull-requests: write

jobs:
  sync:
    name: Sync Models
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6.0.2
        with:
          fetch-depth: 0 # full history so the branch push has correct ancestry

      - name: Setup Tools
        uses: TanStack/config/.github/setup@main

      - name: Fetch and sync model metadata
        run: pnpm generate:models

      # Only the packages/ tree is inspected here; scripts/ and .changeset/
      # are added to the commit below but do not by themselves mark the run
      # as "changed".
      - name: Check for package changes
        id: changes
        run: |
          if git diff --quiet -- packages/; then
            echo "changed=false" >> $GITHUB_OUTPUT
          else
            echo "changed=true" >> $GITHUB_OUTPUT
          fi

      # Force-push keeps the automated branch as a single fresh commit on
      # every run instead of accumulating history.
      - name: Commit and force-push
        if: steps.changes.outputs.changed == 'true'
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add packages/ scripts/openrouter.models.ts scripts/.sync-models-last-run .changeset/
          git commit -m "chore: sync model metadata from OpenRouter"
          git push --force origin HEAD:automated/sync-models
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Creates the PR only when none is already open for the branch; an
      # existing PR is simply updated by the force-push above.
      # jq yields "" (no PRs) or "null" (missing field) — both are handled.
      - name: Create or update PR
        if: steps.changes.outputs.changed == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        shell: bash
        run: |
          BRANCH="automated/sync-models"
          EXISTING_PR=$(gh pr list --head "$BRANCH" --base main --json number --jq '.[0].number' 2>/dev/null || true)

          if [ -z "$EXISTING_PR" ] || [ "$EXISTING_PR" = "null" ]; then
            BODY=$(cat <<'PRBODY'
          Automated daily sync of model metadata from the OpenRouter API.

          - Fetches the latest model list from OpenRouter
          - Converts to the internal adapter format
          - Syncs provider-specific model metadata for affected packages
          - Creates a patch changeset for all changed packages
          PRBODY
          )
            gh pr create \
              --title "chore: sync model metadata from OpenRouter" \
              --body "$BODY" \
              --base main \
              --head "$BRANCH"
          fi
4 changes: 3 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,9 @@
"dev": "pnpm run watch",
"format": "prettier --experimental-cli --ignore-unknown '**/*' --write",
"generate-docs": "node scripts/generate-docs.ts && pnpm run copy:readme",
"generate:models": "tsx scripts/convert-openrouter-models.ts",
"generate:models": "pnpm generate:models:fetch && pnpm regenerate:models && tsx scripts/sync-provider-models.ts",
"generate:models:fetch": "tsx scripts/fetch-openrouter-models.ts",
"regenerate:models": "tsx scripts/convert-openrouter-models.ts",
"sync-docs-config": "node scripts/sync-docs-config.ts",
"copy:readme": "cp README.md packages/typescript/ai/README.md && cp README.md packages/typescript/ai-devtools/README.md && cp README.md packages/typescript/preact-ai-devtools/README.md && cp README.md packages/typescript/ai-client/README.md && cp README.md packages/typescript/ai-gemini/README.md && cp README.md packages/typescript/ai-ollama/README.md && cp README.md packages/typescript/ai-openai/README.md && cp README.md packages/typescript/ai-react/README.md && cp README.md packages/typescript/ai-react-ui/README.md && cp README.md packages/typescript/react-ai-devtools/README.md && cp README.md packages/typescript/solid-ai-devtools/README.md",
"changeset": "changeset",
Expand Down
41 changes: 41 additions & 0 deletions packages/typescript/ai-anthropic/src/model-meta.ts
Original file line number Diff line number Diff line change
Expand Up @@ -423,6 +423,36 @@ const CLAUDE_HAIKU_3 = {
? TMessageCapabilities
: unknown */

// Metadata for the claude-opus-4.6-fast chat model.
// NOTE(review): entry appears to come from the automated OpenRouter model
// sync; pricing is presumably USD per million tokens, matching the sibling
// entries in this file — confirm against the sync script.
const CLAUDE_OPUS_4_6_FAST = {
  name: 'claude-opus-4.6-fast',
  id: 'claude-opus-4.6-fast',
  context_window: 1_000_000, // maximum context size in tokens
  max_output_tokens: 128_000,
  supports: {
    input: ['text', 'image'], // accepted input modalities
    extended_thinking: true,
    priority_tier: true,
  },
  pricing: {
    input: {
      normal: 30,
      cached: 3, // discounted rate for prompt-cache hits
    },
    output: {
      normal: 150,
    },
  },
  // `satisfies` validates the shape against ModelMeta while `as const`
  // preserves the literal types (so `supports.input` stays a literal tuple).
} as const satisfies ModelMeta<
  AnthropicContainerOptions &
    AnthropicContextManagementOptions &
    AnthropicMCPOptions &
    AnthropicServiceTierOptions &
    AnthropicStopSequencesOptions &
    AnthropicThinkingOptions &
    AnthropicToolChoiceOptions &
    AnthropicSamplingOptions
>

export const ANTHROPIC_MODELS = [
CLAUDE_OPUS_4_6.id,
CLAUDE_OPUS_4_5.id,
Expand All @@ -435,6 +465,8 @@ export const ANTHROPIC_MODELS = [
CLAUDE_OPUS_4.id,
CLAUDE_HAIKU_3_5.id,
CLAUDE_HAIKU_3.id,

CLAUDE_OPUS_4_6_FAST.id,
] as const

// const ANTHROPIC_IMAGE_MODELS = [] as const
Expand Down Expand Up @@ -537,6 +569,14 @@ export type AnthropicChatModelProviderOptionsByName = {
AnthropicStopSequencesOptions &
AnthropicToolChoiceOptions &
AnthropicSamplingOptions
[CLAUDE_OPUS_4_6_FAST.id]: AnthropicContainerOptions &
AnthropicContextManagementOptions &
AnthropicMCPOptions &
AnthropicServiceTierOptions &
AnthropicStopSequencesOptions &
AnthropicThinkingOptions &
AnthropicToolChoiceOptions &
AnthropicSamplingOptions
}

/**
Expand All @@ -562,4 +602,5 @@ export type AnthropicModelInputModalitiesByName = {
[CLAUDE_OPUS_4.id]: typeof CLAUDE_OPUS_4.supports.input
[CLAUDE_HAIKU_3_5.id]: typeof CLAUDE_HAIKU_3_5.supports.input
[CLAUDE_HAIKU_3.id]: typeof CLAUDE_HAIKU_3.supports.input
[CLAUDE_OPUS_4_6_FAST.id]: typeof CLAUDE_OPUS_4_6_FAST.supports.input
}
43 changes: 43 additions & 0 deletions packages/typescript/ai-grok/src/model-meta.ts
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,44 @@ const GROK_2_IMAGE = {
* Grok Chat Models
* Based on xAI's available models as of 2025
*/
// Metadata for the grok-4.20 chat model.
// NOTE(review): pricing is presumably USD per million tokens, matching the
// sibling entries in this file — confirm against the sync script.
const GROK_4_20 = {
  name: 'grok-4.20',
  context_window: 2_000_000, // maximum context size in tokens
  supports: {
    input: ['text', 'image', 'document'], // accepted input modalities
    output: ['text'],
    capabilities: ['reasoning', 'structured_outputs', 'tool_calling'],
  },
  pricing: {
    input: {
      normal: 2,
      cached: 0.2, // discounted rate for cached input tokens
    },
    output: {
      normal: 6,
    },
  },
} as const satisfies ModelMeta

// Metadata for the grok-4.20-multi-agent chat model.
// Identical metadata to grok-4.20 except for the name; kept as a separate
// constant so the model gets its own entry in the name-keyed type maps.
const GROK_4_20_MULTI_AGENT = {
  name: 'grok-4.20-multi-agent',
  context_window: 2_000_000, // maximum context size in tokens
  supports: {
    input: ['text', 'image', 'document'], // accepted input modalities
    output: ['text'],
    capabilities: ['reasoning', 'structured_outputs', 'tool_calling'],
  },
  pricing: {
    input: {
      normal: 2,
      cached: 0.2, // discounted rate for cached input tokens
    },
    output: {
      normal: 6,
    },
  },
} as const satisfies ModelMeta

export const GROK_CHAT_MODELS = [
GROK_4_1_FAST_REASONING.name,
GROK_4_1_FAST_NON_REASONING.name,
Expand All @@ -223,6 +261,9 @@ export const GROK_CHAT_MODELS = [
GROK_3.name,
GROK_3_MINI.name,
GROK_2_VISION.name,

GROK_4_20.name,
GROK_4_20_MULTI_AGENT.name,
] as const

/**
Expand All @@ -247,6 +288,8 @@ export type GrokModelInputModalitiesByName = {
[GROK_3.name]: typeof GROK_3.supports.input
[GROK_3_MINI.name]: typeof GROK_3_MINI.supports.input
[GROK_2_VISION.name]: typeof GROK_2_VISION.supports.input
[GROK_4_20.name]: typeof GROK_4_20.supports.input
[GROK_4_20_MULTI_AGENT.name]: typeof GROK_4_20_MULTI_AGENT.supports.input
}

/**
Expand Down
97 changes: 97 additions & 0 deletions packages/typescript/ai-openai/src/model-meta.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1646,6 +1646,86 @@ const TTS_1_HD = {
> */

// Chat/text completion models (based on endpoints: "chat" or "chat-completions")
// Metadata for the gpt-5.4-mini chat model.
// NOTE(review): pricing is presumably USD per million tokens, matching the
// sibling entries in this file — confirm against the sync script.
const GPT_5_4_MINI = {
  name: 'gpt-5.4-mini',
  context_window: 400_000, // maximum context size in tokens
  max_output_tokens: 128_000,
  supports: {
    input: ['image', 'text'], // accepted input modalities
    output: ['text'],
    endpoints: ['chat', 'chat-completions'], // API surfaces serving this model
    features: [
      'streaming',
      'function_calling',
      'structured_outputs',
      'distillation',
    ],
    tools: [
      'web_search',
      'file_search',
      'image_generation',
      'code_interpreter',
      'mcp',
    ],
  },
  pricing: {
    input: {
      normal: 0.75,
      cached: 0.075, // discounted rate for cached input tokens
    },
    output: {
      normal: 4.5,
    },
  },
  // `satisfies` validates against ModelMeta while `as const` keeps the
  // literal types for the name-keyed lookup maps below.
} as const satisfies ModelMeta<
  OpenAIBaseOptions &
    OpenAIReasoningOptions &
    OpenAIStructuredOutputOptions &
    OpenAIToolsOptions &
    OpenAIStreamingOptions &
    OpenAIMetadataOptions
>

// Metadata for the gpt-5.4-nano chat model.
// Same capability set as gpt-5.4-mini with lower pricing.
// NOTE(review): pricing is presumably USD per million tokens, matching the
// sibling entries in this file — confirm against the sync script.
const GPT_5_4_NANO = {
  name: 'gpt-5.4-nano',
  context_window: 400_000, // maximum context size in tokens
  max_output_tokens: 128_000,
  supports: {
    input: ['image', 'text'], // accepted input modalities
    output: ['text'],
    endpoints: ['chat', 'chat-completions'], // API surfaces serving this model
    features: [
      'streaming',
      'function_calling',
      'structured_outputs',
      'distillation',
    ],
    tools: [
      'web_search',
      'file_search',
      'image_generation',
      'code_interpreter',
      'mcp',
    ],
  },
  pricing: {
    input: {
      normal: 0.2,
      cached: 0.02, // discounted rate for cached input tokens
    },
    output: {
      normal: 1.25,
    },
  },
} as const satisfies ModelMeta<
  OpenAIBaseOptions &
    OpenAIReasoningOptions &
    OpenAIStructuredOutputOptions &
    OpenAIToolsOptions &
    OpenAIStreamingOptions &
    OpenAIMetadataOptions
>

export const OPENAI_CHAT_MODELS = [
// Frontier models
GPT5_2.name,
Expand Down Expand Up @@ -1694,6 +1774,9 @@ export const OPENAI_CHAT_MODELS = [
// Legacy reasoning
O1.name,
O1_PRO.name,

GPT_5_4_MINI.name,
GPT_5_4_NANO.name,
] as const

export type OpenAIChatModel = (typeof OPENAI_CHAT_MODELS)[number]
Expand Down Expand Up @@ -1947,6 +2030,18 @@ export type OpenAIChatModelProviderOptionsByName = {
OpenAIToolsOptions &
OpenAIStreamingOptions &
OpenAIMetadataOptions
[GPT_5_4_MINI.name]: OpenAIBaseOptions &
OpenAIReasoningOptions &
OpenAIStructuredOutputOptions &
OpenAIToolsOptions &
OpenAIStreamingOptions &
OpenAIMetadataOptions
[GPT_5_4_NANO.name]: OpenAIBaseOptions &
OpenAIReasoningOptions &
OpenAIStructuredOutputOptions &
OpenAIToolsOptions &
OpenAIStreamingOptions &
OpenAIMetadataOptions
}

/**
Expand Down Expand Up @@ -2002,4 +2097,6 @@ export type OpenAIModelInputModalitiesByName = {
[O3_MINI.name]: typeof O3_MINI.supports.input
[GPT_4O_SEARCH_PREVIEW.name]: typeof GPT_4O_SEARCH_PREVIEW.supports.input
[GPT_4O_MINI_SEARCH_PREVIEW.name]: typeof GPT_4O_MINI_SEARCH_PREVIEW.supports.input
[GPT_5_4_MINI.name]: typeof GPT_5_4_MINI.supports.input
[GPT_5_4_NANO.name]: typeof GPT_5_4_NANO.supports.input
}
Loading
Loading