This is page 5 of 7. Use http://codebase.md/stripe/agent-toolkit?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .github
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.yml
│ │ ├── config.yml
│ │ └── feature_request.yml
│ └── workflows
│ ├── main.yml
│ ├── npm_release_shared.yml
│ ├── pypi_release.yml
│ └── sync-skills.yml
├── .gitignore
├── .vscode
│ ├── extensions.json
│ ├── launch.json
│ └── settings.json
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── gemini-extension.json
├── LICENSE
├── llm
│ ├── ai-sdk
│ │ ├── jest.config.ts
│ │ ├── LICENSE
│ │ ├── meter
│ │ │ ├── examples
│ │ │ │ ├── .env.example
│ │ │ │ ├── .gitignore
│ │ │ │ ├── anthropic.ts
│ │ │ │ ├── google.ts
│ │ │ │ ├── openai.ts
│ │ │ │ ├── README.md
│ │ │ │ └── tsconfig.json
│ │ │ ├── index.ts
│ │ │ ├── meter-event-logging.ts
│ │ │ ├── meter-event-types.ts
│ │ │ ├── README.md
│ │ │ ├── tests
│ │ │ │ ├── ai-sdk-billing-wrapper-anthropic.test.ts
│ │ │ │ ├── ai-sdk-billing-wrapper-general.test.ts
│ │ │ │ ├── ai-sdk-billing-wrapper-google.test.ts
│ │ │ │ ├── ai-sdk-billing-wrapper-openai.test.ts
│ │ │ │ ├── ai-sdk-billing-wrapper-other-providers.test.ts
│ │ │ │ ├── meter-event-logging.test.ts
│ │ │ │ └── model-name-normalization.test.ts
│ │ │ ├── tsconfig.json
│ │ │ ├── types.ts
│ │ │ ├── utils.ts
│ │ │ └── wrapperV2.ts
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── provider
│ │ │ ├── examples
│ │ │ │ ├── .env.example
│ │ │ │ ├── .gitignore
│ │ │ │ ├── anthropic.ts
│ │ │ │ ├── google.ts
│ │ │ │ ├── openai.ts
│ │ │ │ ├── README.md
│ │ │ │ └── tsconfig.json
│ │ │ ├── index.ts
│ │ │ ├── README.md
│ │ │ ├── stripe-language-model.ts
│ │ │ ├── stripe-provider.ts
│ │ │ ├── tests
│ │ │ │ ├── stripe-language-model.test.ts
│ │ │ │ ├── stripe-provider.test.ts
│ │ │ │ └── utils.test.ts
│ │ │ ├── tsconfig.build.json
│ │ │ ├── tsconfig.json
│ │ │ ├── types.ts
│ │ │ └── utils.ts
│ │ ├── README.md
│ │ ├── tsconfig.json
│ │ └── tsup.config.ts
│ ├── README.md
│ └── token-meter
│ ├── examples
│ │ ├── anthropic.ts
│ │ ├── gemini.ts
│ │ └── openai.ts
│ ├── index.ts
│ ├── jest.config.ts
│ ├── LICENSE
│ ├── meter-event-logging.ts
│ ├── meter-event-types.ts
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── README.md
│ ├── tests
│ │ ├── meter-event-logging.test.ts
│ │ ├── model-name-normalization.test.ts
│ │ ├── token-meter-anthropic.test.ts
│ │ ├── token-meter-gemini.test.ts
│ │ ├── token-meter-general.test.ts
│ │ ├── token-meter-openai.test.ts
│ │ └── type-detection.test.ts
│ ├── token-meter.ts
│ ├── tsconfig.build.json
│ ├── tsconfig.json
│ ├── types.ts
│ └── utils
│ └── type-detection.ts
├── README.md
├── SECURITY.md
├── skills
│ ├── get-started-kiro.md
│ ├── README.md
│ ├── stripe-best-practices.md
│ └── sync.js
└── tools
├── modelcontextprotocol
│ ├── .dxtignore
│ ├── .gitignore
│ ├── .node-version
│ ├── .prettierrc
│ ├── build-dxt.js
│ ├── Dockerfile
│ ├── eslint.config.mjs
│ ├── jest.config.ts
│ ├── LICENSE
│ ├── manifest.json
│ ├── package.json
│ ├── pnpm-lock.yaml
│ ├── README.md
│ ├── server.json
│ ├── src
│ │ ├── index.ts
│ │ └── test
│ │ └── index.test.ts
│ ├── stripe_icon.png
│ └── tsconfig.json
├── python
│ ├── .editorconfig
│ ├── .flake8
│ ├── examples
│ │ ├── crewai
│ │ │ ├── .env.template
│ │ │ ├── main.py
│ │ │ └── README.md
│ │ ├── langchain
│ │ │ ├── __init__.py
│ │ │ ├── .env.template
│ │ │ ├── main.py
│ │ │ └── README.md
│ │ ├── openai
│ │ │ ├── .env.template
│ │ │ ├── customer_support
│ │ │ │ ├── .env.template
│ │ │ │ ├── emailer.py
│ │ │ │ ├── env.py
│ │ │ │ ├── main.py
│ │ │ │ ├── pyproject.toml
│ │ │ │ ├── README.md
│ │ │ │ ├── repl.py
│ │ │ │ └── support_agent.py
│ │ │ ├── file_search
│ │ │ │ ├── main.py
│ │ │ │ └── README.md
│ │ │ └── web_search
│ │ │ ├── .env.template
│ │ │ ├── main.py
│ │ │ └── README.md
│ │ └── strands
│ │ └── main.py
│ ├── Makefile
│ ├── pyproject.toml
│ ├── README.md
│ ├── requirements.txt
│ ├── stripe_agent_toolkit
│ │ ├── __init__.py
│ │ ├── api.py
│ │ ├── configuration.py
│ │ ├── crewai
│ │ │ ├── tool.py
│ │ │ └── toolkit.py
│ │ ├── functions.py
│ │ ├── langchain
│ │ │ ├── tool.py
│ │ │ └── toolkit.py
│ │ ├── openai
│ │ │ ├── hooks.py
│ │ │ ├── tool.py
│ │ │ └── toolkit.py
│ │ ├── prompts.py
│ │ ├── schema.py
│ │ ├── strands
│ │ │ ├── __init__.py
│ │ │ ├── hooks.py
│ │ │ ├── tool.py
│ │ │ └── toolkit.py
│ │ └── tools.py
│ └── tests
│ ├── __init__.py
│ ├── test_configuration.py
│ └── test_functions.py
├── README.md
└── typescript
├── .gitignore
├── .prettierrc
├── eslint.config.mjs
├── examples
│ ├── ai-sdk
│ │ ├── .env.template
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── README.md
│ │ └── tsconfig.json
│ ├── cloudflare
│ │ ├── .dev.vars.example
│ │ ├── .gitignore
│ │ ├── biome.json
│ │ ├── package.json
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── app.ts
│ │ │ ├── imageGenerator.ts
│ │ │ ├── index.ts
│ │ │ ├── oauth.ts
│ │ │ └── utils.ts
│ │ ├── tsconfig.json
│ │ ├── worker-configuration.d.ts
│ │ └── wrangler.jsonc
│ ├── langchain
│ │ ├── .env.template
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── README.md
│ │ └── tsconfig.json
│ └── openai
│ ├── .env.template
│ ├── index.ts
│ ├── package.json
│ ├── README.md
│ └── tsconfig.json
├── jest.config.ts
├── LICENSE
├── package.json
├── pnpm-lock.yaml
├── pnpm-workspace.yaml
├── README.md
├── src
│ ├── ai-sdk
│ │ ├── index.ts
│ │ ├── tool.ts
│ │ └── toolkit.ts
│ ├── cloudflare
│ │ ├── index.ts
│ │ └── README.md
│ ├── langchain
│ │ ├── index.ts
│ │ ├── tool.ts
│ │ └── toolkit.ts
│ ├── modelcontextprotocol
│ │ ├── index.ts
│ │ ├── README.md
│ │ ├── register-paid-tool.ts
│ │ └── toolkit.ts
│ ├── openai
│ │ ├── index.ts
│ │ └── toolkit.ts
│ ├── shared
│ │ ├── api.ts
│ │ ├── balance
│ │ │ └── retrieveBalance.ts
│ │ ├── configuration.ts
│ │ ├── coupons
│ │ │ ├── createCoupon.ts
│ │ │ └── listCoupons.ts
│ │ ├── customers
│ │ │ ├── createCustomer.ts
│ │ │ └── listCustomers.ts
│ │ ├── disputes
│ │ │ ├── listDisputes.ts
│ │ │ └── updateDispute.ts
│ │ ├── documentation
│ │ │ └── searchDocumentation.ts
│ │ ├── invoiceItems
│ │ │ └── createInvoiceItem.ts
│ │ ├── invoices
│ │ │ ├── createInvoice.ts
│ │ │ ├── finalizeInvoice.ts
│ │ │ └── listInvoices.ts
│ │ ├── paymentIntents
│ │ │ └── listPaymentIntents.ts
│ │ ├── paymentLinks
│ │ │ └── createPaymentLink.ts
│ │ ├── prices
│ │ │ ├── createPrice.ts
│ │ │ └── listPrices.ts
│ │ ├── products
│ │ │ ├── createProduct.ts
│ │ │ └── listProducts.ts
│ │ ├── refunds
│ │ │ └── createRefund.ts
│ │ ├── subscriptions
│ │ │ ├── cancelSubscription.ts
│ │ │ ├── listSubscriptions.ts
│ │ │ └── updateSubscription.ts
│ │ └── tools.ts
│ └── test
│ ├── modelcontextprotocol
│ │ └── register-paid-tool.test.ts
│ └── shared
│ ├── balance
│ │ ├── functions.test.ts
│ │ └── parameters.test.ts
│ ├── configuration.test.ts
│ ├── customers
│ │ ├── functions.test.ts
│ │ └── parameters.test.ts
│ ├── disputes
│ │ └── functions.test.ts
│ ├── documentation
│ │ ├── functions.test.ts
│ │ └── parameters.test.ts
│ ├── invoiceItems
│ │ ├── functions.test.ts
│ │ ├── parameters.test.ts
│ │ └── prompts.test.ts
│ ├── invoices
│ │ ├── functions.test.ts
│ │ ├── parameters.test.ts
│ │ └── prompts.test.ts
│ ├── paymentIntents
│ │ ├── functions.test.ts
│ │ ├── parameters.test.ts
│ │ └── prompts.test.ts
│ ├── paymentLinks
│ │ ├── functions.test.ts
│ │ ├── parameters.test.ts
│ │ └── prompts.test.ts
│ ├── prices
│ │ ├── functions.test.ts
│ │ └── parameters.test.ts
│ ├── products
│ │ ├── functions.test.ts
│ │ └── parameters.test.ts
│ ├── refunds
│ │ ├── functions.test.ts
│ │ └── parameters.test.ts
│ └── subscriptions
│ ├── functions.test.ts
│ ├── parameters.test.ts
│ └── prompts.test.ts
├── tsconfig.json
└── tsup.config.ts
```
# Files
--------------------------------------------------------------------------------
/tools/typescript/examples/cloudflare/src/utils.ts:
--------------------------------------------------------------------------------
```typescript
1 | // From: https://github.com/cloudflare/ai/blob/main/demos/remote-mcp-server/src/utils.ts
2 |
3 | // Helper to generate the layout
4 | import {html, raw} from 'hono/html';
5 | import type {HtmlEscapedString} from 'hono/utils/html';
6 | import type {AuthRequest} from '@cloudflare/workers-oauth-provider';
7 |
8 | // This file mainly exists as a dumping ground for uninteresting html and CSS
9 | // to remove clutter and noise from the auth logic. You likely do not need
10 | // anything from this file.
11 |
12 | export const layout = (
13 | content: HtmlEscapedString | string,
14 | title: string
15 | ) => html`
16 | <!DOCTYPE html>
17 | <html lang="en">
18 | <head>
19 | <meta charset="UTF-8" />
20 | <meta name="viewport" content="width=device-width, initial-scale=1.0" />
21 | <title>${title}</title>
22 | <script src="https://cdn.tailwindcss.com"></script>
23 | <script>
24 | tailwind.config = {
25 | theme: {
26 | extend: {
27 | colors: {
28 | primary: '#3498db',
29 | secondary: '#2ecc71',
30 | accent: '#f39c12',
31 | },
32 | fontFamily: {
33 | sans: ['Inter', 'system-ui', 'sans-serif'],
34 | heading: ['Roboto', 'system-ui', 'sans-serif'],
35 | },
36 | },
37 | },
38 | };
39 | </script>
40 | <style>
41 | @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=Roboto:wght@400;500;700&display=swap');
42 |
43 | /* Custom styling for markdown content */
44 | .markdown h1 {
45 | font-size: 2.25rem;
46 | font-weight: 700;
47 | font-family: 'Roboto', system-ui, sans-serif;
48 | color: #1a202c;
49 | margin-bottom: 1rem;
50 | line-height: 1.2;
51 | }
52 |
53 | .markdown h2 {
54 | font-size: 1.5rem;
55 | font-weight: 600;
56 | font-family: 'Roboto', system-ui, sans-serif;
57 | color: #2d3748;
58 | margin-top: 1.5rem;
59 | margin-bottom: 0.75rem;
60 | line-height: 1.3;
61 | }
62 |
63 | .markdown h3 {
64 | font-size: 1.25rem;
65 | font-weight: 600;
66 | font-family: 'Roboto', system-ui, sans-serif;
67 | color: #2d3748;
68 | margin-top: 1.25rem;
69 | margin-bottom: 0.5rem;
70 | }
71 |
72 | .markdown p {
73 | font-size: 1.125rem;
74 | color: #4a5568;
75 | margin-bottom: 1rem;
76 | line-height: 1.6;
77 | }
78 |
79 | .markdown a {
80 | color: #3498db;
81 | font-weight: 500;
82 | text-decoration: none;
83 | }
84 |
85 | .markdown a:hover {
86 | text-decoration: underline;
87 | }
88 |
89 | .markdown blockquote {
90 | border-left: 4px solid #f39c12;
91 | padding-left: 1rem;
92 | padding-top: 0.75rem;
93 | padding-bottom: 0.75rem;
94 | margin-top: 1.5rem;
95 | margin-bottom: 1.5rem;
96 | background-color: #fffbeb;
97 | font-style: italic;
98 | }
99 |
100 | .markdown blockquote p {
101 | margin-bottom: 0.25rem;
102 | }
103 |
104 | .markdown ul,
105 | .markdown ol {
106 | margin-top: 1rem;
107 | margin-bottom: 1rem;
108 | margin-left: 1.5rem;
109 | font-size: 1.125rem;
110 | color: #4a5568;
111 | }
112 |
113 | .markdown li {
114 | margin-bottom: 0.5rem;
115 | }
116 |
117 | .markdown ul li {
118 | list-style-type: disc;
119 | }
120 |
121 | .markdown ol li {
122 | list-style-type: decimal;
123 | }
124 |
125 | .markdown pre {
126 | background-color: #f7fafc;
127 | padding: 1rem;
128 | border-radius: 0.375rem;
129 | margin-top: 1rem;
130 | margin-bottom: 1rem;
131 | overflow-x: auto;
132 | }
133 |
134 | .markdown code {
135 | font-family: monospace;
136 | font-size: 0.875rem;
137 | background-color: #f7fafc;
138 | padding: 0.125rem 0.25rem;
139 | border-radius: 0.25rem;
140 | }
141 |
142 | .markdown pre code {
143 | background-color: transparent;
144 | padding: 0;
145 | }
146 | </style>
147 | </head>
148 | <body
149 | class="bg-gray-50 text-gray-800 font-sans leading-relaxed flex flex-col min-h-screen"
150 | >
151 | <header class="bg-white shadow-sm mb-8">
152 | <div
153 | class="container mx-auto px-4 py-4 flex justify-between items-center"
154 | >
155 | <a
156 | href="/"
157 | class="text-xl font-heading font-bold text-primary hover:text-primary/80 transition-colors"
158 | >MCP Remote Auth Demo</a
159 | >
160 | </div>
161 | </header>
162 | <main class="container mx-auto px-4 pb-12 flex-grow">${content}</main>
163 | <footer class="bg-gray-100 py-6 mt-12">
164 | <div class="container mx-auto px-4 text-center text-gray-600">
165 | <p>
166 | © ${new Date().getFullYear()} MCP Remote Auth Demo. All rights
167 | reserved.
168 | </p>
169 | </div>
170 | </footer>
171 | </body>
172 | </html>
173 | `;
174 |
175 | export const homeContent = async (req: Request): Promise<HtmlEscapedString> => {
176 | return html`
177 | <div class="max-w-4xl mx-auto markdown">Example Paid MCP Server</div>
178 | `;
179 | };
180 |
181 | export const renderLoggedInAuthorizeScreen = async (
182 | oauthScopes: {name: string; description: string}[],
183 | oauthReqInfo: AuthRequest
184 | ) => {
185 | return html`
186 | <div class="max-w-md mx-auto bg-white p-8 rounded-lg shadow-md">
187 | <h1 class="text-2xl font-heading font-bold mb-6 text-gray-900">
188 | Authorization Request
189 | </h1>
190 |
191 | <div class="mb-8">
192 | <h2 class="text-lg font-semibold mb-3 text-gray-800">
193 | MCP Remote Auth Demo would like permission to:
194 | </h2>
195 | <ul class="space-y-2">
196 | ${oauthScopes.map(
197 | (scope) => html`
198 | <li class="flex items-start">
199 | <span class="inline-block mr-2 mt-1 text-secondary">✓</span>
200 | <div>
201 | <p class="font-medium">${scope.name}</p>
202 | <p class="text-gray-600 text-sm">${scope.description}</p>
203 | </div>
204 | </li>
205 | `
206 | )}
207 | </ul>
208 | </div>
209 | <form action="/approve" method="POST" class="space-y-4">
210 | <input
211 | type="hidden"
212 | name="oauthReqInfo"
213 | value="${JSON.stringify(oauthReqInfo)}"
214 | />
215 | <input
216 | name="email"
217 | value="[email protected]"
218 | required
219 | placeholder="Enter email"
220 | class="w-full px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-primary/50 focus:border-primary"
221 | />
222 | <button
223 | type="submit"
224 | name="action"
225 | value="approve"
226 | class="w-full py-3 px-4 bg-secondary text-white rounded-md font-medium hover:bg-secondary/90 transition-colors"
227 | >
228 | Approve
229 | </button>
230 | <button
231 | type="submit"
232 | name="action"
233 | value="reject"
234 | class="w-full py-3 px-4 border border-gray-300 text-gray-700 rounded-md font-medium hover:bg-gray-50 transition-colors"
235 | >
236 | Reject
237 | </button>
238 | </form>
239 | </div>
240 | `;
241 | };
242 |
243 | export const renderLoggedOutAuthorizeScreen = async (
244 | oauthScopes: {name: string; description: string}[],
245 | oauthReqInfo: AuthRequest
246 | ) => {
247 | return html`
248 | <div class="max-w-md mx-auto bg-white p-8 rounded-lg shadow-md">
249 | <h1 class="text-2xl font-heading font-bold mb-6 text-gray-900">
250 | Authorization Request
251 | </h1>
252 |
253 | <div class="mb-8">
254 | <h2 class="text-lg font-semibold mb-3 text-gray-800">
255 | MCP Remote Auth Demo would like permission to:
256 | </h2>
257 | <ul class="space-y-2">
258 | ${oauthScopes.map(
259 | (scope) => html`
260 | <li class="flex items-start">
261 | <span class="inline-block mr-2 mt-1 text-secondary">✓</span>
262 | <div>
263 | <p class="font-medium">${scope.name}</p>
264 | <p class="text-gray-600 text-sm">${scope.description}</p>
265 | </div>
266 | </li>
267 | `
268 | )}
269 | </ul>
270 | </div>
271 | <form action="/approve" method="POST" class="space-y-4">
272 | <input
273 | type="hidden"
274 | name="oauthReqInfo"
275 | value="${JSON.stringify(oauthReqInfo)}"
276 | />
277 | <div class="space-y-4">
278 | <div>
279 | <label
280 | for="email"
281 | class="block text-sm font-medium text-gray-700 mb-1"
282 | >Email</label
283 | >
284 | <input
285 | type="email"
286 | id="email"
287 | name="email"
288 | required
289 | class="w-full px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-primary/50 focus:border-primary"
290 | />
291 | </div>
292 | <div>
293 | <label
294 | for="password"
295 | class="block text-sm font-medium text-gray-700 mb-1"
296 | >Password</label
297 | >
298 | <input
299 | type="password"
300 | id="password"
301 | name="password"
302 | required
303 | class="w-full px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-primary/50 focus:border-primary"
304 | />
305 | </div>
306 | </div>
307 | <button
308 | type="submit"
309 | name="action"
310 | value="login_approve"
311 | class="w-full py-3 px-4 bg-primary text-white rounded-md font-medium hover:bg-primary/90 transition-colors"
312 | >
313 | Log in and Approve
314 | </button>
315 | <button
316 | type="submit"
317 | name="action"
318 | value="reject"
319 | class="w-full py-3 px-4 border border-gray-300 text-gray-700 rounded-md font-medium hover:bg-gray-50 transition-colors"
320 | >
321 | Reject
322 | </button>
323 | </form>
324 | </div>
325 | `;
326 | };
327 |
328 | export const renderApproveContent = async (
329 | message: string,
330 | status: string,
331 | redirectUrl: string
332 | ) => {
333 | return html`
334 | <div class="max-w-md mx-auto bg-white p-8 rounded-lg shadow-md text-center">
335 | <div class="mb-4">
336 | <span
337 | class="inline-block p-3 ${status === 'success'
338 | ? 'bg-green-100 text-green-800'
339 | : 'bg-red-100 text-red-800'} rounded-full"
340 | >
341 | ${status === 'success' ? '✓' : '✗'}
342 | </span>
343 | </div>
344 | <h1 class="text-2xl font-heading font-bold mb-4 text-gray-900">
345 | ${message}
346 | </h1>
347 | <p class="mb-8 text-gray-600">
348 | You will be redirected back to the application shortly.
349 | </p>
350 | ${raw(`
351 | <script>
352 | setTimeout(() => {
353 | window.location.href = "${redirectUrl}";
354 | }, 1000);
355 | </script>
356 | `)}
357 | </div>
358 | `;
359 | };
360 |
361 | export const renderAuthorizationApprovedContent = async (
362 | redirectUrl: string
363 | ) => {
364 | return renderApproveContent(
365 | 'Authorization approved!',
366 | 'success',
367 | redirectUrl
368 | );
369 | };
370 |
371 | export const renderAuthorizationRejectedContent = async (
372 | redirectUrl: string
373 | ) => {
374 | return renderApproveContent('Authorization rejected.', 'error', redirectUrl);
375 | };
376 |
377 | export const parseApproveFormBody = async (body: {
378 | [x: string]: string | File;
379 | }) => {
380 | const action = body.action as string;
381 | const email = body.email as string;
382 | const password = body.password as string;
383 | let oauthReqInfo: AuthRequest | null = null;
384 | try {
385 | oauthReqInfo = JSON.parse(body.oauthReqInfo as string) as AuthRequest;
386 | } catch (e) {
387 | oauthReqInfo = null;
388 | }
389 |
390 | return {action, oauthReqInfo, email, password};
391 | };
392 |
393 | export const renderPaymentSuccessContent =
394 | async (): Promise<HtmlEscapedString> => {
395 | return html`
396 | <div
397 | class="max-w-md mx-auto bg-white p-8 rounded-lg shadow-md text-center"
398 | >
399 | <h1 class="text-2xl font-heading font-bold mb-4 text-gray-900">
400 | Payment Successful!
401 | </h1>
402 | <p class="mb-8 text-gray-600">
403 | You can return to the MCP client now and rerun the tool.
404 | </p>
405 | </div>
406 | `;
407 | };
408 |
```
--------------------------------------------------------------------------------
/skills/get-started-kiro.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: "stripe"
3 | displayName: "Stripe Payments"
4 | description: "Build payment integrations with Stripe - accept payments, manage subscriptions, handle billing, and process refunds"
5 | keywords:
6 | [
7 | "stripe",
8 | "payments",
9 | "checkout",
10 | "subscriptions",
11 | "billing",
12 | "invoices",
13 | "refunds",
14 | "payment-intents",
15 | ]
16 | author: "Stripe"
17 | ---
18 |
19 | # Stripe Payments Power
20 |
21 | ## Overview
22 |
23 | Build payment integrations with Stripe's comprehensive payment platform. Accept one-time payments and subscriptions, manage customer billing, process refunds, and handle complex payment flows. This power provides access to Stripe's APIs through an MCP server, enabling you to build production-ready payment systems.
24 |
25 | Use Stripe Checkout for hosted payment pages, Payment Intents for custom payment flows, or Billing APIs for subscription management. The platform handles PCI compliance and fraud detection, and supports 135+ currencies and payment methods worldwide.
26 |
27 | **Key capabilities:**
28 |
29 | - **Checkout Sessions**: Hosted payment pages for one-time payments and subscriptions
30 | - **Payment Intents**: Custom payment flows with full control over the checkout experience
31 | - **Subscriptions**: Recurring billing with flexible pricing models
32 | - **Customers**: Manage customer data and saved payment methods
33 | - **Invoices**: Generate and send invoices with automatic payment collection
34 | - **Refunds**: Process full or partial refunds
35 | - **Payment Methods**: Save and reuse payment methods for future charges
36 |
37 | **Authentication**: Requires a Stripe secret API key for server-side operations; never expose it in client code. A Stripe publishable key is needed only for client-side operations like Elements or Checkout, and is safe to include in browser code.
38 |
39 | ## Available MCP Servers
40 |
41 | ### stripe
42 |
43 | **Connection:** HTTPS API endpoint at `https://mcp.stripe.com`
44 | **Authorization:** Use OAuth to connect to the Stripe MCP server
45 |
46 | ## Best Practices
47 |
48 | ### Integration Approach
49 |
50 | **Always prefer Checkout Sessions** for standard payment flows:
51 |
52 | - One-time payments
53 | - Subscription sign-ups
54 | - Hosted (preferred) or embedded checkout forms
55 |
56 | **Use Payment Intents** only when:
57 |
58 | - Building custom checkout UI
59 | - Handling off-session payments
60 | - Need full control over payment state
61 |
62 | **Never use the deprecated Charges API** - migrate to Checkout Sessions or Payment Intents.
63 | **Use Payment Links** when:
64 |
65 | - User wants a _No code_ Stripe integration
66 | - Quickly create shareable payment pages
67 | - Selling products or collecting donations with minimal setup
68 |
69 | ### Payment Methods
70 |
71 | **Enable dynamic payment methods** in Dashboard settings instead of hardcoding `payment_method_types`. Stripe automatically shows optimal payment methods based on:
72 |
73 | - Customer location
74 | - Available wallets
75 | - User preferences
76 | - Transaction context
77 |
78 | ### Subscriptions
79 |
80 | **For recurring revenue models**, use Billing APIs:
81 |
82 | - Follow [Subscription Use Cases](https://docs.stripe.com/billing/subscriptions/use-cases)
83 | - Use [SaaS integration patterns](https://docs.stripe.com/saas)
84 | - Combine with Checkout for frontend
85 | - [Plan your integration](https://docs.stripe.com/billing/subscriptions/designing-integration)
86 | - [Usage-based billing to charge customers based on their usage of your product or service](https://docs.stripe.com/billing/subscriptions/usage-based)
87 |
88 | ### Stripe Connect
89 |
90 | **For platforms managing fund flows**:
91 |
92 | - Use **direct charges** if platform accepts risk (Stripe handles liability)
93 | - Use **destination charges** if platform manages risk (platform handles negative balances)
94 | - Use `on_behalf_of` parameter to control merchant of record
95 | - Never mix charge types
96 | - Refer to [controller properties](https://docs.stripe.com/connect/migrate-to-controller-properties.md), not the legacy Standard/Express/Custom terms
97 | - Follow [integration recommendations](https://docs.stripe.com/connect/integration-recommendations.md)
98 |
99 | ### Saving Payment Methods
100 |
101 | **Use Setup Intents API** to save payment methods for future use:
102 |
103 | - Never use deprecated Sources API
104 | - For pre-authorization before payment, use Confirmation Tokens
105 | - Don't call `createPaymentMethod` or `createToken` directly
106 |
107 | ### PCI Compliance
108 |
109 | **For server-side raw PAN data**:
110 |
111 | - Requires PCI compliance proof
112 | - Use `payment_method_data` parameter
113 | - For migrations, follow [PAN import process](https://docs.stripe.com/get-started/data-migrations/pan-import)
114 |
115 | ### Before Going Live
116 |
117 | Review the [Go Live Checklist](https://docs.stripe.com/get-started/checklist/go-live):
118 |
119 | - Test with sandbox keys
120 | - Handle webhooks for async events
121 | - Implement error handling
122 | - Set up proper logging
123 | - Configure tax and compliance settings
124 |
125 | ## Common Workflows
126 |
127 | ### Workflow 1: Accept One-Time Payment
128 |
129 | ```javascript
130 | // Step 1: Create Checkout Session
131 | const session = createCheckoutSession({
132 | mode: "payment",
133 | line_items: [
134 | {
135 | price_data: {
136 | currency: "usd",
137 | product_data: { name: "Premium Plan" },
138 | unit_amount: 2999,
139 | },
140 | quantity: 1,
141 | },
142 | ],
143 | success_url: "https://example.com/success",
144 | cancel_url: "https://example.com/cancel",
145 | });
146 |
147 | // Step 2: Redirect customer to session.url
148 | // Step 3: Handle webhook for payment_intent.succeeded
149 | ```
150 |
151 | ### Workflow 2: Create Subscription
152 |
153 | ```javascript
154 | // Step 1: Create or retrieve customer
155 | const customer = createCustomer({
156 | email: "[email protected]",
157 | name: "Jane Doe",
158 | });
159 |
160 | // Step 2: Create Checkout Session for subscription
161 | const session = createCheckoutSession({
162 | mode: "subscription",
163 | customer: customer.id,
164 | line_items: [
165 | {
166 | price: "price_monthly_premium",
167 | quantity: 1,
168 | },
169 | ],
170 | success_url: "https://example.com/success",
171 | cancel_url: "https://example.com/cancel",
172 | });
173 |
174 | // Step 3: Handle webhook for customer.subscription.created
175 | ```
176 |
177 | ### Workflow 3: Process Refund
178 |
179 | ```javascript
180 | // Step 1: Retrieve payment intent or charge
181 | const paymentIntent = retrievePaymentIntent("pi_xxx");
182 |
183 | // Step 2: Create refund
184 | const refund = createRefund({
185 | payment_intent: paymentIntent.id,
186 | amount: 1000, // Partial refund in cents, omit for full refund
187 | });
188 |
189 | // Step 3: Handle webhook for charge.refunded
190 | ```
191 |
192 | ### Workflow 4: Save Payment Method for Future Use
193 |
194 | ```javascript
195 | // Step 1: Create Setup Intent
196 | const setupIntent = createSetupIntent({
197 | customer: "cus_xxx",
198 | payment_method_types: ["card"],
199 | });
200 |
201 | // Step 2: Collect payment method on frontend with Setup Intent
202 | // Step 3: Handle webhook for setup_intent.succeeded
203 | // Step 4: Use saved payment method for future charges
204 | const paymentIntent = createPaymentIntent({
205 | amount: 2999,
206 | currency: "usd",
207 | customer: "cus_xxx",
208 | payment_method: "pm_xxx",
209 | off_session: true,
210 | confirm: true,
211 | });
212 | ```
213 |
214 | ## Best Practices Summary
215 |
216 | ### ✅ Do:
217 |
218 | - **Use Checkout Sessions** for standard payment flows
219 | - **Enable dynamic payment methods** in Dashboard settings
220 | - **Use Billing APIs** for subscription models
221 | - **Use Setup Intents** to save payment methods
222 | - **Handle webhooks** for all async events
223 | - **Test thoroughly** in sandbox before going live
224 | - **Follow the Go Live Checklist** before production
225 | - **Do not include API version** in code snippets. Read https://docs.stripe.com/api/versioning.md for more information on versions
226 | - **Implement idempotency keys** for safe retries
227 | - **Log all API interactions** for debugging
228 |
229 | ### ❌ Don't:
230 |
231 | - **Use Charges API** - it's deprecated, migrate to Payment Intents
232 | - **Use Sources API** - deprecated for saving cards
233 | - **Use Card Element** - migrate to Payment Element
234 | - **Hardcode payment_method_types** - use dynamic payment methods
235 | - **Mix Connect charge types** - choose one approach
236 | - **Skip webhook handling** - critical for payment confirmation
237 | - **Use production keys in development** - always use test keys
238 | - **Ignore errors** - implement proper error handling
239 | - **Skip PCI compliance** - required for handling card data
240 | - **Forget to test edge cases** - declined cards, network failures, etc.
241 | - **Expose API secret keys** - never include secret keys in client-side code, mobile apps, or public repositories
242 |
243 | ## Configuration
244 |
245 | **Authentication Required**: Stripe secret key
246 |
247 | **Setup Steps:**
248 |
249 | 1. Create Stripe account at https://stripe.com
250 | 2. Navigate to Developers → API keys
251 | 3. Copy your secret key (starts with `sk_test_` for [sandboxes](https://docs.stripe.com/sandboxes/dashboard/manage))
252 | 4. (Optional) Copy your publishable key (starts with `pk_test_` for sandboxes). Only needed for Stripe client-side code.
253 | 5. For production, use live mode keys (starting with `sk_live_` and `pk_live_`)
254 | 6. Configure key in Kiro Powers UI when installing this power
255 |
256 | **Permissions**: The secret key has full API access - keep it secure and never expose it client-side. Publishable keys (pk\_...) are intended to be embedded in client-side code and are safe to expose.
257 |
258 | **MCP Configuration:**
259 |
260 | ```json
261 | {
262 | "mcpServers": {
263 | "stripe": {
264 | "url": "https://mcp.stripe.com"
265 | }
266 | }
267 | }
268 | ```
269 |
270 | ## Troubleshooting
271 |
272 | ### Error: "Invalid API key"
273 |
274 | **Cause:** Incorrect or missing API key
275 | **Solution:**
276 |
277 | 1. Verify key starts with `sk_test_` or `sk_live_`
278 | 2. Check key hasn't been deleted in Dashboard
279 | 3. Ensure using secret key, not publishable key
280 | 4. Regenerate key if compromised
281 |
282 | ### Error: "Payment method not available"
283 |
284 | **Cause:** Payment method not enabled or not supported in region
285 | **Solution:**
286 |
287 | 1. Enable payment methods in Dashboard → Settings → Payment methods
288 | 2. Check customer location supports the payment method
289 | 3. Use dynamic payment methods instead of hardcoding types
290 | 4. Verify currency is supported by payment method
291 |
292 | ### Error: "Customer not found"
293 |
294 | **Cause:** Invalid customer ID or customer deleted
295 | **Solution:**
296 |
297 | 1. Verify customer ID format (starts with `cus_`)
298 | 2. Check customer exists in Dashboard
299 | 3. Ensure using correct API mode (test vs live)
300 | 4. Create the customer if it doesn't exist
301 |
302 | ### Error: "Subscription creation failed"
303 |
304 | **Cause:** Missing required parameters or invalid price ID
305 | **Solution:**
306 |
307 | 1. Verify price ID exists (starts with `price_`)
308 | 2. Ensure price is active in Dashboard
309 | 3. Check customer has valid payment method
310 | 4. Review subscription parameters match price configuration
311 |
312 | ### Webhook not received
313 |
314 | **Cause:** Webhook endpoint not configured or failing
315 | **Solution:**
316 |
317 | 1. Configure webhook endpoint in Dashboard → Developers → Webhooks
318 | 2. Verify endpoint is publicly accessible
319 | 3. Check endpoint returns 200 status
320 | 4. Review webhook logs in Dashboard
321 | 5. Test with Stripe CLI: `stripe listen --forward-to localhost:3000/webhook`
322 |
323 | ### Payment declined
324 |
325 | **Cause:** Card declined by issuer or failed fraud check
326 | **Solution:**
327 |
328 | 1. Use test cards from [Stripe testing docs](https://docs.stripe.com/testing)
329 | 2. Check decline code in error response
330 | 3. Implement proper error messaging for customer
331 | 4. In production, the customer should contact their bank
332 | 5. Review Radar rules if fraud detection triggered
333 |
334 | ## Tips
335 |
336 | 1. **Start with Checkout** - Fastest way to accept payments with minimal code
337 | 2. **Use sandbox extensively** - Test all scenarios before going live
338 | 3. **Implement webhooks early** - Critical for handling async events
339 | 4. **Use Stripe CLI** - Test webhooks locally during development
340 | 5. **Follow integration guides** - Use [API Tour](https://docs.stripe.com/payments-api/tour) and [Integration Options](https://docs.stripe.com/payments/payment-methods/integration-options)
341 | 6. **Monitor Dashboard** - Review payments, disputes, and logs regularly
342 | 7. **Handle errors gracefully** - Show clear messages to customers
343 | 8. **Use idempotency keys** - Prevent duplicate charges on retries
344 | 9. **Keep keys secure** - Never commit to version control
345 | 10. **Stay updated** - Review API changelog for new features and deprecations
346 |
347 | ## Resources
348 |
349 | - [Integration Options](https://docs.stripe.com/payments/payment-methods/integration-options)
350 | - [API Tour](https://docs.stripe.com/payments-api/tour)
351 | - [Go Live Checklist](https://docs.stripe.com/get-started/checklist/go-live)
352 | - [Checkout Sessions](https://docs.stripe.com/api/checkout/sessions)
353 | - [Payment Intents](https://docs.stripe.com/payments/paymentintents/lifecycle)
354 | - [Subscription Use Cases](https://docs.stripe.com/billing/subscriptions/use-cases)
355 | - [Connect Integration](https://docs.stripe.com/connect/design-an-integration)
356 | - [Testing](https://docs.stripe.com/testing)
357 |
358 | ---
359 |
360 | **License:** Proprietary
361 |
```
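The skill above repeatedly tells the agent to "handle webhooks" (for example `payment_intent.succeeded` in Workflow 1 and `customer.subscription.created` in Workflow 2) without showing what a handler looks like. As a minimal sketch only, assuming an Express server and the official `stripe` Node library; the endpoint path, port, and environment variable names are illustrative and not part of the toolkit:

```typescript
import express from 'express';
import Stripe from 'stripe';

const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!);
const app = express();

// Signature verification needs the raw request body, not parsed JSON.
app.post('/webhook', express.raw({type: 'application/json'}), (req, res) => {
  const signature = req.headers['stripe-signature'] as string;

  let event: Stripe.Event;
  try {
    event = stripe.webhooks.constructEvent(
      req.body,
      signature,
      process.env.STRIPE_WEBHOOK_SECRET! // from Dashboard → Developers → Webhooks, or `stripe listen`
    );
  } catch {
    // Reject deliveries whose signature does not verify.
    res.status(400).send('Webhook signature verification failed');
    return;
  }

  switch (event.type) {
    case 'payment_intent.succeeded':
      // Fulfill the order here (Workflow 1, step 3).
      break;
    case 'customer.subscription.created':
      // Provision the subscription here (Workflow 2, step 3).
      break;
    default:
      break;
  }

  // Acknowledge promptly with a 2xx so Stripe does not retry the delivery.
  res.sendStatus(200);
});

app.listen(3000);
```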
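Likewise, the checklist item "Implement idempotency keys for safe retries" has no accompanying snippet. A hedged sketch with the `stripe` Node library, where the `chargeOrder` helper and the key derived from a hypothetical `orderId` are illustrative; any value that stays stable across retries works:

```typescript
import Stripe from 'stripe';

const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!);

// Reusing the same idempotency key makes Stripe return the original
// PaymentIntent instead of creating a duplicate charge on a retry.
async function chargeOrder(orderId: string, customerId: string, paymentMethodId: string) {
  return stripe.paymentIntents.create(
    {
      amount: 2999,
      currency: 'usd',
      customer: customerId,
      payment_method: paymentMethodId,
      off_session: true,
      confirm: true,
    },
    {idempotencyKey: `order-${orderId}-charge`}
  );
}
```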
--------------------------------------------------------------------------------
/llm/ai-sdk/provider/tests/utils.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for Stripe provider utility functions
3 | */
4 |
5 | import {
6 | convertToOpenAIMessages,
7 | mapOpenAIFinishReason,
8 | normalizeModelId,
9 | } from '../utils';
10 |
11 | describe('Stripe Provider Utils', () => {
12 | describe('convertToOpenAIMessages', () => {
13 | it('should convert system message', () => {
14 | const result = convertToOpenAIMessages([
15 | {
16 | role: 'system',
17 | content: 'You are a helpful assistant.',
18 | },
19 | ]);
20 |
21 | expect(result).toHaveLength(1);
22 | expect(result[0]).toEqual({
23 | role: 'system',
24 | content: 'You are a helpful assistant.',
25 | });
26 | });
27 |
28 | it('should convert user text message', () => {
29 | const result = convertToOpenAIMessages([
30 | {
31 | role: 'user',
32 | content: [{type: 'text', text: 'Hello!'}],
33 | },
34 | ]);
35 |
36 | expect(result).toHaveLength(1);
37 | expect(result[0].role).toBe('user');
38 | // Single text messages are sent as simple strings for Anthropic compatibility
39 | expect(result[0].content).toBe('Hello!');
40 | });
41 |
42 | it('should convert user message with file URL', () => {
43 | const result = convertToOpenAIMessages([
44 | {
45 | role: 'user',
46 | content: [
47 | {type: 'text', text: 'What is this?'},
48 | {
49 | type: 'file',
50 | data: 'https://example.com/image.jpg',
51 | mediaType: 'image/jpeg',
52 | },
53 | ],
54 | },
55 | ]);
56 |
57 | expect(result).toHaveLength(1);
58 | // Multi-part messages should remain as arrays
59 | expect(Array.isArray(result[0].content)).toBe(true);
60 | const content = result[0].content as any[];
61 | expect(content).toHaveLength(2);
62 | expect(content[1].type).toBe('image_url');
63 | expect(content[1].image_url.url).toBe('https://example.com/image.jpg');
64 | });
65 |
66 | it('should convert user message with file Uint8Array to base64', () => {
67 | const fileData = new Uint8Array([137, 80, 78, 71]); // PNG header
68 |
69 | const result = convertToOpenAIMessages([
70 | {
71 | role: 'user',
72 | content: [
73 | {
74 | type: 'file',
75 | data: fileData,
76 | mediaType: 'image/png',
77 | },
78 | ],
79 | },
80 | ]);
81 |
82 | expect(result).toHaveLength(1);
83 | // Single file becomes array with one element
84 | expect(Array.isArray(result[0].content)).toBe(true);
85 | const content = result[0].content as any[];
86 | expect(content).toHaveLength(1);
87 | expect(content[0].type).toBe('image_url');
88 | expect(content[0].image_url.url).toMatch(/^data:image\/png;base64,/);
89 | });
90 |
91 | it('should convert assistant message with text', () => {
92 | const result = convertToOpenAIMessages([
93 | {
94 | role: 'assistant',
95 | content: [{type: 'text', text: 'Hello! How can I help?'}],
96 | },
97 | ]);
98 |
99 | expect(result).toHaveLength(1);
100 | expect(result[0]).toEqual({
101 | role: 'assistant',
102 | content: 'Hello! How can I help?',
103 | tool_calls: undefined,
104 | });
105 | });
106 |
107 | it('should convert assistant message with tool calls', () => {
108 | const result = convertToOpenAIMessages([
109 | {
110 | role: 'assistant',
111 | content: [
112 | {
113 | type: 'tool-call',
114 | toolCallId: 'call_123',
115 | toolName: 'getWeather',
116 | input: {location: 'San Francisco'},
117 | },
118 | ],
119 | },
120 | ]);
121 |
122 | expect(result).toHaveLength(1);
123 | expect(result[0].role).toBe('assistant');
124 | expect(result[0].content).toBe(''); // Empty string when only tool calls
125 | expect(result[0].tool_calls).toHaveLength(1);
126 | expect(result[0].tool_calls![0]).toEqual({
127 | id: 'call_123',
128 | type: 'function',
129 | function: {
130 | name: 'getWeather',
131 | arguments: '{"location":"San Francisco"}',
132 | },
133 | });
134 | });
135 |
136 | it('should convert assistant message with text and tool calls', () => {
137 | const result = convertToOpenAIMessages([
138 | {
139 | role: 'assistant',
140 | content: [
141 | {type: 'text', text: 'Let me check the weather.'},
142 | {
143 | type: 'tool-call',
144 | toolCallId: 'call_123',
145 | toolName: 'getWeather',
146 | input: {location: 'Paris'},
147 | },
148 | ],
149 | },
150 | ]);
151 |
152 | expect(result).toHaveLength(1);
153 | expect(result[0].role).toBe('assistant');
154 | expect(result[0].content).toBe('Let me check the weather.');
155 | expect(result[0].tool_calls).toHaveLength(1);
156 | });
157 |
158 | it('should convert tool message', () => {
159 | const result = convertToOpenAIMessages([
160 | {
161 | role: 'tool',
162 | content: [
163 | {
164 | type: 'tool-result',
165 | toolCallId: 'call_123',
166 | toolName: 'getWeather',
167 | output: {type: 'json', value: {temperature: 72, condition: 'Sunny'}},
168 | },
169 | ],
170 | },
171 | ]);
172 |
173 | expect(result).toHaveLength(1);
174 | expect(result[0]).toEqual({
175 | role: 'tool',
176 | tool_call_id: 'call_123',
177 | name: 'getWeather',
178 | content: '{"temperature":72,"condition":"Sunny"}',
179 | });
180 | });
181 |
182 | it('should handle string tool call args', () => {
183 | const result = convertToOpenAIMessages([
184 | {
185 | role: 'assistant',
186 | content: [
187 | {
188 | type: 'tool-call',
189 | toolCallId: 'call_123',
190 | toolName: 'test',
191 | input: '{"key":"value"}',
192 | },
193 | ],
194 | },
195 | ]);
196 |
197 | expect(result[0].tool_calls![0].function.arguments).toBe('{"key":"value"}');
198 | });
199 |
200 | it('should handle string tool result', () => {
201 | const result = convertToOpenAIMessages([
202 | {
203 | role: 'tool',
204 | content: [
205 | {
206 | type: 'tool-result',
207 | toolCallId: 'call_123',
208 | toolName: 'test',
209 | output: {type: 'text', value: 'Simple string result'},
210 | },
211 | ],
212 | },
213 | ]);
214 |
215 | expect(result[0].content).toBe('Simple string result');
216 | });
217 |
218 | it('should convert multiple messages', () => {
219 | const result = convertToOpenAIMessages([
220 | {role: 'system', content: 'You are helpful.'},
221 | {role: 'user', content: [{type: 'text', text: 'Hello'}]},
222 | {role: 'assistant', content: [{type: 'text', text: 'Hi!'}]},
223 | ]);
224 |
225 | expect(result).toHaveLength(3);
226 | expect(result[0].role).toBe('system');
227 | expect(result[1].role).toBe('user');
228 | expect(result[2].role).toBe('assistant');
229 | });
230 |
231 | it('should throw error for unsupported message role', () => {
232 | expect(() => {
233 | convertToOpenAIMessages([
234 | // @ts-expect-error - Testing invalid role
235 | {role: 'invalid', content: 'test'},
236 | ]);
237 | }).toThrow('Unsupported message role');
238 | });
239 |
240 | it('should throw error for unsupported part type', () => {
241 | expect(() => {
242 | convertToOpenAIMessages([
243 | {
244 | role: 'user',
245 | // @ts-expect-error - Testing invalid part type
246 | content: [{type: 'unsupported', data: 'test'}],
247 | },
248 | ]);
249 | }).toThrow('Unsupported user message part type');
250 | });
251 | });
252 |
253 | describe('mapOpenAIFinishReason', () => {
254 | it('should map "stop" to "stop"', () => {
255 | expect(mapOpenAIFinishReason('stop')).toBe('stop');
256 | });
257 |
258 | it('should map "length" to "length"', () => {
259 | expect(mapOpenAIFinishReason('length')).toBe('length');
260 | });
261 |
262 | it('should map "content_filter" to "content-filter"', () => {
263 | expect(mapOpenAIFinishReason('content_filter')).toBe('content-filter');
264 | });
265 |
266 | it('should map "tool_calls" to "tool-calls"', () => {
267 | expect(mapOpenAIFinishReason('tool_calls')).toBe('tool-calls');
268 | });
269 |
270 | it('should map "function_call" to "tool-calls"', () => {
271 | expect(mapOpenAIFinishReason('function_call')).toBe('tool-calls');
272 | });
273 |
274 | it('should map null to "unknown"', () => {
275 | expect(mapOpenAIFinishReason(null)).toBe('unknown');
276 | });
277 |
278 | it('should map undefined to "unknown"', () => {
279 | expect(mapOpenAIFinishReason(undefined)).toBe('unknown');
280 | });
281 |
282 | it('should map unknown reason to "unknown"', () => {
283 | expect(mapOpenAIFinishReason('some_other_reason')).toBe('unknown');
284 | });
285 | });
286 |
287 | describe('normalizeModelId', () => {
288 | describe('Anthropic models', () => {
289 | it('should remove date suffix (YYYYMMDD format)', () => {
290 | // Note: The date is removed AND version dashes are converted to dots
291 | expect(normalizeModelId('anthropic/claude-3-5-sonnet-20241022')).toBe(
292 | 'anthropic/claude-3.5-sonnet'
293 | );
294 | expect(normalizeModelId('anthropic/claude-sonnet-4-20250101')).toBe(
295 | 'anthropic/claude-sonnet-4'
296 | );
297 | expect(normalizeModelId('anthropic/claude-opus-4-20241231')).toBe(
298 | 'anthropic/claude-opus-4'
299 | );
300 | });
301 |
302 | it('should remove -latest suffix', () => {
303 | expect(normalizeModelId('anthropic/claude-sonnet-4-latest')).toBe(
304 | 'anthropic/claude-sonnet-4'
305 | );
306 | expect(normalizeModelId('anthropic/claude-opus-4-latest')).toBe(
307 | 'anthropic/claude-opus-4'
308 | );
309 | });
310 |
311 | it('should convert version dashes to dots (claude-3-5 → claude-3.5)', () => {
312 | expect(normalizeModelId('anthropic/claude-3-5-sonnet')).toBe(
313 | 'anthropic/claude-3.5-sonnet'
314 | );
315 | expect(normalizeModelId('anthropic/claude-3-7-sonnet')).toBe(
316 | 'anthropic/claude-3.7-sonnet'
317 | );
318 | });
319 |
320 | it('should handle version numbers without model names (sonnet-4-5 → sonnet-4.5)', () => {
321 | expect(normalizeModelId('anthropic/sonnet-4-5')).toBe(
322 | 'anthropic/sonnet-4.5'
323 | );
324 | expect(normalizeModelId('anthropic/opus-4-1')).toBe(
325 | 'anthropic/opus-4.1'
326 | );
327 | });
328 |
329 | it('should handle combined date suffix and version conversion', () => {
330 | expect(normalizeModelId('anthropic/claude-3-5-sonnet-20241022')).toBe(
331 | 'anthropic/claude-3.5-sonnet'
332 | );
333 | expect(normalizeModelId('anthropic/claude-3-7-sonnet-20250115')).toBe(
334 | 'anthropic/claude-3.7-sonnet'
335 | );
336 | });
337 |
338 | it('should handle -latest suffix with version conversion', () => {
339 | expect(normalizeModelId('anthropic/claude-3-5-sonnet-latest')).toBe(
340 | 'anthropic/claude-3.5-sonnet'
341 | );
342 | expect(normalizeModelId('anthropic/sonnet-4-5-latest')).toBe(
343 | 'anthropic/sonnet-4.5'
344 | );
345 | });
346 |
347 | it('should handle models without dates or versions', () => {
348 | expect(normalizeModelId('anthropic/claude-sonnet')).toBe(
349 | 'anthropic/claude-sonnet'
350 | );
351 | expect(normalizeModelId('anthropic/claude-opus')).toBe(
352 | 'anthropic/claude-opus'
353 | );
354 | });
355 |
356 | it('should handle case-insensitive provider names', () => {
357 | expect(normalizeModelId('Anthropic/claude-3-5-sonnet-20241022')).toBe(
358 | 'Anthropic/claude-3.5-sonnet'
359 | );
360 | expect(normalizeModelId('ANTHROPIC/claude-3-5-sonnet-20241022')).toBe(
361 | 'ANTHROPIC/claude-3.5-sonnet'
362 | );
363 | });
364 | });
365 |
366 | describe('OpenAI models', () => {
367 | it('should remove date suffix in YYYY-MM-DD format', () => {
368 | expect(normalizeModelId('openai/gpt-4-turbo-2024-04-09')).toBe(
369 | 'openai/gpt-4-turbo'
370 | );
371 | expect(normalizeModelId('openai/gpt-4-2024-12-31')).toBe(
372 | 'openai/gpt-4'
373 | );
374 | });
375 |
376 | it('should keep gpt-4o-2024-05-13 as an exception', () => {
377 | expect(normalizeModelId('openai/gpt-4o-2024-05-13')).toBe(
378 | 'openai/gpt-4o-2024-05-13'
379 | );
380 | });
381 |
382 | it('should handle models without dates', () => {
383 | expect(normalizeModelId('openai/gpt-5')).toBe('openai/gpt-5');
384 | expect(normalizeModelId('openai/gpt-4.1')).toBe('openai/gpt-4.1');
385 | expect(normalizeModelId('openai/o3')).toBe('openai/o3');
386 | });
387 |
388 | it('should handle case-insensitive provider names', () => {
389 | expect(normalizeModelId('OpenAI/gpt-4-2024-12-31')).toBe(
390 | 'OpenAI/gpt-4'
391 | );
392 | expect(normalizeModelId('OPENAI/gpt-4-turbo-2024-04-09')).toBe(
393 | 'OPENAI/gpt-4-turbo'
394 | );
395 | });
396 |
397 | it('should not affect YYYYMMDD format (only YYYY-MM-DD)', () => {
398 | // OpenAI only removes YYYY-MM-DD format, not YYYYMMDD
399 | expect(normalizeModelId('openai/gpt-4-20241231')).toBe(
400 | 'openai/gpt-4-20241231'
401 | );
402 | });
403 | });
404 |
405 | describe('Google/Gemini models', () => {
406 | it('should keep models as-is', () => {
407 | expect(normalizeModelId('google/gemini-2.5-pro')).toBe(
408 | 'google/gemini-2.5-pro'
409 | );
410 | expect(normalizeModelId('google/gemini-2.0-flash')).toBe(
411 | 'google/gemini-2.0-flash'
412 | );
413 | expect(normalizeModelId('google/gemini-1.5-pro')).toBe(
414 | 'google/gemini-1.5-pro'
415 | );
416 | });
417 |
418 | it('should not remove any suffixes', () => {
419 | expect(normalizeModelId('google/gemini-2.5-pro-20250101')).toBe(
420 | 'google/gemini-2.5-pro-20250101'
421 | );
422 | expect(normalizeModelId('google/gemini-2.5-pro-latest')).toBe(
423 | 'google/gemini-2.5-pro-latest'
424 | );
425 | });
426 | });
427 |
428 | describe('Other providers', () => {
429 | it('should keep unknown provider models as-is', () => {
430 | expect(normalizeModelId('bedrock/claude-3-5-sonnet')).toBe(
431 | 'bedrock/claude-3-5-sonnet'
432 | );
433 | expect(normalizeModelId('azure/gpt-4-2024-12-31')).toBe(
434 | 'azure/gpt-4-2024-12-31'
435 | );
436 | expect(normalizeModelId('custom/my-model-1-2-3')).toBe(
437 | 'custom/my-model-1-2-3'
438 | );
439 | });
440 | });
441 |
442 | describe('Edge cases', () => {
443 | it('should handle model IDs without provider prefix', () => {
444 | // If no slash, return as-is
445 | expect(normalizeModelId('gpt-5')).toBe('gpt-5');
446 | expect(normalizeModelId('claude-sonnet-4')).toBe('claude-sonnet-4');
447 | });
448 |
449 | it('should handle model IDs with multiple slashes', () => {
450 | // If more than one slash, return as-is
451 | expect(normalizeModelId('provider/category/model')).toBe(
452 | 'provider/category/model'
453 | );
454 | });
455 |
456 | it('should handle empty strings', () => {
457 | expect(normalizeModelId('')).toBe('');
458 | });
459 | });
460 | });
461 | });
462 |
463 |
```
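The assertions above pin down the per-provider normalization rules: Anthropic IDs lose `YYYYMMDD` and `-latest` suffixes and have version dashes turned into dots, OpenAI IDs lose `YYYY-MM-DD` suffixes except `gpt-4o-2024-05-13`, and everything else passes through unchanged. As a rough illustration of those rules only (a hypothetical sketch, not the implementation in `../utils`, which may differ):

```typescript
// Hypothetical sketch of the behavior the tests describe.
function normalizeModelId(modelId: string): string {
  const parts = modelId.split('/');
  if (parts.length !== 2) return modelId; // no provider prefix, or nested path: leave as-is

  const [provider, model] = parts;
  const p = provider.toLowerCase(); // provider matching is case-insensitive, casing is preserved

  if (p === 'anthropic') {
    const stripped = model
      .replace(/-\d{8}$/, '') // drop YYYYMMDD date suffix
      .replace(/-latest$/, ''); // drop -latest suffix
    // claude-3-5-sonnet -> claude-3.5-sonnet, sonnet-4-5 -> sonnet-4.5
    return `${provider}/${stripped.replace(/(\d)-(\d)/g, '$1.$2')}`;
  }

  if (p === 'openai') {
    if (model === 'gpt-4o-2024-05-13') return modelId; // kept as an explicit exception
    return `${provider}/${model.replace(/-\d{4}-\d{2}-\d{2}$/, '')}`; // drop YYYY-MM-DD suffix only
  }

  // google/gemini and unknown providers pass through unchanged
  return modelId;
}
```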
--------------------------------------------------------------------------------
/llm/token-meter/tests/type-detection.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for type detection utilities
3 | */
4 |
5 | import {
6 | detectResponse,
7 | isGeminiStream,
8 | extractUsageFromChatStream,
9 | extractUsageFromResponseStream,
10 | extractUsageFromAnthropicStream,
11 | } from '../utils/type-detection';
12 |
13 | describe('detectResponse - OpenAI Chat Completions', () => {
14 | it('should detect OpenAI chat completion response', () => {
15 | const response = {
16 | id: 'chatcmpl-123',
17 | object: 'chat.completion',
18 | created: Date.now(),
19 | model: 'gpt-4',
20 | choices: [
21 | {
22 | index: 0,
23 | message: {
24 | role: 'assistant',
25 | content: 'Hello!',
26 | },
27 | finish_reason: 'stop',
28 | },
29 | ],
30 | usage: {
31 | prompt_tokens: 10,
32 | completion_tokens: 5,
33 | total_tokens: 15,
34 | },
35 | };
36 |
37 | const detected = detectResponse(response);
38 |
39 | expect(detected).not.toBeNull();
40 | expect(detected?.provider).toBe('openai');
41 | expect(detected?.type).toBe('chat_completion');
42 | expect(detected?.model).toBe('gpt-4');
43 | expect(detected?.inputTokens).toBe(10);
44 | expect(detected?.outputTokens).toBe(5);
45 | });
46 |
47 | it('should handle missing usage data in chat completion', () => {
48 | const response = {
49 | id: 'chatcmpl-123',
50 | object: 'chat.completion',
51 | created: Date.now(),
52 | model: 'gpt-4',
53 | choices: [
54 | {
55 | index: 0,
56 | message: {
57 | role: 'assistant',
58 | content: 'Hello!',
59 | },
60 | finish_reason: 'stop',
61 | },
62 | ],
63 | };
64 |
65 | const detected = detectResponse(response);
66 |
67 | expect(detected).not.toBeNull();
68 | expect(detected?.inputTokens).toBe(0);
69 | expect(detected?.outputTokens).toBe(0);
70 | });
71 |
72 | it('should handle partial usage data', () => {
73 | const response = {
74 | id: 'chatcmpl-123',
75 | object: 'chat.completion',
76 | created: Date.now(),
77 | model: 'gpt-4',
78 | choices: [
79 | {
80 | index: 0,
81 | message: {
82 | role: 'assistant',
83 | content: 'Hello!',
84 | },
85 | finish_reason: 'stop',
86 | },
87 | ],
88 | usage: {
89 | prompt_tokens: 10,
90 | },
91 | };
92 |
93 | const detected = detectResponse(response);
94 |
95 | expect(detected).not.toBeNull();
96 | expect(detected?.inputTokens).toBe(10);
97 | expect(detected?.outputTokens).toBe(0);
98 | });
99 | });
100 |
101 | describe('detectResponse - OpenAI Responses API', () => {
102 | it('should detect OpenAI responses API response', () => {
103 | const response = {
104 | id: 'resp_123',
105 | object: 'response',
106 | created: Date.now(),
107 | model: 'gpt-4',
108 | output: 'Hello!',
109 | usage: {
110 | input_tokens: 10,
111 | output_tokens: 5,
112 | },
113 | };
114 |
115 | const detected = detectResponse(response);
116 |
117 | expect(detected).not.toBeNull();
118 | expect(detected?.provider).toBe('openai');
119 | expect(detected?.type).toBe('response_api');
120 | expect(detected?.model).toBe('gpt-4');
121 | expect(detected?.inputTokens).toBe(10);
122 | expect(detected?.outputTokens).toBe(5);
123 | });
124 |
125 | it('should return null for responses API with empty usage', () => {
126 | const response = {
127 | id: 'resp_123',
128 | object: 'response',
129 | created: Date.now(),
130 | model: 'gpt-4',
131 | output: 'Hello!',
132 | usage: {},
133 | };
134 |
135 | const detected = detectResponse(response);
136 |
137 | // Empty usage object doesn't match the type guard, so returns null
138 | expect(detected).toBeNull();
139 | });
140 | });
141 |
142 | describe('detectResponse - OpenAI Embeddings', () => {
143 | it('should detect OpenAI embedding response', () => {
144 | const response = {
145 | object: 'list',
146 | data: [
147 | {
148 | object: 'embedding',
149 | embedding: [0.1, 0.2, 0.3],
150 | index: 0,
151 | },
152 | ],
153 | model: 'text-embedding-ada-002',
154 | usage: {
155 | prompt_tokens: 8,
156 | total_tokens: 8,
157 | },
158 | };
159 |
160 | const detected = detectResponse(response);
161 |
162 | expect(detected).not.toBeNull();
163 | expect(detected?.provider).toBe('openai');
164 | expect(detected?.type).toBe('embedding');
165 | expect(detected?.model).toBe('text-embedding-ada-002');
166 | expect(detected?.inputTokens).toBe(8);
167 | expect(detected?.outputTokens).toBe(0); // Embeddings don't have output tokens
168 | });
169 |
170 | it('should handle missing usage data in embeddings', () => {
171 | const response = {
172 | object: 'list',
173 | data: [
174 | {
175 | object: 'embedding',
176 | embedding: [0.1, 0.2, 0.3],
177 | index: 0,
178 | },
179 | ],
180 | model: 'text-embedding-ada-002',
181 | };
182 |
183 | const detected = detectResponse(response);
184 |
185 | expect(detected).not.toBeNull();
186 | expect(detected?.inputTokens).toBe(0);
187 | expect(detected?.outputTokens).toBe(0);
188 | });
189 | });
190 |
191 | describe('detectResponse - Anthropic Messages', () => {
192 | it('should detect Anthropic message response', () => {
193 | const response = {
194 | id: 'msg_123',
195 | type: 'message',
196 | role: 'assistant',
197 | content: [{type: 'text', text: 'Hello!'}],
198 | model: 'claude-3-5-sonnet-20241022',
199 | stop_reason: 'end_turn',
200 | stop_sequence: null,
201 | usage: {
202 | input_tokens: 10,
203 | output_tokens: 5,
204 | },
205 | };
206 |
207 | const detected = detectResponse(response);
208 |
209 | expect(detected).not.toBeNull();
210 | expect(detected?.provider).toBe('anthropic');
211 | expect(detected?.type).toBe('chat_completion');
212 | expect(detected?.model).toBe('claude-3-5-sonnet-20241022');
213 | expect(detected?.inputTokens).toBe(10);
214 | expect(detected?.outputTokens).toBe(5);
215 | });
216 |
217 | it('should return null for Anthropic messages with empty usage', () => {
218 | const response = {
219 | id: 'msg_123',
220 | type: 'message',
221 | role: 'assistant',
222 | content: [{type: 'text', text: 'Hello!'}],
223 | model: 'claude-3-opus-20240229',
224 | stop_reason: 'end_turn',
225 | stop_sequence: null,
226 | usage: {},
227 | };
228 |
229 | const detected = detectResponse(response);
230 |
231 | // Empty usage object doesn't match the type guard, so returns null
232 | expect(detected).toBeNull();
233 | });
234 | });
235 |
236 | describe('detectResponse - Gemini', () => {
237 | it('should detect Gemini response', () => {
238 | const response = {
239 | response: {
240 | text: () => 'Hello!',
241 | usageMetadata: {
242 | promptTokenCount: 10,
243 | candidatesTokenCount: 5,
244 | totalTokenCount: 15,
245 | },
246 | modelVersion: 'gemini-1.5-pro',
247 | },
248 | };
249 |
250 | const detected = detectResponse(response);
251 |
252 | expect(detected).not.toBeNull();
253 | expect(detected?.provider).toBe('google');
254 | expect(detected?.type).toBe('chat_completion');
255 | expect(detected?.model).toBe('gemini-1.5-pro');
256 | expect(detected?.inputTokens).toBe(10);
257 | expect(detected?.outputTokens).toBe(5);
258 | });
259 |
260 | it('should include reasoning tokens in output for extended thinking models', () => {
261 | const response = {
262 | response: {
263 | text: () => 'Hello!',
264 | usageMetadata: {
265 | promptTokenCount: 10,
266 | candidatesTokenCount: 5,
267 | thoughtsTokenCount: 3, // Reasoning tokens
268 | totalTokenCount: 18,
269 | },
270 | modelVersion: 'gemini-1.5-pro',
271 | },
272 | };
273 |
274 | const detected = detectResponse(response);
275 |
276 | expect(detected).not.toBeNull();
277 | expect(detected?.outputTokens).toBe(8); // 5 + 3 reasoning tokens
278 | });
279 |
280 | it('should return null when usageMetadata is missing', () => {
281 | const response = {
282 | response: {
283 | text: () => 'Hello!',
284 | },
285 | };
286 |
287 | const detected = detectResponse(response);
288 |
289 | // Missing usageMetadata doesn't match the type guard, so returns null
290 | expect(detected).toBeNull();
291 | });
292 |
293 | it('should use default model name when modelVersion is missing', () => {
294 | const response = {
295 | response: {
296 | text: () => 'Hello!',
297 | usageMetadata: {
298 | promptTokenCount: 10,
299 | candidatesTokenCount: 5,
300 | totalTokenCount: 15,
301 | },
302 | },
303 | };
304 |
305 | const detected = detectResponse(response);
306 |
307 | expect(detected).not.toBeNull();
308 | expect(detected?.model).toBe('gemini');
309 | });
310 | });
311 |
312 | describe('detectResponse - Unknown types', () => {
313 | it('should return null for unknown response types', () => {
314 | const response = {
315 | some: 'data',
316 | that: 'does not match any provider',
317 | };
318 |
319 | const detected = detectResponse(response);
320 |
321 | expect(detected).toBeNull();
322 | });
323 |
324 | it('should return null for null input', () => {
325 | const detected = detectResponse(null);
326 |
327 | expect(detected).toBeNull();
328 | });
329 |
330 | it('should return null for undefined input', () => {
331 | const detected = detectResponse(undefined);
332 |
333 | expect(detected).toBeNull();
334 | });
335 | });
336 |
337 | describe('isGeminiStream', () => {
338 | it('should detect Gemini stream structure', () => {
339 | const geminiStream = {
340 | stream: {
341 | [Symbol.asyncIterator]: function* () {
342 | yield {text: () => 'test'};
343 | },
344 | },
345 | response: Promise.resolve({}),
346 | };
347 |
348 | expect(isGeminiStream(geminiStream)).toBe(true);
349 | });
350 |
351 | it('should return false for OpenAI-style streams', () => {
352 | const openaiStream = {
353 | tee: () => [{}, {}],
354 | toReadableStream: () => {},
355 | };
356 |
357 | expect(isGeminiStream(openaiStream)).toBe(false);
358 | });
359 |
360 | it('should return false for non-stream objects', () => {
361 | expect(isGeminiStream({})).toBe(false);
362 | // null and undefined produce a falsy result (not necessarily the boolean false), hence toBeFalsy
363 | expect(isGeminiStream(null)).toBeFalsy();
364 | expect(isGeminiStream(undefined)).toBeFalsy();
365 | });
366 | });
367 |
368 | describe('extractUsageFromChatStream', () => {
369 | it('should extract usage from OpenAI chat stream', async () => {
370 | const chunks = [
371 | {
372 | id: 'chatcmpl-123',
373 | object: 'chat.completion.chunk',
374 | created: Date.now(),
375 | model: 'gpt-4',
376 | choices: [
377 | {
378 | index: 0,
379 | delta: {content: 'Hello'},
380 | finish_reason: null,
381 | },
382 | ],
383 | },
384 | {
385 | id: 'chatcmpl-123',
386 | object: 'chat.completion.chunk',
387 | created: Date.now(),
388 | model: 'gpt-4',
389 | choices: [
390 | {
391 | index: 0,
392 | delta: {},
393 | finish_reason: 'stop',
394 | },
395 | ],
396 | usage: {
397 | prompt_tokens: 10,
398 | completion_tokens: 5,
399 | total_tokens: 15,
400 | },
401 | },
402 | ];
403 |
404 | const mockStream = {
405 | async *[Symbol.asyncIterator]() {
406 | for (const chunk of chunks) {
407 | yield chunk;
408 | }
409 | },
410 | };
411 |
412 | const detected = await extractUsageFromChatStream(mockStream as any);
413 |
414 | expect(detected).not.toBeNull();
415 | expect(detected?.provider).toBe('openai');
416 | expect(detected?.model).toBe('gpt-4');
417 | expect(detected?.inputTokens).toBe(10);
418 | expect(detected?.outputTokens).toBe(5);
419 | });
420 |
421 | it('should handle streams without usage data', async () => {
422 | const chunks = [
423 | {
424 | id: 'chatcmpl-123',
425 | object: 'chat.completion.chunk',
426 | created: Date.now(),
427 | model: 'gpt-4',
428 | choices: [
429 | {
430 | index: 0,
431 | delta: {content: 'Hello'},
432 | finish_reason: 'stop',
433 | },
434 | ],
435 | },
436 | ];
437 |
438 | const mockStream = {
439 | async *[Symbol.asyncIterator]() {
440 | for (const chunk of chunks) {
441 | yield chunk;
442 | }
443 | },
444 | };
445 |
446 | const detected = await extractUsageFromChatStream(mockStream as any);
447 |
448 | expect(detected).not.toBeNull();
449 | expect(detected?.inputTokens).toBe(0);
450 | expect(detected?.outputTokens).toBe(0);
451 | });
452 |
453 | it('should handle stream errors gracefully', async () => {
454 | const mockStream = {
455 | async *[Symbol.asyncIterator]() {
456 | throw new Error('Stream error');
457 | },
458 | };
459 |
460 | const detected = await extractUsageFromChatStream(mockStream as any);
461 |
462 | expect(detected).toBeNull();
463 | });
464 | });
465 |
466 | describe('extractUsageFromResponseStream', () => {
467 | it('should extract usage from OpenAI Responses API stream', async () => {
468 | const chunks = [
469 | {
470 | type: 'response.output_text.delta',
471 | delta: 'Hello',
472 | },
473 | {
474 | type: 'response.done',
475 | response: {
476 | id: 'resp_123',
477 | model: 'gpt-4',
478 | usage: {
479 | input_tokens: 10,
480 | output_tokens: 5,
481 | },
482 | },
483 | },
484 | ];
485 |
486 | const mockStream = {
487 | async *[Symbol.asyncIterator]() {
488 | for (const chunk of chunks) {
489 | yield chunk;
490 | }
491 | },
492 | };
493 |
494 | const detected = await extractUsageFromResponseStream(mockStream as any);
495 |
496 | expect(detected).not.toBeNull();
497 | expect(detected?.provider).toBe('openai');
498 | expect(detected?.type).toBe('response_api');
499 | expect(detected?.model).toBe('gpt-4');
500 | expect(detected?.inputTokens).toBe(10);
501 | expect(detected?.outputTokens).toBe(5);
502 | });
503 |
504 | it('should handle streams without usage data', async () => {
505 | const chunks = [
506 | {
507 | type: 'response.output_text.delta',
508 | delta: 'Hello',
509 | },
510 | {
511 | type: 'response.done',
512 | response: {
513 | id: 'resp_123',
514 | model: 'gpt-4',
515 | },
516 | },
517 | ];
518 |
519 | const mockStream = {
520 | async *[Symbol.asyncIterator]() {
521 | for (const chunk of chunks) {
522 | yield chunk;
523 | }
524 | },
525 | };
526 |
527 | const detected = await extractUsageFromResponseStream(mockStream as any);
528 |
529 | expect(detected).not.toBeNull();
530 | expect(detected?.inputTokens).toBe(0);
531 | expect(detected?.outputTokens).toBe(0);
532 | });
533 | });
534 |
535 | describe('extractUsageFromAnthropicStream', () => {
536 | it('should extract usage from Anthropic stream', async () => {
537 | const chunks = [
538 | {
539 | type: 'message_start',
540 | message: {
541 | id: 'msg_123',
542 | type: 'message',
543 | role: 'assistant',
544 | content: [],
545 | model: 'claude-3-opus-20240229',
546 | usage: {
547 | input_tokens: 10,
548 | output_tokens: 0,
549 | },
550 | },
551 | },
552 | {
553 | type: 'content_block_start',
554 | index: 0,
555 | content_block: {type: 'text', text: ''},
556 | },
557 | {
558 | type: 'content_block_delta',
559 | index: 0,
560 | delta: {type: 'text_delta', text: 'Hello'},
561 | },
562 | {
563 | type: 'message_delta',
564 | delta: {stop_reason: 'end_turn'},
565 | usage: {
566 | output_tokens: 5,
567 | },
568 | },
569 | ];
570 |
571 | const mockStream = {
572 | async *[Symbol.asyncIterator]() {
573 | for (const chunk of chunks) {
574 | yield chunk;
575 | }
576 | },
577 | };
578 |
579 | const detected = await extractUsageFromAnthropicStream(mockStream as any);
580 |
581 | expect(detected).not.toBeNull();
582 | expect(detected?.provider).toBe('anthropic');
583 | expect(detected?.model).toBe('claude-3-opus-20240229');
584 | expect(detected?.inputTokens).toBe(10);
585 | expect(detected?.outputTokens).toBe(5);
586 | });
587 |
588 | it('should handle streams without usage data', async () => {
589 | const chunks = [
590 | {
591 | type: 'message_start',
592 | message: {
593 | id: 'msg_123',
594 | model: 'claude-3-opus-20240229',
595 | usage: {},
596 | },
597 | },
598 | ];
599 |
600 | const mockStream = {
601 | async *[Symbol.asyncIterator]() {
602 | for (const chunk of chunks) {
603 | yield chunk;
604 | }
605 | },
606 | };
607 |
608 | const detected = await extractUsageFromAnthropicStream(mockStream as any);
609 |
610 | expect(detected).not.toBeNull();
611 | expect(detected?.inputTokens).toBe(0);
612 | expect(detected?.outputTokens).toBe(0);
613 | });
614 | });
615 |
616 |
```
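For orientation, here is a hedged sketch (not a file in the repo) of how the helpers exercised above fit together in application code. The import path follows the package's directory layout, and the non-streaming fixture reuses the Gemini shape from the tests; treat both as assumptions about the public surface rather than documented usage.

```typescript
import {
  detectResponse,
  extractUsageFromChatStream,
} from '../utils/type-detection'; // path mirrors the tests' layout

// Non-streaming: detectResponse inspects the object's shape and returns
// provider, model and token counts, or null when nothing matches.
const detected = detectResponse({
  response: {
    text: () => 'Hello!',
    usageMetadata: {promptTokenCount: 10, candidatesTokenCount: 5, totalTokenCount: 15},
    modelVersion: 'gemini-1.5-pro',
  },
});
console.log(detected?.provider, detected?.inputTokens, detected?.outputTokens);
// -> 'google' 10 5

// Streaming (OpenAI chat completions): the extractor consumes the chunks and
// reads usage from the final chunk, falling back to zero counts when absent.
async function totalTokensFromStream(stream: AsyncIterable<unknown>): Promise<number> {
  const usage = await extractUsageFromChatStream(stream as any);
  return (usage?.inputTokens ?? 0) + (usage?.outputTokens ?? 0);
}
```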
--------------------------------------------------------------------------------
/llm/token-meter/tests/token-meter-anthropic.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for TokenMeter - Anthropic Provider
3 | */
4 |
5 | import Stripe from 'stripe';
6 | import {createTokenMeter} from '../token-meter';
7 | import type {MeterConfig} from '../types';
8 |
9 | // Mock Stripe
10 | jest.mock('stripe');
11 |
12 | describe('TokenMeter - Anthropic Provider', () => {
13 | let mockMeterEventsCreate: jest.Mock;
14 | let config: MeterConfig;
15 | const TEST_API_KEY = 'sk_test_mock_key';
16 |
17 | beforeEach(() => {
18 | jest.clearAllMocks();
19 | mockMeterEventsCreate = jest.fn().mockResolvedValue({});
20 |
21 | // Mock the Stripe constructor
22 | (Stripe as unknown as jest.Mock).mockImplementation(() => ({
23 | v2: {
24 | billing: {
25 | meterEvents: {
26 | create: mockMeterEventsCreate,
27 | },
28 | },
29 | },
30 | }));
31 |
32 | config = {};
33 | });
34 |
35 | describe('Messages - Non-streaming', () => {
36 | it('should track usage from basic message', async () => {
37 | const meter = createTokenMeter(TEST_API_KEY, config);
38 |
39 | const response = {
40 | id: 'msg_123',
41 | type: 'message',
42 | role: 'assistant',
43 | content: [{type: 'text', text: 'Hello, World!'}],
44 | model: 'claude-3-5-sonnet-20241022',
45 | stop_reason: 'end_turn',
46 | stop_sequence: null,
47 | usage: {
48 | input_tokens: 15,
49 | output_tokens: 8,
50 | },
51 | };
52 |
53 | meter.trackUsage(response, 'cus_123');
54 |
55 | // Wait for fire-and-forget logging to complete
56 | await new Promise(resolve => setImmediate(resolve));
57 |
58 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
59 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
60 | expect.objectContaining({
61 | event_name: 'token-billing-tokens',
62 | payload: expect.objectContaining({
63 | stripe_customer_id: 'cus_123',
64 | value: '15',
65 | model: 'anthropic/claude-3.5-sonnet',
66 | token_type: 'input',
67 | }),
68 | })
69 | );
70 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
71 | expect.objectContaining({
72 | payload: expect.objectContaining({
73 | value: '8',
74 | token_type: 'output',
75 | }),
76 | })
77 | );
78 | });
79 |
80 | it('should track usage from message with system prompt', async () => {
81 | const meter = createTokenMeter(TEST_API_KEY, config);
82 |
83 | const response = {
84 | id: 'msg_456',
85 | type: 'message',
86 | role: 'assistant',
87 | content: [{type: 'text', text: 'I am a helpful assistant.'}],
88 | model: 'claude-3-5-sonnet-20241022',
89 | stop_reason: 'end_turn',
90 | stop_sequence: null,
91 | usage: {
92 | input_tokens: 50,
93 | output_tokens: 12,
94 | },
95 | };
96 |
97 | meter.trackUsage(response, 'cus_456');
98 |
99 | // Wait for fire-and-forget logging to complete
100 | await new Promise(resolve => setImmediate(resolve));
101 |
102 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
103 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
104 | expect.objectContaining({
105 | payload: expect.objectContaining({
106 | stripe_customer_id: 'cus_456',
107 | value: '50',
108 | model: 'anthropic/claude-3.5-sonnet',
109 | token_type: 'input',
110 | }),
111 | })
112 | );
113 | });
114 |
115 | it('should track usage from message with tool use', async () => {
116 | const meter = createTokenMeter(TEST_API_KEY, config);
117 |
118 | const response = {
119 | id: 'msg_789',
120 | type: 'message',
121 | role: 'assistant',
122 | content: [
123 | {
124 | type: 'tool_use',
125 | id: 'toolu_123',
126 | name: 'get_weather',
127 | input: {location: 'San Francisco'},
128 | },
129 | ],
130 | model: 'claude-3-5-sonnet-20241022',
131 | stop_reason: 'tool_use',
132 | stop_sequence: null,
133 | usage: {
134 | input_tokens: 100,
135 | output_tokens: 45,
136 | },
137 | };
138 |
139 | meter.trackUsage(response, 'cus_789');
140 |
141 | // Wait for fire-and-forget logging to complete
142 | await new Promise(resolve => setImmediate(resolve));
143 |
144 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
145 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
146 | expect.objectContaining({
147 | payload: expect.objectContaining({
148 | value: '100',
149 | model: 'anthropic/claude-3.5-sonnet',
150 | token_type: 'input',
151 | }),
152 | })
153 | );
154 | });
155 |
156 | it('should track usage from multi-turn conversation', async () => {
157 | const meter = createTokenMeter(TEST_API_KEY, config);
158 |
159 | const response = {
160 | id: 'msg_conv',
161 | type: 'message',
162 | role: 'assistant',
163 | content: [{type: 'text', text: 'The weather is sunny today.'}],
164 | model: 'claude-3-opus-20240229',
165 | stop_reason: 'end_turn',
166 | stop_sequence: null,
167 | usage: {
168 | input_tokens: 200, // Includes conversation history
169 | output_tokens: 15,
170 | },
171 | };
172 |
173 | meter.trackUsage(response, 'cus_123');
174 |
175 | // Wait for fire-and-forget logging to complete
176 | await new Promise(resolve => setImmediate(resolve));
177 |
178 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
179 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
180 | expect.objectContaining({
181 | payload: expect.objectContaining({
182 | value: '200',
183 | model: 'anthropic/claude-3-opus',
184 | token_type: 'input',
185 | }),
186 | })
187 | );
188 | });
189 |
190 | it('should track usage from message with mixed content', async () => {
191 | const meter = createTokenMeter(TEST_API_KEY, config);
192 |
193 | const response = {
194 | id: 'msg_mixed',
195 | type: 'message',
196 | role: 'assistant',
197 | content: [
198 | {type: 'text', text: 'Let me check the weather for you.'},
199 | {
200 | type: 'tool_use',
201 | id: 'toolu_456',
202 | name: 'get_weather',
203 | input: {location: 'New York'},
204 | },
205 | ],
206 | model: 'claude-3-5-haiku-20241022',
207 | stop_reason: 'tool_use',
208 | stop_sequence: null,
209 | usage: {
210 | input_tokens: 80,
211 | output_tokens: 60,
212 | },
213 | };
214 |
215 | meter.trackUsage(response, 'cus_999');
216 |
217 | // Wait for fire-and-forget logging to complete
218 | await new Promise(resolve => setImmediate(resolve));
219 |
220 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
221 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
222 | expect.objectContaining({
223 | payload: expect.objectContaining({
224 | value: '80',
225 | model: 'anthropic/claude-3.5-haiku',
226 | token_type: 'input',
227 | }),
228 | })
229 | );
230 | });
231 | });
232 |
233 | describe('Messages - Streaming', () => {
234 | it('should track usage from basic streaming message', async () => {
235 | const meter = createTokenMeter(TEST_API_KEY, config);
236 |
237 | const chunks = [
238 | {
239 | type: 'message_start',
240 | message: {
241 | id: 'msg_123',
242 | type: 'message',
243 | role: 'assistant',
244 | content: [],
245 | model: 'claude-3-5-sonnet-20241022',
246 | usage: {
247 | input_tokens: 15,
248 | output_tokens: 0,
249 | },
250 | },
251 | },
252 | {
253 | type: 'content_block_start',
254 | index: 0,
255 | content_block: {type: 'text', text: ''},
256 | },
257 | {
258 | type: 'content_block_delta',
259 | index: 0,
260 | delta: {type: 'text_delta', text: 'Hello, World!'},
261 | },
262 | {
263 | type: 'content_block_stop',
264 | index: 0,
265 | },
266 | {
267 | type: 'message_delta',
268 | delta: {stop_reason: 'end_turn'},
269 | usage: {
270 | output_tokens: 8,
271 | },
272 | },
273 | {
274 | type: 'message_stop',
275 | },
276 | ];
277 |
278 | const mockStream = createMockStreamWithTee(chunks);
279 | const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_123');
280 |
281 | for await (const _chunk of wrappedStream) {
282 | // Consume stream
283 | }
284 |
285 | // Wait for fire-and-forget logging to complete
286 | await new Promise(resolve => setImmediate(resolve));
287 |
288 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
289 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
290 | expect.objectContaining({
291 | payload: expect.objectContaining({
292 | stripe_customer_id: 'cus_123',
293 | value: '15',
294 | model: 'anthropic/claude-3.5-sonnet',
295 | token_type: 'input',
296 | }),
297 | })
298 | );
299 | });
300 |
301 | it('should track usage from streaming message with tool use', async () => {
302 | const meter = createTokenMeter(TEST_API_KEY, config);
303 |
304 | const chunks = [
305 | {
306 | type: 'message_start',
307 | message: {
308 | id: 'msg_456',
309 | type: 'message',
310 | role: 'assistant',
311 | content: [],
312 | model: 'claude-3-5-sonnet-20241022',
313 | usage: {
314 | input_tokens: 100,
315 | output_tokens: 0,
316 | },
317 | },
318 | },
319 | {
320 | type: 'content_block_start',
321 | index: 0,
322 | content_block: {
323 | type: 'tool_use',
324 | id: 'toolu_789',
325 | name: 'get_weather',
326 | input: {},
327 | },
328 | },
329 | {
330 | type: 'content_block_delta',
331 | index: 0,
332 | delta: {
333 | type: 'input_json_delta',
334 | partial_json: '{"location": "San Francisco"}',
335 | },
336 | },
337 | {
338 | type: 'content_block_stop',
339 | index: 0,
340 | },
341 | {
342 | type: 'message_delta',
343 | delta: {stop_reason: 'tool_use'},
344 | usage: {
345 | output_tokens: 45,
346 | },
347 | },
348 | {
349 | type: 'message_stop',
350 | },
351 | ];
352 |
353 | const mockStream = createMockStreamWithTee(chunks);
354 | const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_456');
355 |
356 | for await (const _chunk of wrappedStream) {
357 | // Consume stream
358 | }
359 |
360 | // Wait for fire-and-forget logging to complete
361 | await new Promise(resolve => setImmediate(resolve));
362 |
363 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
364 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
365 | expect.objectContaining({
366 | payload: expect.objectContaining({
367 | stripe_customer_id: 'cus_456',
368 | value: '100',
369 | model: 'anthropic/claude-3.5-sonnet',
370 | token_type: 'input',
371 | }),
372 | })
373 | );
374 | });
375 |
376 | it('should properly tee the stream', async () => {
377 | const meter = createTokenMeter(TEST_API_KEY, config);
378 |
379 | const chunks = [
380 | {
381 | type: 'message_start',
382 | message: {
383 | id: 'msg_123',
384 | usage: {
385 | input_tokens: 10,
386 | output_tokens: 0,
387 | },
388 | },
389 | },
390 | {
391 | type: 'content_block_delta',
392 | index: 0,
393 | delta: {type: 'text_delta', text: 'Hello'},
394 | },
395 | {
396 | type: 'content_block_delta',
397 | index: 0,
398 | delta: {type: 'text_delta', text: ' World'},
399 | },
400 | ];
401 |
402 | const mockStream = createMockStreamWithTee(chunks);
403 | const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_123');
404 |
405 | const receivedChunks: any[] = [];
406 | for await (const chunk of wrappedStream) {
407 | receivedChunks.push(chunk);
408 | }
409 |
410 | expect(receivedChunks).toHaveLength(3);
411 | expect(receivedChunks[0].type).toBe('message_start');
412 | expect(receivedChunks[1].delta.text).toBe('Hello');
413 | expect(receivedChunks[2].delta.text).toBe(' World');
414 | });
415 |
416 | it('should extract input tokens from message_start', async () => {
417 | const meter = createTokenMeter(TEST_API_KEY, config);
418 |
419 | const chunks = [
420 | {
421 | type: 'message_start',
422 | message: {
423 | id: 'msg_123',
424 | model: 'claude-3-opus-20240229',
425 | usage: {
426 | input_tokens: 250,
427 | output_tokens: 0,
428 | },
429 | },
430 | },
431 | {
432 | type: 'message_delta',
433 | delta: {stop_reason: 'end_turn'},
434 | usage: {
435 | output_tokens: 20,
436 | },
437 | },
438 | ];
439 |
440 | const mockStream = createMockStreamWithTee(chunks);
441 | const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_789');
442 |
443 | for await (const _chunk of wrappedStream) {
444 | // Consume stream
445 | }
446 |
447 | // Wait for fire-and-forget logging to complete
448 | await new Promise(resolve => setImmediate(resolve));
449 |
450 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
451 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
452 | expect.objectContaining({
453 | payload: expect.objectContaining({
454 | value: '250',
455 | model: 'anthropic/claude-3-opus',
456 | token_type: 'input',
457 | }),
458 | })
459 | );
460 | });
461 | });
462 |
463 | describe('Model Variants', () => {
464 | it('should track claude-3-opus', async () => {
465 | const meter = createTokenMeter(TEST_API_KEY, config);
466 |
467 | const response = {
468 | id: 'msg_opus',
469 | type: 'message',
470 | role: 'assistant',
471 | content: [{type: 'text', text: 'Response from Opus'}],
472 | model: 'claude-3-opus-20240229',
473 | stop_reason: 'end_turn',
474 | stop_sequence: null,
475 | usage: {
476 | input_tokens: 20,
477 | output_tokens: 10,
478 | },
479 | };
480 |
481 | meter.trackUsage(response, 'cus_123');
482 |
483 | // Wait for fire-and-forget logging to complete
484 | await new Promise(resolve => setImmediate(resolve));
485 |
486 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
487 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
488 | expect.objectContaining({
489 | payload: expect.objectContaining({
490 | value: '20',
491 | model: 'anthropic/claude-3-opus',
492 | token_type: 'input',
493 | }),
494 | })
495 | );
496 | });
497 |
498 | it('should track claude-3-5-haiku', async () => {
499 | const meter = createTokenMeter(TEST_API_KEY, config);
500 |
501 | const response = {
502 | id: 'msg_haiku',
503 | type: 'message',
504 | role: 'assistant',
505 | content: [{type: 'text', text: 'Response from Haiku'}],
506 | model: 'claude-3-5-haiku-20241022',
507 | stop_reason: 'end_turn',
508 | stop_sequence: null,
509 | usage: {
510 | input_tokens: 15,
511 | output_tokens: 8,
512 | },
513 | };
514 |
515 | meter.trackUsage(response, 'cus_456');
516 |
517 | // Wait for fire-and-forget logging to complete
518 | await new Promise(resolve => setImmediate(resolve));
519 |
520 | expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
521 | expect(mockMeterEventsCreate).toHaveBeenCalledWith(
522 | expect.objectContaining({
523 | payload: expect.objectContaining({
524 | value: '15',
525 | model: 'anthropic/claude-3.5-haiku',
526 | token_type: 'input',
527 | }),
528 | })
529 | );
530 | });
531 | });
532 | });
533 |
534 | // Helper to create a mock Anthropic stream exposing tee(); the returned branches are themselves tee-able so the wrapper can split the stream more than once
535 | function createMockStreamWithTee(chunks: any[]) {
536 | return {
537 | tee() {
538 | const stream1 = {
539 | async *[Symbol.asyncIterator]() {
540 | for (const chunk of chunks) {
541 | yield chunk;
542 | }
543 | },
544 | tee() {
545 | const s1 = {
546 | async *[Symbol.asyncIterator]() {
547 | for (const chunk of chunks) {
548 | yield chunk;
549 | }
550 | },
551 | };
552 | const s2 = {
553 | async *[Symbol.asyncIterator]() {
554 | for (const chunk of chunks) {
555 | yield chunk;
556 | }
557 | },
558 | };
559 | return [s1, s2];
560 | },
561 | };
562 | const stream2 = {
563 | async *[Symbol.asyncIterator]() {
564 | for (const chunk of chunks) {
565 | yield chunk;
566 | }
567 | },
568 | };
569 | return [stream1, stream2];
570 | },
571 | async *[Symbol.asyncIterator]() {
572 | for (const chunk of chunks) {
573 | yield chunk;
574 | }
575 | },
576 | };
577 | }
578 |
579 |
```
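As a usage sketch (hedged, not taken from the repo), the tests above imply the following call pattern around a real Anthropic client. The @anthropic-ai/sdk calls are standard SDK usage; createTokenMeter, trackUsage and trackUsageStreamAnthropic are the package APIs exercised above, and the customer id, model and environment variable names are placeholders.

```typescript
import Anthropic from '@anthropic-ai/sdk';
import {createTokenMeter} from '../token-meter'; // same relative import the tests use; an app would import the published package

const anthropic = new Anthropic(); // reads ANTHROPIC_API_KEY from the environment
const meter = createTokenMeter(process.env.STRIPE_API_KEY ?? '', {});

async function main() {
  // Non-streaming: meter the response's input/output tokens for a customer
  // (fire-and-forget, as the setImmediate waits in the tests suggest).
  const message = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-20241022',
    max_tokens: 1024,
    messages: [{role: 'user', content: 'Hello'}],
  });
  meter.trackUsage(message, 'cus_123');

  // Streaming: wrap the stream and consume the wrapped copy as usual; the
  // wrapper tees it so usage can be extracted without disturbing the consumer.
  const stream = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-20241022',
    max_tokens: 1024,
    messages: [{role: 'user', content: 'Hello'}],
    stream: true,
  });
  for await (const _chunk of meter.trackUsageStreamAnthropic(stream, 'cus_123')) {
    // consume the wrapped stream so usage gets recorded
  }
}

main().catch(console.error);
```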
--------------------------------------------------------------------------------
/llm/ai-sdk/provider/stripe-language-model.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Stripe Language Model implementation for AI SDK V2
3 | */
4 |
5 | import {
6 | LanguageModelV2,
7 | LanguageModelV2CallOptions,
8 | LanguageModelV2CallWarning,
9 | LanguageModelV2Content,
10 | LanguageModelV2FinishReason,
11 | LanguageModelV2StreamPart,
12 | } from '@ai-sdk/provider';
13 | import {
14 | ParseResult,
15 | createEventSourceResponseHandler,
16 | createJsonResponseHandler,
17 | createStatusCodeErrorResponseHandler,
18 | postJsonToApi,
19 | } from '@ai-sdk/provider-utils';
20 | import {z} from 'zod';
21 | import {StripeLanguageModelSettings, StripeProviderOptions} from './types';
22 | import {convertToOpenAIMessages, mapOpenAIFinishReason} from './utils';
23 |
24 | /**
25 | * OpenAI-compatible chat completion response schema
26 | */
27 | const openAIResponseSchema = z.object({
28 | choices: z.array(
29 | z.object({
30 | message: z.object({
31 | content: z.string().nullable().optional(),
32 | tool_calls: z
33 | .array(
34 | z.object({
35 | id: z.string(),
36 | type: z.literal('function'),
37 | function: z.object({
38 | name: z.string(),
39 | arguments: z.string(),
40 | }),
41 | })
42 | )
43 | .optional(),
44 | }),
45 | finish_reason: z.string().nullable(),
46 | })
47 | ),
48 | usage: z
49 | .object({
50 | prompt_tokens: z.number().optional(),
51 | completion_tokens: z.number().optional(),
52 | total_tokens: z.number().optional(),
53 | })
54 | .optional(),
55 | });
56 |
57 | type OpenAIResponse = z.infer<typeof openAIResponseSchema>;
58 |
59 | /**
60 | * OpenAI-compatible streaming chunk schema
61 | * Note: The event source handler may also return '[DONE]' string or null
62 | */
63 | const openAIStreamChunkSchema = z
64 | .union([
65 | z.object({
66 | choices: z
67 | .array(
68 | z.object({
69 | delta: z.object({
70 | content: z.string().optional(),
71 | tool_calls: z
72 | .array(
73 | z.object({
74 | index: z.number(),
75 | id: z.string().optional(),
76 | function: z
77 | .object({
78 | name: z.string().optional(),
79 | arguments: z.string().optional(),
80 | })
81 | .optional(),
82 | })
83 | )
84 | .optional(),
85 | }),
86 | finish_reason: z.string().nullable().optional(),
87 | })
88 | )
89 | .optional(),
90 | usage: z
91 | .object({
92 | prompt_tokens: z.number().optional(),
93 | completion_tokens: z.number().optional(),
94 | total_tokens: z.number().optional(),
95 | })
96 | .optional(),
97 | }),
98 | z.literal('[DONE]'),
99 | z.null(),
100 | ])
101 | .catch(null);
102 |
103 | type OpenAIStreamChunk = z.infer<typeof openAIStreamChunkSchema>;
104 |
105 | /**
106 | * Enhanced error class for Stripe AI SDK Provider access issues
107 | */
108 | export class StripeProviderAccessError extends Error {
109 | constructor(originalError: any) {
110 | const message = [
111 | 'Stripe AI SDK Provider Access Required',
112 | '',
113 | 'You are probably seeing this error because you have not been granted access to the Stripe AI SDK Provider Private Preview.',
114 | '',
115 | 'To request access, please fill out the form here:',
116 | 'https://docs.stripe.com/billing/token-billing',
117 | '',
118 | '---',
119 | 'Original error: ' + (originalError.message || 'Unknown error'),
120 | ].join('\n');
121 |
122 | super(message);
123 | this.name = 'StripeProviderAccessError';
124 |
125 | // Preserve the original error
126 | this.cause = originalError;
127 | }
128 | }
129 |
130 | interface StripeProviderConfig {
131 | provider: string;
132 | baseURL: string;
133 | headers: () => Record<string, string>;
134 | }
135 |
136 | /**
137 | * Stripe Language Model that implements the AI SDK V2 specification
138 | */
139 | export class StripeLanguageModel implements LanguageModelV2 {
140 | readonly specificationVersion = 'v2' as const;
141 | readonly provider: string;
142 | readonly modelId: string;
143 |
144 | private readonly settings: StripeLanguageModelSettings;
145 | private readonly config: StripeProviderConfig;
146 |
147 | constructor(
148 | modelId: string,
149 | settings: StripeLanguageModelSettings,
150 | config: StripeProviderConfig
151 | ) {
152 | this.provider = config.provider;
153 | this.modelId = modelId;
154 | this.settings = settings;
155 | this.config = config;
156 | }
157 |
158 | /**
159 | * Stripe proxy doesn't require special URL handling - it accepts standard base64 data
160 | */
161 | get supportedUrls() {
162 | return {};
163 | }
164 |
165 | /**
166 | * Check if an error is due to lack of access to the Stripe AI SDK Provider
167 | */
168 | private isAccessDeniedError(error: any): boolean {
169 | // Check for the specific "Unrecognized request URL" error
170 | if (error.statusCode === 400 && error.responseBody) {
171 | try {
172 | const body = typeof error.responseBody === 'string'
173 | ? JSON.parse(error.responseBody)
174 | : error.responseBody;
175 |
176 | if (body.error?.type === 'invalid_request_error' &&
177 | body.error?.message?.includes('Unrecognized request URL')) {
178 | return true;
179 | }
180 | } catch {
181 | // If we can't parse the response, it's not the error we're looking for
182 | }
183 | }
184 | return false;
185 | }
186 |
187 | /**
188 | * Wrap API call errors with helpful messaging for access issues
189 | */
190 | private handleApiError(error: any): never {
191 | if (this.isAccessDeniedError(error)) {
192 | throw new StripeProviderAccessError(error);
193 | }
194 | throw error;
195 | }
196 |
197 | /**
198 | * Get model-specific default max output tokens for Anthropic models
199 | * Based on the official Anthropic provider implementation
200 | * @see https://github.com/vercel/ai/blob/main/packages/anthropic/src/anthropic-messages-language-model.ts
201 | */
202 | private getDefaultMaxTokens(modelId: string): number | undefined {
203 | if (!modelId.startsWith('anthropic/')) {
204 | return undefined; // No default for non-Anthropic models
205 | }
206 |
207 | // Extract model name after 'anthropic/' prefix
208 | const model = modelId.substring('anthropic/'.length);
209 |
210 | // Claude Sonnet 4 models (including variants like sonnet-4-1), Claude 3.7 Sonnet, and Haiku 4.5
211 | if (model.includes('sonnet-4') ||
212 | model.includes('claude-3-7-sonnet') ||
213 | model.includes('haiku-4-5')) {
214 | return 64000; // 64K tokens
215 | }
216 | // Claude Opus 4 models (including variants like opus-4-1)
217 | else if (model.includes('opus-4')) {
218 | return 32000; // 32K tokens
219 | }
220 | // Claude 3.5 Haiku
221 | else if (model.includes('claude-3-5-haiku')) {
222 | return 8192; // 8K tokens
223 | }
224 | // Default fallback for other Anthropic models
225 | else {
226 | return 4096;
227 | }
228 | }
229 |
230 | private getHeaders(
231 | options: LanguageModelV2CallOptions
232 | ): Record<string, string> {
233 | const baseHeaders = this.config.headers();
234 | const settingsHeaders = this.settings.headers || {};
235 |
236 | // Get provider-specific options
237 | const stripeOptions = (options.providerOptions?.stripe ||
238 | {}) as StripeProviderOptions;
239 |
240 | // Determine customer ID (priority: providerOptions > settings > error)
241 | const customerId =
242 | stripeOptions.customerId || this.settings.customerId || '';
243 |
244 | if (!customerId) {
245 | throw new Error(
246 | 'Stripe customer ID is required. Provide it via provider settings or providerOptions.'
247 | );
248 | }
249 |
250 | return {
251 | ...baseHeaders,
252 | ...settingsHeaders,
253 | ...(stripeOptions.headers || {}),
254 | 'X-Stripe-Customer-ID': customerId,
255 | };
256 | }
257 |
258 | private getArgs(options: LanguageModelV2CallOptions) {
259 | const warnings: LanguageModelV2CallWarning[] = [];
260 |
261 | // Convert AI SDK prompt to OpenAI-compatible format
262 | const messages = convertToOpenAIMessages(options.prompt);
263 |
264 | // Check if tools are provided and throw error (tool calling not supported by Stripe API)
265 | if (options.tools && options.tools.length > 0) {
266 | throw new Error(
267 | 'Tool calling is not supported by the Stripe AI SDK Provider. ' +
268 | 'The llm.stripe.com API does not currently support function calling or tool use. ' +
269 | 'Please remove the tools parameter from your request.'
270 | );
271 | }
272 |
273 | // Prepare tools if provided (currently unreachable: the tools check above throws before this point)
274 | const tools =
275 | options.tools && options.tools.length > 0
276 | ? options.tools.map((tool) => {
277 | if (tool.type === 'function') {
278 | return {
279 | type: 'function',
280 | function: {
281 | name: tool.name,
282 | description: tool.description,
283 | parameters: tool.inputSchema,
284 | },
285 | };
286 | }
287 | // Provider-defined tools
288 | return tool;
289 | })
290 | : undefined;
291 |
292 | // Map tool choice
293 | let toolChoice: string | {type: string; function?: {name: string}} | undefined;
294 | if (options.toolChoice) {
295 | if (options.toolChoice.type === 'tool') {
296 | toolChoice = {
297 | type: 'function',
298 | function: {name: options.toolChoice.toolName},
299 | };
300 | } else {
301 | toolChoice = options.toolChoice.type; // 'auto', 'none', 'required'
302 | }
303 | }
304 |
305 | // Build request body, only including defined values
306 | const body: Record<string, any> = {
307 | model: this.modelId,
308 | messages,
309 | };
310 |
311 | // Add optional parameters only if they're defined
312 | if (options.temperature !== undefined) body.temperature = options.temperature;
313 |
314 | // Handle max_tokens with model-specific defaults for Anthropic
315 | const maxTokens = options.maxOutputTokens ?? this.getDefaultMaxTokens(this.modelId);
316 | if (maxTokens !== undefined) body.max_tokens = maxTokens;
317 |
318 | if (options.topP !== undefined) body.top_p = options.topP;
319 | if (options.frequencyPenalty !== undefined) body.frequency_penalty = options.frequencyPenalty;
320 | if (options.presencePenalty !== undefined) body.presence_penalty = options.presencePenalty;
321 | if (options.stopSequences !== undefined) body.stop = options.stopSequences;
322 | if (options.seed !== undefined) body.seed = options.seed;
323 | if (tools !== undefined) body.tools = tools;
324 | if (toolChoice !== undefined) body.tool_choice = toolChoice;
325 |
326 | return {args: body, warnings};
327 | }
328 |
329 | async doGenerate(options: LanguageModelV2CallOptions) {
330 | const {args, warnings} = this.getArgs(options);
331 | const headers = this.getHeaders(options);
332 |
333 | let response: OpenAIResponse;
334 | try {
335 | const result = await postJsonToApi({
336 | url: `${this.config.baseURL}/chat/completions`,
337 | headers,
338 | body: args,
339 | failedResponseHandler: createStatusCodeErrorResponseHandler(),
340 | successfulResponseHandler: createJsonResponseHandler(openAIResponseSchema),
341 | abortSignal: options.abortSignal,
342 | });
343 | response = result.value;
344 | } catch (error) {
345 | this.handleApiError(error);
346 | }
347 |
348 | const choice = response.choices[0];
349 |
350 | // Convert response to AI SDK V2 format
351 | const content: LanguageModelV2Content[] = [];
352 |
353 | // Add text content if present
354 | if (choice.message.content) {
355 | content.push({
356 | type: 'text',
357 | text: choice.message.content,
358 | });
359 | }
360 |
361 | // Add tool calls if present
362 | if (choice.message.tool_calls) {
363 | for (const toolCall of choice.message.tool_calls) {
364 | content.push({
365 | type: 'tool-call',
366 | toolCallId: toolCall.id,
367 | toolName: toolCall.function.name,
368 | input: toolCall.function.arguments,
369 | });
370 | }
371 | }
372 |
373 | return {
374 | content,
375 | finishReason: mapOpenAIFinishReason(choice.finish_reason),
376 | usage: {
377 | inputTokens: response.usage?.prompt_tokens,
378 | outputTokens: response.usage?.completion_tokens,
379 | totalTokens: response.usage?.total_tokens,
380 | },
381 | request: {body: args},
382 | response: {
383 | headers: {},
384 | body: response,
385 | },
386 | warnings,
387 | };
388 | }
389 |
390 | async doStream(options: LanguageModelV2CallOptions) {
391 | const {args, warnings} = this.getArgs(options);
392 | const headers = this.getHeaders(options);
393 |
394 | let response: ReadableStream<ParseResult<OpenAIStreamChunk>>;
395 | try {
396 | const result = await postJsonToApi({
397 | url: `${this.config.baseURL}/chat/completions`,
398 | headers,
399 | body: {
400 | ...args,
401 | stream: true,
402 | stream_options: {include_usage: true},
403 | },
404 | failedResponseHandler: createStatusCodeErrorResponseHandler(),
405 | successfulResponseHandler: createEventSourceResponseHandler(openAIStreamChunkSchema),
406 | abortSignal: options.abortSignal,
407 | });
408 | response = result.value as ReadableStream<ParseResult<OpenAIStreamChunk>>;
409 | } catch (error) {
410 | this.handleApiError(error);
411 | }
412 |
413 | let finishReason: LanguageModelV2FinishReason = 'unknown';
414 | let usage = {
415 | inputTokens: undefined as number | undefined,
416 | outputTokens: undefined as number | undefined,
417 | totalTokens: undefined as number | undefined,
418 | };
419 |
420 | // Track tool calls during streaming
421 | const toolCallDeltas: Record<
422 | number,
423 | {
424 | id: string;
425 | name: string;
426 | input: string;
427 | }
428 | > = {};
429 |
430 | // Track text chunks with IDs
431 | let currentTextId = '';
432 |
433 | return {
434 | stream: response.pipeThrough(
435 | new TransformStream<ParseResult<OpenAIStreamChunk>, LanguageModelV2StreamPart>({
436 | transform(chunk, controller) {
437 | if (!chunk.success) {
438 | controller.enqueue({type: 'error', error: chunk.error});
439 | return;
440 | }
441 |
442 | // The value is already parsed as an object by the event source handler
443 | // If value is null (schema validation failed), use rawValue
444 | const data = chunk.value ?? (chunk.rawValue as OpenAIStreamChunk);
445 |
446 | // Skip empty or [DONE] events
447 | if (!data || data === '[DONE]') {
448 | return;
449 | }
450 |
451 | try {
452 | // Type guard: at this point we know data is not null or '[DONE]'
453 | if (typeof data === 'object' && 'choices' in data && data.choices && data.choices.length > 0) {
454 | const delta = data.choices[0].delta;
455 |
456 | // Handle text content
457 | // Check if content exists (including empty string "") rather than checking truthiness
458 | if ('content' in delta && delta.content !== null && delta.content !== undefined) {
459 | if (!currentTextId) {
460 | currentTextId = `text-${Date.now()}`;
461 | controller.enqueue({
462 | type: 'text-start',
463 | id: currentTextId,
464 | });
465 | }
466 | // Only emit text-delta if content is not empty
467 | if (delta.content !== '') {
468 | controller.enqueue({
469 | type: 'text-delta',
470 | id: currentTextId,
471 | delta: delta.content,
472 | });
473 | }
474 | }
475 |
476 | // Handle tool calls
477 | if (delta.tool_calls) {
478 | for (const toolCall of delta.tool_calls) {
479 | const index = toolCall.index;
480 |
481 | // Initialize or update tool call
482 | if (!toolCallDeltas[index]) {
483 | const id = toolCall.id || `tool-${Date.now()}-${index}`;
484 | toolCallDeltas[index] = {
485 | id,
486 | name: toolCall.function?.name || '',
487 | input: '',
488 | };
489 |
490 | // Emit tool-input-start
491 | controller.enqueue({
492 | type: 'tool-input-start',
493 | id,
494 | toolName: toolCallDeltas[index].name,
495 | });
496 | }
497 |
498 | if (toolCall.id) {
499 | toolCallDeltas[index].id = toolCall.id;
500 | }
501 | if (toolCall.function?.name) {
502 | toolCallDeltas[index].name = toolCall.function.name;
503 | }
504 | if (toolCall.function?.arguments) {
505 | toolCallDeltas[index].input += toolCall.function.arguments;
506 |
507 | // Emit the delta
508 | controller.enqueue({
509 | type: 'tool-input-delta',
510 | id: toolCallDeltas[index].id,
511 | delta: toolCall.function.arguments,
512 | });
513 | }
514 | }
515 | }
516 |
517 | // Handle finish reason
518 | if (data.choices[0].finish_reason) {
519 | finishReason = mapOpenAIFinishReason(
520 | data.choices[0].finish_reason
521 | );
522 | }
523 | }
524 |
525 | // Handle usage (typically comes in final chunk)
526 | if (typeof data === 'object' && 'usage' in data && data.usage) {
527 | usage = {
528 | inputTokens: data.usage.prompt_tokens || undefined,
529 | outputTokens: data.usage.completion_tokens || undefined,
530 | totalTokens: data.usage.total_tokens || undefined,
531 | };
532 | }
533 | } catch (error) {
534 | controller.enqueue({
535 | type: 'error',
536 | error: error,
537 | });
538 | }
539 | },
540 |
541 | flush(controller) {
542 | // End current text if any
543 | if (currentTextId) {
544 | controller.enqueue({
545 | type: 'text-end',
546 | id: currentTextId,
547 | });
548 | }
549 |
550 | // Emit final tool calls
551 | for (const toolCall of Object.values(toolCallDeltas)) {
552 | controller.enqueue({
553 | type: 'tool-input-end',
554 | id: toolCall.id,
555 | });
556 | controller.enqueue({
557 | type: 'tool-call',
558 | toolCallId: toolCall.id,
559 | toolName: toolCall.name,
560 | input: toolCall.input,
561 | });
562 | }
563 |
564 | // Emit finish event
565 | controller.enqueue({
566 | type: 'finish',
567 | finishReason,
568 | usage,
569 | });
570 | },
571 | })
572 | ),
573 | request: {body: args},
574 | response: {headers: {}},
575 | warnings,
576 | };
577 | }
578 | }
579 |
```
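The model above is normally constructed by the provider factory in stripe-provider.ts; the sketch below (not from the repo) wires it up by hand only to show the moving parts. The baseURL and Authorization header are assumptions about what that factory supplies, and the model and customer ids are placeholders.

```typescript
import {generateText} from 'ai';
import {StripeLanguageModel} from './stripe-language-model';

const model = new StripeLanguageModel(
  'anthropic/claude-3-5-sonnet', // forwarded as the OpenAI-compatible `model` field
  {customerId: 'cus_123'}, // settings-level customer id; can also come via providerOptions.stripe
  {
    provider: 'stripe',
    baseURL: 'https://llm.stripe.com/v1', // assumed; the real value comes from stripe-provider.ts
    headers: () => ({Authorization: `Bearer ${process.env.STRIPE_API_KEY ?? ''}`}), // assumed auth scheme
  }
);

async function main() {
  // doGenerate/doStream are invoked by the AI SDK; the usage surfaced here is
  // mapped from the prompt_tokens/completion_tokens parsed by openAIResponseSchema.
  const {text, usage} = await generateText({model, prompt: 'Say hello'});
  console.log(text, usage);
}

main().catch(console.error);
```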
--------------------------------------------------------------------------------
/llm/token-meter/tests/meter-event-logging.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for meter event logging utilities
3 | */
4 |
5 | import Stripe from 'stripe';
6 | import {logUsageEvent, sendMeterEventsToStripe} from '../meter-event-logging';
7 | import type {UsageEvent, MeterConfig} from '../types';
8 |
9 | // Mock Stripe
10 | jest.mock('stripe');
11 |
12 | describe('sendMeterEventsToStripe', () => {
13 | let mockStripe: jest.Mocked<any>;
14 | let consoleErrorSpy: jest.SpyInstance;
15 | let consoleLogSpy: jest.SpyInstance;
16 |
17 | beforeEach(() => {
18 | jest.clearAllMocks();
19 |
20 | mockStripe = {
21 | v2: {
22 | billing: {
23 | meterEvents: {
24 | create: jest.fn().mockResolvedValue({}),
25 | },
26 | },
27 | },
28 | };
29 |
30 | (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);
31 |
32 | consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
33 | consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
34 | });
35 |
36 | afterEach(() => {
37 | consoleErrorSpy.mockRestore();
38 | consoleLogSpy.mockRestore();
39 | });
40 |
41 | it('should send meter events to Stripe', async () => {
42 | const config: MeterConfig = {};
43 |
44 | const event: UsageEvent = {
45 | model: 'gpt-4',
46 | provider: 'openai',
47 | usage: {
48 | inputTokens: 100,
49 | outputTokens: 50,
50 | },
51 | stripeCustomerId: 'cus_123',
52 | };
53 |
54 | await sendMeterEventsToStripe(mockStripe, config, event);
55 |
56 | expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(2);
57 | });
58 |
59 | it('should send separate events for input and output tokens', async () => {
60 | const config: MeterConfig = {};
61 |
62 | const event: UsageEvent = {
63 | model: 'gpt-4',
64 | provider: 'openai',
65 | usage: {
66 | inputTokens: 100,
67 | outputTokens: 50,
68 | },
69 | stripeCustomerId: 'cus_123',
70 | };
71 |
72 | await sendMeterEventsToStripe(mockStripe, config, event);
73 |
74 | const calls = mockStripe.v2.billing.meterEvents.create.mock.calls;
75 | expect(calls[0][0]).toMatchObject({
76 | event_name: 'token-billing-tokens',
77 | payload: {
78 | stripe_customer_id: 'cus_123',
79 | value: '100',
80 | model: 'openai/gpt-4',
81 | token_type: 'input',
82 | },
83 | });
84 | expect(calls[1][0]).toMatchObject({
85 | event_name: 'token-billing-tokens',
86 | payload: {
87 | stripe_customer_id: 'cus_123',
88 | value: '50',
89 | model: 'openai/gpt-4',
90 | token_type: 'output',
91 | },
92 | });
93 | });
94 |
95 | it('should handle zero input tokens', async () => {
96 | const config: MeterConfig = {};
97 |
98 | const event: UsageEvent = {
99 | model: 'gpt-4',
100 | provider: 'openai',
101 | usage: {
102 | inputTokens: 0,
103 | outputTokens: 50,
104 | },
105 | stripeCustomerId: 'cus_123',
106 | };
107 |
108 | await sendMeterEventsToStripe(mockStripe, config, event);
109 |
110 | expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
111 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
112 | expect(call.payload.token_type).toBe('output');
113 | });
114 |
115 | it('should handle zero output tokens', async () => {
116 | const config: MeterConfig = {};
117 |
118 | const event: UsageEvent = {
119 | model: 'gpt-4',
120 | provider: 'openai',
121 | usage: {
122 | inputTokens: 100,
123 | outputTokens: 0,
124 | },
125 | stripeCustomerId: 'cus_123',
126 | };
127 |
128 | await sendMeterEventsToStripe(mockStripe, config, event);
129 |
130 | expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
131 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
132 | expect(call.payload.token_type).toBe('input');
133 | });
134 |
135 | it('should handle Stripe API errors gracefully', async () => {
136 | mockStripe.v2.billing.meterEvents.create.mockRejectedValue(
137 | new Error('API Error')
138 | );
139 |
140 | const config: MeterConfig = {};
141 |
142 | const event: UsageEvent = {
143 | model: 'gpt-4',
144 | provider: 'openai',
145 | usage: {
146 | inputTokens: 100,
147 | outputTokens: 50,
148 | },
149 | stripeCustomerId: 'cus_123',
150 | };
151 |
152 | await sendMeterEventsToStripe(mockStripe, config, event);
153 |
154 | expect(consoleErrorSpy).toHaveBeenCalledWith(
155 | 'Error sending meter events to Stripe:',
156 | expect.any(Error)
157 | );
158 | });
159 |
160 | it('should include proper timestamp format', async () => {
161 | const config: MeterConfig = {};
162 |
163 | const event: UsageEvent = {
164 | model: 'gpt-4',
165 | provider: 'openai',
166 | usage: {
167 | inputTokens: 100,
168 | outputTokens: 50,
169 | },
170 | stripeCustomerId: 'cus_123',
171 | };
172 |
173 | await sendMeterEventsToStripe(mockStripe, config, event);
174 |
175 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
176 | expect(call.timestamp).toMatch(
177 | /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/
178 | );
179 | });
180 |
181 | describe('Model Name Normalization - Anthropic', () => {
182 | it('should remove date suffix (YYYYMMDD)', async () => {
183 | const config: MeterConfig = {};
184 | const event: UsageEvent = {
185 | model: 'claude-3-opus-20240229',
186 | provider: 'anthropic',
187 | usage: {inputTokens: 100, outputTokens: 50},
188 | stripeCustomerId: 'cus_123',
189 | };
190 |
191 | await sendMeterEventsToStripe(mockStripe, config, event);
192 |
193 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
194 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
195 | });
196 |
197 | it('should remove -latest suffix', async () => {
198 | const config: MeterConfig = {};
199 | const event: UsageEvent = {
200 | model: 'claude-3-opus-latest',
201 | provider: 'anthropic',
202 | usage: {inputTokens: 100, outputTokens: 50},
203 | stripeCustomerId: 'cus_123',
204 | };
205 |
206 | await sendMeterEventsToStripe(mockStripe, config, event);
207 |
208 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
209 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
210 | });
211 |
212 | it('should convert version numbers (3-5 to 3.5)', async () => {
213 | const config: MeterConfig = {};
214 | const event: UsageEvent = {
215 | model: 'claude-3-5-sonnet-20241022',
216 | provider: 'anthropic',
217 | usage: {inputTokens: 100, outputTokens: 50},
218 | stripeCustomerId: 'cus_123',
219 | };
220 |
221 | await sendMeterEventsToStripe(mockStripe, config, event);
222 |
223 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
224 | expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
225 | });
226 |
227 | it('should handle latest suffix before date suffix', async () => {
228 | const config: MeterConfig = {};
229 | const event: UsageEvent = {
230 | model: 'claude-3-opus-latest-20240229',
231 | provider: 'anthropic',
232 | usage: {inputTokens: 100, outputTokens: 50},
233 | stripeCustomerId: 'cus_123',
234 | };
235 |
236 | await sendMeterEventsToStripe(mockStripe, config, event);
237 |
238 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
239 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
240 | });
241 |
242 | it('should handle version numbers + date suffix', async () => {
243 | const config: MeterConfig = {};
244 | const event: UsageEvent = {
245 | model: 'claude-3-5-sonnet-20241022',
246 | provider: 'anthropic',
247 | usage: {inputTokens: 100, outputTokens: 50},
248 | stripeCustomerId: 'cus_123',
249 | };
250 |
251 | await sendMeterEventsToStripe(mockStripe, config, event);
252 |
253 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
254 | expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
255 | });
256 |
257 | it('should handle version numbers + latest suffix', async () => {
258 | const config: MeterConfig = {};
259 | const event: UsageEvent = {
260 | model: 'claude-3-5-sonnet-latest',
261 | provider: 'anthropic',
262 | usage: {inputTokens: 100, outputTokens: 50},
263 | stripeCustomerId: 'cus_123',
264 | };
265 |
266 | await sendMeterEventsToStripe(mockStripe, config, event);
267 |
268 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
269 | expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
270 | });
271 |
272 | it('should handle haiku model', async () => {
273 | const config: MeterConfig = {};
274 | const event: UsageEvent = {
275 | model: 'claude-3-5-haiku-20241022',
276 | provider: 'anthropic',
277 | usage: {inputTokens: 100, outputTokens: 50},
278 | stripeCustomerId: 'cus_123',
279 | };
280 |
281 | await sendMeterEventsToStripe(mockStripe, config, event);
282 |
283 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
284 | expect(call.payload.model).toBe('anthropic/claude-3.5-haiku');
285 | });
286 |
287 | it('should handle model without any suffixes', async () => {
288 | const config: MeterConfig = {};
289 | const event: UsageEvent = {
290 | model: 'claude-3-opus',
291 | provider: 'anthropic',
292 | usage: {inputTokens: 100, outputTokens: 50},
293 | stripeCustomerId: 'cus_123',
294 | };
295 |
296 | await sendMeterEventsToStripe(mockStripe, config, event);
297 |
298 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
299 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
300 | });
301 |
302 | it('should handle claude-2 models', async () => {
303 | const config: MeterConfig = {};
304 | const event: UsageEvent = {
305 | model: 'claude-2-1-20231120',
306 | provider: 'anthropic',
307 | usage: {inputTokens: 100, outputTokens: 50},
308 | stripeCustomerId: 'cus_123',
309 | };
310 |
311 | await sendMeterEventsToStripe(mockStripe, config, event);
312 |
313 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
314 | expect(call.payload.model).toBe('anthropic/claude-2.1');
315 | });
316 |
317 | it('should handle future version numbers (4-0)', async () => {
318 | const config: MeterConfig = {};
319 | const event: UsageEvent = {
320 | model: 'claude-4-0-sonnet-20251231',
321 | provider: 'anthropic',
322 | usage: {inputTokens: 100, outputTokens: 50},
323 | stripeCustomerId: 'cus_123',
324 | };
325 |
326 | await sendMeterEventsToStripe(mockStripe, config, event);
327 |
328 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
329 | expect(call.payload.model).toBe('anthropic/claude-4.0-sonnet');
330 | });
331 | });
332 |
333 | describe('Model Name Normalization - OpenAI', () => {
334 | it('should keep gpt-4o-2024-05-13 as-is (special exception)', async () => {
335 | const config: MeterConfig = {};
336 | const event: UsageEvent = {
337 | model: 'gpt-4o-2024-05-13',
338 | provider: 'openai',
339 | usage: {inputTokens: 100, outputTokens: 50},
340 | stripeCustomerId: 'cus_123',
341 | };
342 |
343 | await sendMeterEventsToStripe(mockStripe, config, event);
344 |
345 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
346 | expect(call.payload.model).toBe('openai/gpt-4o-2024-05-13');
347 | });
348 |
349 | it('should remove date suffix from gpt-4-turbo', async () => {
350 | const config: MeterConfig = {};
351 | const event: UsageEvent = {
352 | model: 'gpt-4-turbo-2024-04-09',
353 | provider: 'openai',
354 | usage: {inputTokens: 100, outputTokens: 50},
355 | stripeCustomerId: 'cus_123',
356 | };
357 |
358 | await sendMeterEventsToStripe(mockStripe, config, event);
359 |
360 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
361 | expect(call.payload.model).toBe('openai/gpt-4-turbo');
362 | });
363 |
364 | it('should remove date suffix from gpt-4o-mini', async () => {
365 | const config: MeterConfig = {};
366 | const event: UsageEvent = {
367 | model: 'gpt-4o-mini-2024-07-18',
368 | provider: 'openai',
369 | usage: {inputTokens: 100, outputTokens: 50},
370 | stripeCustomerId: 'cus_123',
371 | };
372 |
373 | await sendMeterEventsToStripe(mockStripe, config, event);
374 |
375 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
376 | expect(call.payload.model).toBe('openai/gpt-4o-mini');
377 | });
378 |
379 | it('should NOT remove short date codes (MMDD format)', async () => {
380 | const config: MeterConfig = {};
381 | const event: UsageEvent = {
382 | model: 'gpt-4-0613',
383 | provider: 'openai',
384 | usage: {inputTokens: 100, outputTokens: 50},
385 | stripeCustomerId: 'cus_123',
386 | };
387 |
388 | await sendMeterEventsToStripe(mockStripe, config, event);
389 |
390 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
391 | // Short date codes like -0613 are NOT in YYYY-MM-DD format, so they stay
392 | expect(call.payload.model).toBe('openai/gpt-4-0613');
393 | });
394 |
395 | it('should keep gpt-4 without date as-is', async () => {
396 | const config: MeterConfig = {};
397 | const event: UsageEvent = {
398 | model: 'gpt-4',
399 | provider: 'openai',
400 | usage: {inputTokens: 100, outputTokens: 50},
401 | stripeCustomerId: 'cus_123',
402 | };
403 |
404 | await sendMeterEventsToStripe(mockStripe, config, event);
405 |
406 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
407 | expect(call.payload.model).toBe('openai/gpt-4');
408 | });
409 |
410 | it('should keep gpt-3.5-turbo without date as-is', async () => {
411 | const config: MeterConfig = {};
412 | const event: UsageEvent = {
413 | model: 'gpt-3.5-turbo',
414 | provider: 'openai',
415 | usage: {inputTokens: 100, outputTokens: 50},
416 | stripeCustomerId: 'cus_123',
417 | };
418 |
419 | await sendMeterEventsToStripe(mockStripe, config, event);
420 |
421 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
422 | expect(call.payload.model).toBe('openai/gpt-3.5-turbo');
423 | });
424 |
425 | it('should NOT remove short date codes from gpt-3.5-turbo', async () => {
426 | const config: MeterConfig = {};
427 | const event: UsageEvent = {
428 | model: 'gpt-3.5-turbo-0125',
429 | provider: 'openai',
430 | usage: {inputTokens: 100, outputTokens: 50},
431 | stripeCustomerId: 'cus_123',
432 | };
433 |
434 | await sendMeterEventsToStripe(mockStripe, config, event);
435 |
436 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
437 | // Short date codes like -0125 are NOT in YYYY-MM-DD format, so they stay
438 | expect(call.payload.model).toBe('openai/gpt-3.5-turbo-0125');
439 | });
440 |
441 | it('should handle o1-preview model', async () => {
442 | const config: MeterConfig = {};
443 | const event: UsageEvent = {
444 | model: 'o1-preview-2024-09-12',
445 | provider: 'openai',
446 | usage: {inputTokens: 100, outputTokens: 50},
447 | stripeCustomerId: 'cus_123',
448 | };
449 |
450 | await sendMeterEventsToStripe(mockStripe, config, event);
451 |
452 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
453 | expect(call.payload.model).toBe('openai/o1-preview');
454 | });
455 |
456 | it('should handle o1-mini model', async () => {
457 | const config: MeterConfig = {};
458 | const event: UsageEvent = {
459 | model: 'o1-mini-2024-09-12',
460 | provider: 'openai',
461 | usage: {inputTokens: 100, outputTokens: 50},
462 | stripeCustomerId: 'cus_123',
463 | };
464 |
465 | await sendMeterEventsToStripe(mockStripe, config, event);
466 |
467 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
468 | expect(call.payload.model).toBe('openai/o1-mini');
469 | });
470 |
471 | it('should NOT remove 4-digit dates (not in YYYY-MM-DD format)', async () => {
472 | const config: MeterConfig = {};
473 | const event: UsageEvent = {
474 | model: 'gpt-4-0314',
475 | provider: 'openai',
476 | usage: {inputTokens: 100, outputTokens: 50},
477 | stripeCustomerId: 'cus_123',
478 | };
479 |
480 | await sendMeterEventsToStripe(mockStripe, config, event);
481 |
482 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
483 | expect(call.payload.model).toBe('openai/gpt-4-0314');
484 | });
485 | });
486 |
487 | describe('Model Name Normalization - Google', () => {
488 | it('should keep gemini-1.5-pro as-is', async () => {
489 | const config: MeterConfig = {};
490 | const event: UsageEvent = {
491 | model: 'gemini-1.5-pro',
492 | provider: 'google',
493 | usage: {inputTokens: 100, outputTokens: 50},
494 | stripeCustomerId: 'cus_123',
495 | };
496 |
497 | await sendMeterEventsToStripe(mockStripe, config, event);
498 |
499 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
500 | expect(call.payload.model).toBe('google/gemini-1.5-pro');
501 | });
502 |
503 | it('should keep gemini-2.5-flash as-is', async () => {
504 | const config: MeterConfig = {};
505 | const event: UsageEvent = {
506 | model: 'gemini-2.5-flash',
507 | provider: 'google',
508 | usage: {inputTokens: 100, outputTokens: 50},
509 | stripeCustomerId: 'cus_123',
510 | };
511 |
512 | await sendMeterEventsToStripe(mockStripe, config, event);
513 |
514 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
515 | expect(call.payload.model).toBe('google/gemini-2.5-flash');
516 | });
517 |
518 | it('should keep gemini-pro as-is', async () => {
519 | const config: MeterConfig = {};
520 | const event: UsageEvent = {
521 | model: 'gemini-pro',
522 | provider: 'google',
523 | usage: {inputTokens: 100, outputTokens: 50},
524 | stripeCustomerId: 'cus_123',
525 | };
526 |
527 | await sendMeterEventsToStripe(mockStripe, config, event);
528 |
529 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
530 | expect(call.payload.model).toBe('google/gemini-pro');
531 | });
532 |
533 | it('should keep any Google model name as-is (even with dates)', async () => {
534 | const config: MeterConfig = {};
535 | const event: UsageEvent = {
536 | model: 'gemini-1.5-pro-20241201',
537 | provider: 'google',
538 | usage: {inputTokens: 100, outputTokens: 50},
539 | stripeCustomerId: 'cus_123',
540 | };
541 |
542 | await sendMeterEventsToStripe(mockStripe, config, event);
543 |
544 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
545 | expect(call.payload.model).toBe('google/gemini-1.5-pro-20241201');
546 | });
547 | });
548 | });
549 |
550 | describe('logUsageEvent', () => {
551 | let mockStripe: jest.Mocked<any>;
552 |
553 | beforeEach(() => {
554 | jest.clearAllMocks();
555 |
556 | mockStripe = {
557 | v2: {
558 | billing: {
559 | meterEvents: {
560 | create: jest.fn().mockResolvedValue({}),
561 | },
562 | },
563 | },
564 | };
565 |
566 | (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);
567 | });
568 |
569 | it('should call sendMeterEventsToStripe', () => {
570 | const config: MeterConfig = {};
571 |
572 | const event: UsageEvent = {
573 | model: 'gpt-4',
574 | provider: 'openai',
575 | usage: {
576 | inputTokens: 100,
577 | outputTokens: 50,
578 | },
579 | stripeCustomerId: 'cus_123',
580 | };
581 |
582 | // logUsageEvent is fire-and-forget, so we just ensure it doesn't throw
583 | expect(() => logUsageEvent(mockStripe, config, event)).not.toThrow();
584 | });
585 | });
586 |
587 |
```
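The normalization cases above imply a small, fixed set of rules. The sketch below is a hypothetical restatement of those rules for quick reference — `normalizeModelName` is an invented name, not the library's implementation — and it only reproduces the input/output pairs these tests assert (Anthropic: drop `-latest` and `-YYYYMMDD`, dot the minor version; OpenAI: drop only full `-YYYY-MM-DD` suffixes except the pinned `gpt-4o-2024-05-13`; Google: pass through unchanged; always prefix with the provider).

```typescript
// Hypothetical restatement of the normalization rules the tests above encode.
// Not the library's implementation; it only mirrors the asserted expectations.
function normalizeModelName(provider: string, model: string): string {
  let name = model;
  if (provider === 'anthropic') {
    name = name
      .replace(/-latest\b/, '') // claude-3-opus-latest -> claude-3-opus
      .replace(/-\d{8}$/, '') // strip -YYYYMMDD date suffixes
      .replace(/^claude-(\d)-(\d)/, 'claude-$1.$2'); // claude-3-5 -> claude-3.5
  } else if (provider === 'openai' && name !== 'gpt-4o-2024-05-13') {
    // Strip full -YYYY-MM-DD date suffixes only; short codes like -0613 stay.
    name = name.replace(/-\d{4}-\d{2}-\d{2}$/, '');
  }
  // Google (and any other provider) passes through unchanged.
  return `${provider}/${name}`;
}

// Examples mirroring the assertions above:
normalizeModelName('anthropic', 'claude-3-5-sonnet-20241022'); // 'anthropic/claude-3.5-sonnet'
normalizeModelName('openai', 'gpt-4o-mini-2024-07-18'); // 'openai/gpt-4o-mini'
normalizeModelName('openai', 'gpt-4-0613'); // 'openai/gpt-4-0613'
normalizeModelName('google', 'gemini-1.5-pro-20241201'); // 'google/gemini-1.5-pro-20241201'
```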
--------------------------------------------------------------------------------
/llm/ai-sdk/meter/tests/meter-event-logging.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for meter event logging utilities
3 | */
4 |
5 | import Stripe from 'stripe';
6 | import {logUsageEvent, sendMeterEventsToStripe} from '../meter-event-logging';
7 | import type {UsageEvent, MeterConfig} from '../meter-event-types';
8 |
9 | // Mock Stripe
10 | jest.mock('stripe');
11 |
12 | describe('sendMeterEventsToStripe', () => {
13 | let mockStripe: jest.Mocked<any>;
14 | let consoleErrorSpy: jest.SpyInstance;
15 | let consoleLogSpy: jest.SpyInstance;
16 |
17 | beforeEach(() => {
18 | jest.clearAllMocks();
19 |
20 | mockStripe = {
21 | v2: {
22 | billing: {
23 | meterEvents: {
24 | create: jest.fn().mockResolvedValue({}),
25 | },
26 | },
27 | },
28 | };
29 |
30 | (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);
31 |
32 | consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
33 | consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
34 | });
35 |
36 | afterEach(() => {
37 | consoleErrorSpy.mockRestore();
38 | consoleLogSpy.mockRestore();
39 | });
40 |
41 | it('should send meter events to Stripe', async () => {
42 | const config: MeterConfig = {};
43 |
44 | const event: UsageEvent = {
45 | model: 'gpt-4',
46 | provider: 'openai',
47 | usage: {
48 | inputTokens: 100,
49 | outputTokens: 50,
50 | },
51 | stripeCustomerId: 'cus_123',
52 | };
53 |
54 | await sendMeterEventsToStripe(mockStripe, config, event);
55 |
56 | expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(2);
57 | });
58 |
59 | it('should send separate events for input and output tokens', async () => {
60 | const config: MeterConfig = {};
61 |
62 | const event: UsageEvent = {
63 | model: 'gpt-4',
64 | provider: 'openai',
65 | usage: {
66 | inputTokens: 100,
67 | outputTokens: 50,
68 | },
69 | stripeCustomerId: 'cus_123',
70 | };
71 |
72 | await sendMeterEventsToStripe(mockStripe, config, event);
73 |
74 | const calls = mockStripe.v2.billing.meterEvents.create.mock.calls;
75 | expect(calls[0][0]).toMatchObject({
76 | event_name: 'token-billing-tokens',
77 | payload: {
78 | stripe_customer_id: 'cus_123',
79 | value: '100',
80 | model: 'openai/gpt-4',
81 | token_type: 'input',
82 | },
83 | });
84 | expect(calls[1][0]).toMatchObject({
85 | event_name: 'token-billing-tokens',
86 | payload: {
87 | stripe_customer_id: 'cus_123',
88 | value: '50',
89 | model: 'openai/gpt-4',
90 | token_type: 'output',
91 | },
92 | });
93 | });
94 |
95 | it('should handle zero input tokens', async () => {
96 | const config: MeterConfig = {};
97 |
98 | const event: UsageEvent = {
99 | model: 'gpt-4',
100 | provider: 'openai',
101 | usage: {
102 | inputTokens: 0,
103 | outputTokens: 50,
104 | },
105 | stripeCustomerId: 'cus_123',
106 | };
107 |
108 | await sendMeterEventsToStripe(mockStripe, config, event);
109 |
110 | expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
111 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
112 | expect(call.payload.token_type).toBe('output');
113 | });
114 |
115 | it('should handle zero output tokens', async () => {
116 | const config: MeterConfig = {};
117 |
118 | const event: UsageEvent = {
119 | model: 'gpt-4',
120 | provider: 'openai',
121 | usage: {
122 | inputTokens: 100,
123 | outputTokens: 0,
124 | },
125 | stripeCustomerId: 'cus_123',
126 | };
127 |
128 | await sendMeterEventsToStripe(mockStripe, config, event);
129 |
130 | expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
131 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
132 | expect(call.payload.token_type).toBe('input');
133 | });
134 |
135 | it('should handle Stripe API errors gracefully', async () => {
136 | mockStripe.v2.billing.meterEvents.create.mockRejectedValue(
137 | new Error('API Error')
138 | );
139 |
140 | const config: MeterConfig = {};
141 |
142 | const event: UsageEvent = {
143 | model: 'gpt-4',
144 | provider: 'openai',
145 | usage: {
146 | inputTokens: 100,
147 | outputTokens: 50,
148 | },
149 | stripeCustomerId: 'cus_123',
150 | };
151 |
152 | await sendMeterEventsToStripe(mockStripe, config, event);
153 |
154 | expect(consoleErrorSpy).toHaveBeenCalledWith(
155 | 'Error sending meter events to Stripe:',
156 | expect.any(Error)
157 | );
158 | });
159 |
160 | it('should include proper timestamp format', async () => {
161 | const config: MeterConfig = {};
162 |
163 | const event: UsageEvent = {
164 | model: 'gpt-4',
165 | provider: 'openai',
166 | usage: {
167 | inputTokens: 100,
168 | outputTokens: 50,
169 | },
170 | stripeCustomerId: 'cus_123',
171 | };
172 |
173 | await sendMeterEventsToStripe(mockStripe, config, event);
174 |
175 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
176 | expect(call.timestamp).toMatch(
177 | /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/
178 | );
179 | });
180 |
181 | describe('Model Name Normalization - Anthropic', () => {
182 | it('should remove date suffix (YYYYMMDD)', async () => {
183 | const config: MeterConfig = {};
184 | const event: UsageEvent = {
185 | model: 'claude-3-opus-20240229',
186 | provider: 'anthropic',
187 | usage: {inputTokens: 100, outputTokens: 50},
188 | stripeCustomerId: 'cus_123',
189 | };
190 |
191 | await sendMeterEventsToStripe(mockStripe, config, event);
192 |
193 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
194 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
195 | });
196 |
197 | it('should remove -latest suffix', async () => {
198 | const config: MeterConfig = {};
199 | const event: UsageEvent = {
200 | model: 'claude-3-opus-latest',
201 | provider: 'anthropic',
202 | usage: {inputTokens: 100, outputTokens: 50},
203 | stripeCustomerId: 'cus_123',
204 | };
205 |
206 | await sendMeterEventsToStripe(mockStripe, config, event);
207 |
208 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
209 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
210 | });
211 |
212 | it('should convert version numbers (3-5 to 3.5)', async () => {
213 | const config: MeterConfig = {};
214 | const event: UsageEvent = {
215 | model: 'claude-3-5-sonnet-20241022',
216 | provider: 'anthropic',
217 | usage: {inputTokens: 100, outputTokens: 50},
218 | stripeCustomerId: 'cus_123',
219 | };
220 |
221 | await sendMeterEventsToStripe(mockStripe, config, event);
222 |
223 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
224 | expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
225 | });
226 |
227 | it('should handle latest suffix before date suffix', async () => {
228 | const config: MeterConfig = {};
229 | const event: UsageEvent = {
230 | model: 'claude-3-opus-latest-20240229',
231 | provider: 'anthropic',
232 | usage: {inputTokens: 100, outputTokens: 50},
233 | stripeCustomerId: 'cus_123',
234 | };
235 |
236 | await sendMeterEventsToStripe(mockStripe, config, event);
237 |
238 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
239 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
240 | });
241 |
242 | it('should handle version numbers + date suffix', async () => {
243 | const config: MeterConfig = {};
244 | const event: UsageEvent = {
245 | model: 'claude-3-5-sonnet-20241022',
246 | provider: 'anthropic',
247 | usage: {inputTokens: 100, outputTokens: 50},
248 | stripeCustomerId: 'cus_123',
249 | };
250 |
251 | await sendMeterEventsToStripe(mockStripe, config, event);
252 |
253 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
254 | expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
255 | });
256 |
257 | it('should handle version numbers + latest suffix', async () => {
258 | const config: MeterConfig = {};
259 | const event: UsageEvent = {
260 | model: 'claude-3-5-sonnet-latest',
261 | provider: 'anthropic',
262 | usage: {inputTokens: 100, outputTokens: 50},
263 | stripeCustomerId: 'cus_123',
264 | };
265 |
266 | await sendMeterEventsToStripe(mockStripe, config, event);
267 |
268 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
269 | expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
270 | });
271 |
272 | it('should handle haiku model', async () => {
273 | const config: MeterConfig = {};
274 | const event: UsageEvent = {
275 | model: 'claude-3-5-haiku-20241022',
276 | provider: 'anthropic',
277 | usage: {inputTokens: 100, outputTokens: 50},
278 | stripeCustomerId: 'cus_123',
279 | };
280 |
281 | await sendMeterEventsToStripe(mockStripe, config, event);
282 |
283 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
284 | expect(call.payload.model).toBe('anthropic/claude-3.5-haiku');
285 | });
286 |
287 | it('should handle model without any suffixes', async () => {
288 | const config: MeterConfig = {};
289 | const event: UsageEvent = {
290 | model: 'claude-3-opus',
291 | provider: 'anthropic',
292 | usage: {inputTokens: 100, outputTokens: 50},
293 | stripeCustomerId: 'cus_123',
294 | };
295 |
296 | await sendMeterEventsToStripe(mockStripe, config, event);
297 |
298 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
299 | expect(call.payload.model).toBe('anthropic/claude-3-opus');
300 | });
301 |
302 | it('should handle claude-2 models', async () => {
303 | const config: MeterConfig = {};
304 | const event: UsageEvent = {
305 | model: 'claude-2-1-20231120',
306 | provider: 'anthropic',
307 | usage: {inputTokens: 100, outputTokens: 50},
308 | stripeCustomerId: 'cus_123',
309 | };
310 |
311 | await sendMeterEventsToStripe(mockStripe, config, event);
312 |
313 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
314 | expect(call.payload.model).toBe('anthropic/claude-2.1');
315 | });
316 |
317 | it('should handle future version numbers (4-0)', async () => {
318 | const config: MeterConfig = {};
319 | const event: UsageEvent = {
320 | model: 'claude-4-0-sonnet-20251231',
321 | provider: 'anthropic',
322 | usage: {inputTokens: 100, outputTokens: 50},
323 | stripeCustomerId: 'cus_123',
324 | };
325 |
326 | await sendMeterEventsToStripe(mockStripe, config, event);
327 |
328 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
329 | expect(call.payload.model).toBe('anthropic/claude-4.0-sonnet');
330 | });
331 | });
332 |
333 | describe('Model Name Normalization - OpenAI', () => {
334 | it('should keep gpt-4o-2024-05-13 as-is (special exception)', async () => {
335 | const config: MeterConfig = {};
336 | const event: UsageEvent = {
337 | model: 'gpt-4o-2024-05-13',
338 | provider: 'openai',
339 | usage: {inputTokens: 100, outputTokens: 50},
340 | stripeCustomerId: 'cus_123',
341 | };
342 |
343 | await sendMeterEventsToStripe(mockStripe, config, event);
344 |
345 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
346 | expect(call.payload.model).toBe('openai/gpt-4o-2024-05-13');
347 | });
348 |
349 | it('should remove date suffix from gpt-4-turbo', async () => {
350 | const config: MeterConfig = {};
351 | const event: UsageEvent = {
352 | model: 'gpt-4-turbo-2024-04-09',
353 | provider: 'openai',
354 | usage: {inputTokens: 100, outputTokens: 50},
355 | stripeCustomerId: 'cus_123',
356 | };
357 |
358 | await sendMeterEventsToStripe(mockStripe, config, event);
359 |
360 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
361 | expect(call.payload.model).toBe('openai/gpt-4-turbo');
362 | });
363 |
364 | it('should remove date suffix from gpt-4o-mini', async () => {
365 | const config: MeterConfig = {};
366 | const event: UsageEvent = {
367 | model: 'gpt-4o-mini-2024-07-18',
368 | provider: 'openai',
369 | usage: {inputTokens: 100, outputTokens: 50},
370 | stripeCustomerId: 'cus_123',
371 | };
372 |
373 | await sendMeterEventsToStripe(mockStripe, config, event);
374 |
375 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
376 | expect(call.payload.model).toBe('openai/gpt-4o-mini');
377 | });
378 |
379 | it('should NOT remove short date codes (MMDD format)', async () => {
380 | const config: MeterConfig = {};
381 | const event: UsageEvent = {
382 | model: 'gpt-4-0613',
383 | provider: 'openai',
384 | usage: {inputTokens: 100, outputTokens: 50},
385 | stripeCustomerId: 'cus_123',
386 | };
387 |
388 | await sendMeterEventsToStripe(mockStripe, config, event);
389 |
390 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
391 | // Short date codes like -0613 are NOT in YYYY-MM-DD format, so they stay
392 | expect(call.payload.model).toBe('openai/gpt-4-0613');
393 | });
394 |
395 | it('should keep gpt-4 without date as-is', async () => {
396 | const config: MeterConfig = {};
397 | const event: UsageEvent = {
398 | model: 'gpt-4',
399 | provider: 'openai',
400 | usage: {inputTokens: 100, outputTokens: 50},
401 | stripeCustomerId: 'cus_123',
402 | };
403 |
404 | await sendMeterEventsToStripe(mockStripe, config, event);
405 |
406 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
407 | expect(call.payload.model).toBe('openai/gpt-4');
408 | });
409 |
410 | it('should keep gpt-3.5-turbo without date as-is', async () => {
411 | const config: MeterConfig = {};
412 | const event: UsageEvent = {
413 | model: 'gpt-3.5-turbo',
414 | provider: 'openai',
415 | usage: {inputTokens: 100, outputTokens: 50},
416 | stripeCustomerId: 'cus_123',
417 | };
418 |
419 | await sendMeterEventsToStripe(mockStripe, config, event);
420 |
421 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
422 | expect(call.payload.model).toBe('openai/gpt-3.5-turbo');
423 | });
424 |
425 | it('should NOT remove short date codes from gpt-3.5-turbo', async () => {
426 | const config: MeterConfig = {};
427 | const event: UsageEvent = {
428 | model: 'gpt-3.5-turbo-0125',
429 | provider: 'openai',
430 | usage: {inputTokens: 100, outputTokens: 50},
431 | stripeCustomerId: 'cus_123',
432 | };
433 |
434 | await sendMeterEventsToStripe(mockStripe, config, event);
435 |
436 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
437 | // Short date codes like -0125 are NOT in YYYY-MM-DD format, so they stay
438 | expect(call.payload.model).toBe('openai/gpt-3.5-turbo-0125');
439 | });
440 |
441 | it('should handle o1-preview model', async () => {
442 | const config: MeterConfig = {};
443 | const event: UsageEvent = {
444 | model: 'o1-preview-2024-09-12',
445 | provider: 'openai',
446 | usage: {inputTokens: 100, outputTokens: 50},
447 | stripeCustomerId: 'cus_123',
448 | };
449 |
450 | await sendMeterEventsToStripe(mockStripe, config, event);
451 |
452 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
453 | expect(call.payload.model).toBe('openai/o1-preview');
454 | });
455 |
456 | it('should handle o1-mini model', async () => {
457 | const config: MeterConfig = {};
458 | const event: UsageEvent = {
459 | model: 'o1-mini-2024-09-12',
460 | provider: 'openai',
461 | usage: {inputTokens: 100, outputTokens: 50},
462 | stripeCustomerId: 'cus_123',
463 | };
464 |
465 | await sendMeterEventsToStripe(mockStripe, config, event);
466 |
467 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
468 | expect(call.payload.model).toBe('openai/o1-mini');
469 | });
470 |
471 | it('should NOT remove 4-digit dates (not in YYYY-MM-DD format)', async () => {
472 | const config: MeterConfig = {};
473 | const event: UsageEvent = {
474 | model: 'gpt-4-0314',
475 | provider: 'openai',
476 | usage: {inputTokens: 100, outputTokens: 50},
477 | stripeCustomerId: 'cus_123',
478 | };
479 |
480 | await sendMeterEventsToStripe(mockStripe, config, event);
481 |
482 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
483 | expect(call.payload.model).toBe('openai/gpt-4-0314');
484 | });
485 | });
486 |
487 | describe('Model Name Normalization - Google', () => {
488 | it('should keep gemini-1.5-pro as-is', async () => {
489 | const config: MeterConfig = {};
490 | const event: UsageEvent = {
491 | model: 'gemini-1.5-pro',
492 | provider: 'google',
493 | usage: {inputTokens: 100, outputTokens: 50},
494 | stripeCustomerId: 'cus_123',
495 | };
496 |
497 | await sendMeterEventsToStripe(mockStripe, config, event);
498 |
499 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
500 | expect(call.payload.model).toBe('google/gemini-1.5-pro');
501 | });
502 |
503 | it('should keep gemini-2.5-flash as-is', async () => {
504 | const config: MeterConfig = {};
505 | const event: UsageEvent = {
506 | model: 'gemini-2.5-flash',
507 | provider: 'google',
508 | usage: {inputTokens: 100, outputTokens: 50},
509 | stripeCustomerId: 'cus_123',
510 | };
511 |
512 | await sendMeterEventsToStripe(mockStripe, config, event);
513 |
514 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
515 | expect(call.payload.model).toBe('google/gemini-2.5-flash');
516 | });
517 |
518 | it('should keep gemini-pro as-is', async () => {
519 | const config: MeterConfig = {};
520 | const event: UsageEvent = {
521 | model: 'gemini-pro',
522 | provider: 'google',
523 | usage: {inputTokens: 100, outputTokens: 50},
524 | stripeCustomerId: 'cus_123',
525 | };
526 |
527 | await sendMeterEventsToStripe(mockStripe, config, event);
528 |
529 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
530 | expect(call.payload.model).toBe('google/gemini-pro');
531 | });
532 |
533 | it('should keep any Google model name as-is (even with dates)', async () => {
534 | const config: MeterConfig = {};
535 | const event: UsageEvent = {
536 | model: 'gemini-1.5-pro-20241201',
537 | provider: 'google',
538 | usage: {inputTokens: 100, outputTokens: 50},
539 | stripeCustomerId: 'cus_123',
540 | };
541 |
542 | await sendMeterEventsToStripe(mockStripe, config, event);
543 |
544 | const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
545 | expect(call.payload.model).toBe('google/gemini-1.5-pro-20241201');
546 | });
547 | });
548 | });
549 |
550 | describe('logUsageEvent', () => {
551 | let mockStripe: jest.Mocked<any>;
552 |
553 | beforeEach(() => {
554 | jest.clearAllMocks();
555 |
556 | mockStripe = {
557 | v2: {
558 | billing: {
559 | meterEvents: {
560 | create: jest.fn().mockResolvedValue({}),
561 | },
562 | },
563 | },
564 | };
565 |
566 | (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);
567 | });
568 |
569 | it('should call sendMeterEventsToStripe', () => {
570 | const config: MeterConfig = {};
571 |
572 | const event: UsageEvent = {
573 | model: 'gpt-4',
574 | provider: 'openai',
575 | usage: {
576 | inputTokens: 100,
577 | outputTokens: 50,
578 | },
579 | stripeCustomerId: 'cus_123',
580 | };
581 |
582 | // logUsageEvent is fire-and-forget, so we just ensure it doesn't throw
583 | expect(() => logUsageEvent(mockStripe, config, event)).not.toThrow();
584 | });
585 | });
586 |
587 |
```
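For reference, a minimal usage sketch of the functions exercised above: `logUsageEvent(stripe, config, event)` fires two `token-billing-tokens` meter events (one per non-zero token type) without blocking the caller. The signature and the `UsageEvent`/`MeterConfig` shapes are taken directly from the tests; the relative import paths, the `trackCompletion` wrapper, and the environment variable are assumptions made for the sketch.

```typescript
// Minimal sketch, assuming meter-event-logging.ts and meter-event-types.ts
// are co-located with this file and STRIPE_SECRET_KEY is set.
import Stripe from 'stripe';
import {logUsageEvent} from './meter-event-logging';
import type {UsageEvent, MeterConfig} from './meter-event-types';

const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!);
const config: MeterConfig = {}; // empty config is valid, per the tests

// Hypothetical application-level helper.
function trackCompletion(
  customerId: string,
  usage: {inputTokens: number; outputTokens: number}
): void {
  const event: UsageEvent = {
    model: 'gpt-4o-mini-2024-07-18', // normalized to 'openai/gpt-4o-mini' before sending
    provider: 'openai',
    usage,
    stripeCustomerId: customerId,
  };
  // Fire-and-forget: failures are logged to console.error, not thrown.
  logUsageEvent(stripe, config, event);
}

trackCompletion('cus_123', {inputTokens: 100, outputTokens: 50});
```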
--------------------------------------------------------------------------------
/llm/ai-sdk/provider/tests/stripe-language-model.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for Stripe Language Model implementation
3 | */
4 |
5 | import {StripeLanguageModel, StripeProviderAccessError} from '../stripe-language-model';
6 | import type {LanguageModelV2CallOptions} from '@ai-sdk/provider';
7 |
8 | describe('StripeLanguageModel', () => {
9 | let model: StripeLanguageModel;
10 |
11 | beforeEach(() => {
12 | model = new StripeLanguageModel(
13 | 'openai/gpt-5',
14 | {customerId: 'cus_test123'},
15 | {
16 | provider: 'stripe',
17 | baseURL: 'https://llm.stripe.com',
18 | headers: () => ({
19 | 'Content-Type': 'application/json',
20 | Authorization: 'Bearer sk_test_123',
21 | }),
22 | }
23 | );
24 | });
25 |
26 | describe('constructor', () => {
27 | it('should initialize with correct properties', () => {
28 | expect(model.specificationVersion).toBe('v2');
29 | expect(model.provider).toBe('stripe');
30 | expect(model.modelId).toBe('openai/gpt-5');
31 | });
32 |
33 | it('should support different model IDs', () => {
34 | const models = [
35 | 'openai/gpt-5',
36 | 'google/gemini-2.5-pro',
37 | 'anthropic/claude-sonnet-4',
38 | ];
39 |
40 | models.forEach((modelId) => {
41 | const m = new StripeLanguageModel(
42 | modelId,
43 | {customerId: 'cus_test'},
44 | {
45 | provider: 'stripe',
46 | baseURL: 'https://llm.stripe.com',
47 | headers: () => ({}),
48 | }
49 | );
50 | expect(m.modelId).toBe(modelId);
51 | });
52 | });
53 | });
54 |
55 | describe('supportedUrls', () => {
56 | it('should return empty object (no native URL support)', () => {
57 | expect(model.supportedUrls).toEqual({});
58 | });
59 | });
60 |
61 | describe('getHeaders', () => {
62 | it('should throw error when customer ID is not provided', () => {
63 | const modelWithoutCustomer = new StripeLanguageModel(
64 | 'openai/gpt-5',
65 | {}, // No customer ID
66 | {
67 | provider: 'stripe',
68 | baseURL: 'https://llm.stripe.com',
69 | headers: () => ({
70 | Authorization: 'Bearer sk_test_123',
71 | }),
72 | }
73 | );
74 |
75 | const options: LanguageModelV2CallOptions = {
76 | prompt: [],
77 | };
78 |
79 | expect(() => {
80 | // @ts-expect-error - Accessing private method for testing
81 | modelWithoutCustomer.getHeaders(options);
82 | }).toThrow('Stripe customer ID is required');
83 | });
84 |
85 | it('should use customer ID from settings', () => {
86 | const options: LanguageModelV2CallOptions = {
87 | prompt: [],
88 | };
89 |
90 | // @ts-expect-error - Accessing private method for testing
91 | const headers = model.getHeaders(options);
92 |
93 | expect(headers['X-Stripe-Customer-ID']).toBe('cus_test123');
94 | });
95 |
96 | it('should override customer ID from providerOptions', () => {
97 | const options: LanguageModelV2CallOptions = {
98 | prompt: [],
99 | providerOptions: {
100 | stripe: {
101 | customerId: 'cus_override',
102 | },
103 | },
104 | };
105 |
106 | // @ts-expect-error - Accessing private method for testing
107 | const headers = model.getHeaders(options);
108 |
109 | expect(headers['X-Stripe-Customer-ID']).toBe('cus_override');
110 | });
111 |
112 | it('should merge custom headers', () => {
113 | const modelWithHeaders = new StripeLanguageModel(
114 | 'openai/gpt-5',
115 | {
116 | customerId: 'cus_test',
117 | headers: {'X-Custom-Header': 'custom-value'},
118 | },
119 | {
120 | provider: 'stripe',
121 | baseURL: 'https://llm.stripe.com',
122 | headers: () => ({
123 | Authorization: 'Bearer sk_test_123',
124 | }),
125 | }
126 | );
127 |
128 | const options: LanguageModelV2CallOptions = {
129 | prompt: [],
130 | providerOptions: {
131 | stripe: {
132 | headers: {'X-Runtime-Header': 'runtime-value'},
133 | },
134 | },
135 | };
136 |
137 | // @ts-expect-error - Accessing private method for testing
138 | const headers = modelWithHeaders.getHeaders(options);
139 |
140 | expect(headers['X-Custom-Header']).toBe('custom-value');
141 | expect(headers['X-Runtime-Header']).toBe('runtime-value');
142 | expect(headers['X-Stripe-Customer-ID']).toBe('cus_test');
143 | });
144 | });
145 |
146 | describe('getArgs', () => {
147 | it('should convert basic prompt to OpenAI format', () => {
148 | const options: LanguageModelV2CallOptions = {
149 | prompt: [
150 | {
151 | role: 'user',
152 | content: [{type: 'text', text: 'Hello'}],
153 | },
154 | ],
155 | };
156 |
157 | // @ts-expect-error - Accessing private method for testing
158 | const {args, warnings} = model.getArgs(options);
159 |
160 | expect(args.model).toBe('openai/gpt-5');
161 | expect(args.messages).toHaveLength(1);
162 | expect(args.messages[0].role).toBe('user');
163 | expect(warnings).toEqual([]);
164 | });
165 |
166 | it('should include temperature setting', () => {
167 | const options: LanguageModelV2CallOptions = {
168 | prompt: [],
169 | temperature: 0.7,
170 | };
171 |
172 | // @ts-expect-error - Accessing private method for testing
173 | const {args} = model.getArgs(options);
174 |
175 | expect(args.temperature).toBe(0.7);
176 | });
177 |
178 | it('should include max_tokens setting', () => {
179 | const options: LanguageModelV2CallOptions = {
180 | prompt: [],
181 | maxOutputTokens: 100,
182 | };
183 |
184 | // @ts-expect-error - Accessing private method for testing
185 | const {args} = model.getArgs(options);
186 |
187 | expect(args.max_tokens).toBe(100);
188 | });
189 |
190 | it('should include stop sequences', () => {
191 | const options: LanguageModelV2CallOptions = {
192 | prompt: [],
193 | stopSequences: ['\n', 'END'],
194 | };
195 |
196 | // @ts-expect-error - Accessing private method for testing
197 | const {args} = model.getArgs(options);
198 |
199 | expect(args.stop).toEqual(['\n', 'END']);
200 | });
201 |
202 | it('should include topP, frequencyPenalty, and presencePenalty', () => {
203 | const options: LanguageModelV2CallOptions = {
204 | prompt: [],
205 | topP: 0.9,
206 | frequencyPenalty: 0.5,
207 | presencePenalty: 0.3,
208 | };
209 |
210 | // @ts-expect-error - Accessing private method for testing
211 | const {args} = model.getArgs(options);
212 |
213 | expect(args.top_p).toBe(0.9);
214 | expect(args.frequency_penalty).toBe(0.5);
215 | expect(args.presence_penalty).toBe(0.3);
216 | });
217 |
218 | it('should include seed when provided', () => {
219 | const options: LanguageModelV2CallOptions = {
220 | prompt: [],
221 | seed: 12345,
222 | };
223 |
224 | // @ts-expect-error - Accessing private method for testing
225 | const {args} = model.getArgs(options);
226 |
227 | expect(args.seed).toBe(12345);
228 | });
229 | });
230 |
231 | describe('tools support', () => {
232 | it('should throw error when tools are provided', () => {
233 | const options: LanguageModelV2CallOptions = {
234 | prompt: [],
235 | tools: [
236 | {
237 | type: 'function',
238 | name: 'getWeather',
239 | description: 'Get weather for a location',
240 | inputSchema: {
241 | type: 'object',
242 | properties: {
243 | location: {type: 'string'},
244 | },
245 | required: ['location'],
246 | },
247 | },
248 | ],
249 | };
250 |
251 | expect(() => {
252 | // @ts-expect-error - Accessing private method for testing
253 | model.getArgs(options);
254 | }).toThrow('Tool calling is not supported by the Stripe AI SDK Provider');
255 | });
256 |
257 | it('should throw error when tool choice is provided with tools', () => {
258 | const options: LanguageModelV2CallOptions = {
259 | prompt: [],
260 | tools: [
261 | {
262 | type: 'function',
263 | name: 'test',
264 | inputSchema: {type: 'object', properties: {}},
265 | },
266 | ],
267 | toolChoice: {type: 'auto'},
268 | };
269 |
270 | expect(() => {
271 | // @ts-expect-error - Accessing private method for testing
272 | model.getArgs(options);
273 | }).toThrow('Tool calling is not supported by the Stripe AI SDK Provider');
274 | });
275 |
276 | it('should not throw error when no tools are provided', () => {
277 | const options: LanguageModelV2CallOptions = {
278 | prompt: [
279 | {
280 | role: 'user',
281 | content: [{type: 'text', text: 'Hello'}],
282 | },
283 | ],
284 | };
285 |
286 | expect(() => {
287 | // @ts-expect-error - Accessing private method for testing
288 | model.getArgs(options);
289 | }).not.toThrow();
290 | });
291 | });
292 |
293 | describe('error handling', () => {
294 | it('should handle missing customer ID gracefully', () => {
295 | const modelWithoutCustomer = new StripeLanguageModel(
296 | 'openai/gpt-5',
297 | {},
298 | {
299 | provider: 'stripe',
300 | baseURL: 'https://llm.stripe.com',
301 | headers: () => ({
302 | Authorization: 'Bearer sk_test_123',
303 | }),
304 | }
305 | );
306 |
307 | const options: LanguageModelV2CallOptions = {
308 | prompt: [{role: 'user', content: [{type: 'text', text: 'Hi'}]}],
309 | };
310 |
311 | expect(() => {
312 | // @ts-expect-error - Accessing private method for testing
313 | modelWithoutCustomer.getHeaders(options);
314 | }).toThrow('Stripe customer ID is required');
315 | });
316 | });
317 |
318 | describe('anthropic max_tokens defaults', () => {
319 | it('should apply 64K default for Claude Sonnet 4 models', () => {
320 | const sonnetModel = new StripeLanguageModel(
321 | 'anthropic/claude-sonnet-4',
322 | {customerId: 'cus_test'},
323 | {
324 | provider: 'stripe',
325 | baseURL: 'https://llm.stripe.com',
326 | headers: () => ({}),
327 | }
328 | );
329 |
330 | const options: LanguageModelV2CallOptions = {
331 | prompt: [],
332 | };
333 |
334 | // @ts-expect-error - Accessing private method for testing
335 | const {args} = sonnetModel.getArgs(options);
336 |
337 | expect(args.max_tokens).toBe(64000);
338 | });
339 |
340 | it('should apply 32K default for Claude Opus 4 models', () => {
341 | const opusModel = new StripeLanguageModel(
342 | 'anthropic/claude-opus-4',
343 | {customerId: 'cus_test'},
344 | {
345 | provider: 'stripe',
346 | baseURL: 'https://llm.stripe.com',
347 | headers: () => ({}),
348 | }
349 | );
350 |
351 | const options: LanguageModelV2CallOptions = {
352 | prompt: [],
353 | };
354 |
355 | // @ts-expect-error - Accessing private method for testing
356 | const {args} = opusModel.getArgs(options);
357 |
358 | expect(args.max_tokens).toBe(32000);
359 | });
360 |
361 | it('should apply 8K default for Claude 3.5 Haiku', () => {
362 | const haikuModel = new StripeLanguageModel(
363 | 'anthropic/claude-3-5-haiku',
364 | {customerId: 'cus_test'},
365 | {
366 | provider: 'stripe',
367 | baseURL: 'https://llm.stripe.com',
368 | headers: () => ({}),
369 | }
370 | );
371 |
372 | const options: LanguageModelV2CallOptions = {
373 | prompt: [],
374 | };
375 |
376 | // @ts-expect-error - Accessing private method for testing
377 | const {args} = haikuModel.getArgs(options);
378 |
379 | expect(args.max_tokens).toBe(8192);
380 | });
381 |
382 | it('should apply 4K default for other Anthropic models', () => {
383 | const haikuModel = new StripeLanguageModel(
384 | 'anthropic/claude-3-haiku',
385 | {customerId: 'cus_test'},
386 | {
387 | provider: 'stripe',
388 | baseURL: 'https://llm.stripe.com',
389 | headers: () => ({}),
390 | }
391 | );
392 |
393 | const options: LanguageModelV2CallOptions = {
394 | prompt: [],
395 | };
396 |
397 | // @ts-expect-error - Accessing private method for testing
398 | const {args} = haikuModel.getArgs(options);
399 |
400 | expect(args.max_tokens).toBe(4096);
401 | });
402 |
403 | it('should not apply default for non-Anthropic models', () => {
404 | const openaiModel = new StripeLanguageModel(
405 | 'openai/gpt-5',
406 | {customerId: 'cus_test'},
407 | {
408 | provider: 'stripe',
409 | baseURL: 'https://llm.stripe.com',
410 | headers: () => ({}),
411 | }
412 | );
413 |
414 | const options: LanguageModelV2CallOptions = {
415 | prompt: [],
416 | };
417 |
418 | // @ts-expect-error - Accessing private method for testing
419 | const {args} = openaiModel.getArgs(options);
420 |
421 | expect(args.max_tokens).toBeUndefined();
422 | });
423 |
424 | it('should allow user-provided maxOutputTokens to override default', () => {
425 | const sonnetModel = new StripeLanguageModel(
426 | 'anthropic/claude-sonnet-4',
427 | {customerId: 'cus_test'},
428 | {
429 | provider: 'stripe',
430 | baseURL: 'https://llm.stripe.com',
431 | headers: () => ({}),
432 | }
433 | );
434 |
435 | const options: LanguageModelV2CallOptions = {
436 | prompt: [],
437 | maxOutputTokens: 1000, // User override
438 | };
439 |
440 | // @ts-expect-error - Accessing private method for testing
441 | const {args} = sonnetModel.getArgs(options);
442 |
443 | expect(args.max_tokens).toBe(1000);
444 | });
445 | });
446 |
447 | describe('access denied error handling', () => {
448 | it('should throw StripeProviderAccessError for "Unrecognized request URL" errors', () => {
449 | // Create a mock error that looks like the access denied error
450 | const mockError = {
451 | statusCode: 400,
452 | responseBody: JSON.stringify({
453 | error: {
454 | type: 'invalid_request_error',
455 | message: 'Unrecognized request URL. Please see https://stripe.com/docs or we can help at https://support.stripe.com/.',
456 | },
457 | }),
458 | message: 'Bad Request',
459 | };
460 |
461 | // Access the private method for testing
462 | const isAccessDenied = (model as any).isAccessDeniedError(mockError);
463 | expect(isAccessDenied).toBe(true);
464 |
465 | // Test that handleApiError throws the correct error type
466 | try {
467 | (model as any).handleApiError(mockError);
468 | fail('Should have thrown an error');
469 | } catch (error) {
470 | expect(error).toBeInstanceOf(StripeProviderAccessError);
471 | expect((error as Error).message).toContain('Stripe AI SDK Provider Access Required');
472 | expect((error as Error).message).toContain('Private Preview');
473 | expect((error as Error).message).toContain('https://docs.stripe.com/billing/token-billing');
474 | expect((error as any).cause).toBe(mockError);
475 | }
476 | });
477 |
478 | it('should not throw StripeProviderAccessError for other 400 errors', () => {
479 | const mockError = {
480 | statusCode: 400,
481 | responseBody: JSON.stringify({
482 | error: {
483 | type: 'invalid_request_error',
484 | message: 'Some other error message',
485 | },
486 | }),
487 | message: 'Bad Request',
488 | };
489 |
490 | const isAccessDenied = (model as any).isAccessDeniedError(mockError);
491 | expect(isAccessDenied).toBe(false);
492 |
493 | // Should re-throw the original error
494 | try {
495 | (model as any).handleApiError(mockError);
496 | fail('Should have thrown an error');
497 | } catch (error) {
498 | expect(error).not.toBeInstanceOf(StripeProviderAccessError);
499 | expect(error).toBe(mockError);
500 | }
501 | });
502 |
503 | it('should handle errors with parsed responseBody', () => {
504 | const mockError = {
505 | statusCode: 400,
506 | responseBody: {
507 | error: {
508 | type: 'invalid_request_error',
509 | message: 'Unrecognized request URL. Please see https://stripe.com/docs',
510 | },
511 | },
512 | message: 'Bad Request',
513 | };
514 |
515 | const isAccessDenied = (model as any).isAccessDeniedError(mockError);
516 | expect(isAccessDenied).toBe(true);
517 | });
518 |
519 | it('should handle malformed responseBody gracefully', () => {
520 | const mockError = {
521 | statusCode: 400,
522 | responseBody: 'Not valid JSON {{{',
523 | message: 'Bad Request',
524 | };
525 |
526 | const isAccessDenied = (model as any).isAccessDeniedError(mockError);
527 | expect(isAccessDenied).toBe(false);
528 | });
529 |
530 | it('should not match non-400 errors', () => {
531 | const mockError = {
532 | statusCode: 500,
533 | responseBody: JSON.stringify({
534 | error: {
535 | type: 'invalid_request_error',
536 | message: 'Unrecognized request URL',
537 | },
538 | }),
539 | message: 'Internal Server Error',
540 | };
541 |
542 | const isAccessDenied = (model as any).isAccessDeniedError(mockError);
543 | expect(isAccessDenied).toBe(false);
544 | });
545 | });
546 |
547 | describe('streaming error conditions', () => {
548 | it('should handle errors mid-stream', async () => {
549 | // Mock postJsonToApi to return a stream that emits an error
550 | const mockStream = new ReadableStream({
551 | start(controller) {
552 | // First emit a successful chunk
553 | controller.enqueue({
554 | success: true,
555 | value: {
556 | choices: [
557 | {
558 | delta: {content: 'Hello '},
559 | finish_reason: null,
560 | },
561 | ],
562 | },
563 | });
564 |
565 | // Then emit an error chunk
566 | controller.enqueue({
567 | success: false,
568 | error: new Error('Stream error occurred'),
569 | });
570 |
571 | controller.close();
572 | },
573 | });
574 |
575 | // Mock the postJsonToApi function
576 | jest.mock('@ai-sdk/provider-utils', () => ({
577 | postJsonToApi: jest.fn().mockResolvedValue({value: mockStream}),
578 | }));
579 |
580 | const options: LanguageModelV2CallOptions = {
581 | prompt: [{role: 'user', content: [{type: 'text', text: 'Hi'}]}],
582 | };
583 |
584 | try {
585 | const result = await model.doStream(options);
586 | const parts: any[] = [];
587 |
588 | for await (const part of result.stream) {
589 | parts.push(part);
590 | }
591 |
592 | // Should have text-delta and error parts
593 | const textDeltas = parts.filter((p) => p.type === 'text-delta');
594 | const errors = parts.filter((p) => p.type === 'error');
595 |
596 | expect(textDeltas.length).toBeGreaterThan(0);
597 | expect(errors.length).toBeGreaterThan(0);
598 | expect(errors[0].error).toBeDefined();
599 | } catch (error) {
600 | // Alternatively, the stream might throw
601 | expect(error).toBeDefined();
602 | }
603 | });
604 |
605 | it('should handle abort signal during streaming', async () => {
606 | const abortController = new AbortController();
607 |
608 | const options: LanguageModelV2CallOptions = {
609 | prompt: [{role: 'user', content: [{type: 'text', text: 'Hi'}]}],
610 | abortSignal: abortController.signal,
611 | };
612 |
613 | // Abort immediately
614 | abortController.abort();
615 |
616 | // Should handle the aborted request gracefully
617 | // The actual API call should throw or return an error
618 | try {
619 | await model.doStream(options);
620 | // If it doesn't throw, that's also acceptable
621 | } catch (error: any) {
622 | // Expect an abort-related error
623 | expect(
624 | error.name === 'AbortError' ||
625 | error.message?.includes('abort') ||
626 | error.statusCode !== undefined
627 | ).toBe(true);
628 | }
629 | });
630 | });
631 | });
632 |
633 |
```
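For reference, a construction sketch based on the test setup above. The constructor arguments, the required `customerId`, the per-call `providerOptions.stripe.customerId` override, and the Anthropic `max_tokens` defaults are behaviors these tests assert; the relative import path, the `generateText` call from the AI SDK, and the environment variable are assumptions. In practice the factory in `stripe-provider.ts` presumably builds the model instead of calling the constructor directly.

```typescript
// Sketch only: wiring StripeLanguageModel the way the test setup does.
import {generateText} from 'ai';
import {StripeLanguageModel} from './stripe-language-model';

const model = new StripeLanguageModel(
  'anthropic/claude-sonnet-4',
  {customerId: 'cus_test123'}, // required; getHeaders() throws without it
  {
    provider: 'stripe',
    baseURL: 'https://llm.stripe.com',
    headers: () => ({
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.STRIPE_SECRET_KEY}`,
    }),
  }
);

async function ask(prompt: string): Promise<string> {
  const {text} = await generateText({
    model,
    prompt,
    maxOutputTokens: 1000, // overrides the 64K Sonnet 4 default asserted above
    providerOptions: {
      stripe: {customerId: 'cus_override'}, // per-call customer override, as tested
    },
  });
  return text;
}
```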