#
tokens: 46816/50000 10/256 files (page 4/5)
lines: off (toggle) GitHub
raw markdown copy
This is page 4 of 5. Use http://codebase.md/stripe/agent-toolkit?page={x} to view the full context.

# Directory Structure

```
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   └── feature_request.yml
│   └── workflows
│       ├── main.yml
│       ├── npm_release_shared.yml
│       ├── pypi_release.yml
│       └── sync-skills.yml
├── .gitignore
├── .vscode
│   ├── extensions.json
│   ├── launch.json
│   └── settings.json
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── gemini-extension.json
├── LICENSE
├── llm
│   ├── ai-sdk
│   │   ├── jest.config.ts
│   │   ├── LICENSE
│   │   ├── meter
│   │   │   ├── examples
│   │   │   │   ├── .env.example
│   │   │   │   ├── .gitignore
│   │   │   │   ├── anthropic.ts
│   │   │   │   ├── google.ts
│   │   │   │   ├── openai.ts
│   │   │   │   ├── README.md
│   │   │   │   └── tsconfig.json
│   │   │   ├── index.ts
│   │   │   ├── meter-event-logging.ts
│   │   │   ├── meter-event-types.ts
│   │   │   ├── README.md
│   │   │   ├── tests
│   │   │   │   ├── ai-sdk-billing-wrapper-anthropic.test.ts
│   │   │   │   ├── ai-sdk-billing-wrapper-general.test.ts
│   │   │   │   ├── ai-sdk-billing-wrapper-google.test.ts
│   │   │   │   ├── ai-sdk-billing-wrapper-openai.test.ts
│   │   │   │   ├── ai-sdk-billing-wrapper-other-providers.test.ts
│   │   │   │   ├── meter-event-logging.test.ts
│   │   │   │   └── model-name-normalization.test.ts
│   │   │   ├── tsconfig.json
│   │   │   ├── types.ts
│   │   │   ├── utils.ts
│   │   │   └── wrapperV2.ts
│   │   ├── package.json
│   │   ├── pnpm-lock.yaml
│   │   ├── provider
│   │   │   ├── examples
│   │   │   │   ├── .env.example
│   │   │   │   ├── .gitignore
│   │   │   │   ├── anthropic.ts
│   │   │   │   ├── google.ts
│   │   │   │   ├── openai.ts
│   │   │   │   ├── README.md
│   │   │   │   └── tsconfig.json
│   │   │   ├── index.ts
│   │   │   ├── README.md
│   │   │   ├── stripe-language-model.ts
│   │   │   ├── stripe-provider.ts
│   │   │   ├── tests
│   │   │   │   ├── stripe-language-model.test.ts
│   │   │   │   ├── stripe-provider.test.ts
│   │   │   │   └── utils.test.ts
│   │   │   ├── tsconfig.build.json
│   │   │   ├── tsconfig.json
│   │   │   ├── types.ts
│   │   │   └── utils.ts
│   │   ├── README.md
│   │   ├── tsconfig.json
│   │   └── tsup.config.ts
│   ├── README.md
│   └── token-meter
│       ├── examples
│       │   ├── anthropic.ts
│       │   ├── gemini.ts
│       │   └── openai.ts
│       ├── index.ts
│       ├── jest.config.ts
│       ├── LICENSE
│       ├── meter-event-logging.ts
│       ├── meter-event-types.ts
│       ├── package.json
│       ├── pnpm-lock.yaml
│       ├── README.md
│       ├── tests
│       │   ├── meter-event-logging.test.ts
│       │   ├── model-name-normalization.test.ts
│       │   ├── token-meter-anthropic.test.ts
│       │   ├── token-meter-gemini.test.ts
│       │   ├── token-meter-general.test.ts
│       │   ├── token-meter-openai.test.ts
│       │   └── type-detection.test.ts
│       ├── token-meter.ts
│       ├── tsconfig.build.json
│       ├── tsconfig.json
│       ├── types.ts
│       └── utils
│           └── type-detection.ts
├── README.md
├── SECURITY.md
├── skills
│   ├── get-started-kiro.md
│   ├── README.md
│   ├── stripe-best-practices.md
│   └── sync.js
└── tools
    ├── modelcontextprotocol
    │   ├── .dxtignore
    │   ├── .gitignore
    │   ├── .node-version
    │   ├── .prettierrc
    │   ├── build-dxt.js
    │   ├── Dockerfile
    │   ├── eslint.config.mjs
    │   ├── jest.config.ts
    │   ├── LICENSE
    │   ├── manifest.json
    │   ├── package.json
    │   ├── pnpm-lock.yaml
    │   ├── README.md
    │   ├── server.json
    │   ├── src
    │   │   ├── index.ts
    │   │   └── test
    │   │       └── index.test.ts
    │   ├── stripe_icon.png
    │   └── tsconfig.json
    ├── python
    │   ├── .editorconfig
    │   ├── .flake8
    │   ├── examples
    │   │   ├── crewai
    │   │   │   ├── .env.template
    │   │   │   ├── main.py
    │   │   │   └── README.md
    │   │   ├── langchain
    │   │   │   ├── __init__.py
    │   │   │   ├── .env.template
    │   │   │   ├── main.py
    │   │   │   └── README.md
    │   │   ├── openai
    │   │   │   ├── .env.template
    │   │   │   ├── customer_support
    │   │   │   │   ├── .env.template
    │   │   │   │   ├── emailer.py
    │   │   │   │   ├── env.py
    │   │   │   │   ├── main.py
    │   │   │   │   ├── pyproject.toml
    │   │   │   │   ├── README.md
    │   │   │   │   ├── repl.py
    │   │   │   │   └── support_agent.py
    │   │   │   ├── file_search
    │   │   │   │   ├── main.py
    │   │   │   │   └── README.md
    │   │   │   └── web_search
    │   │   │       ├── .env.template
    │   │   │       ├── main.py
    │   │   │       └── README.md
    │   │   └── strands
    │   │       └── main.py
    │   ├── Makefile
    │   ├── pyproject.toml
    │   ├── README.md
    │   ├── requirements.txt
    │   ├── stripe_agent_toolkit
    │   │   ├── __init__.py
    │   │   ├── api.py
    │   │   ├── configuration.py
    │   │   ├── crewai
    │   │   │   ├── tool.py
    │   │   │   └── toolkit.py
    │   │   ├── functions.py
    │   │   ├── langchain
    │   │   │   ├── tool.py
    │   │   │   └── toolkit.py
    │   │   ├── openai
    │   │   │   ├── hooks.py
    │   │   │   ├── tool.py
    │   │   │   └── toolkit.py
    │   │   ├── prompts.py
    │   │   ├── schema.py
    │   │   ├── strands
    │   │   │   ├── __init__.py
    │   │   │   ├── hooks.py
    │   │   │   ├── tool.py
    │   │   │   └── toolkit.py
    │   │   └── tools.py
    │   └── tests
    │       ├── __init__.py
    │       ├── test_configuration.py
    │       └── test_functions.py
    ├── README.md
    └── typescript
        ├── .gitignore
        ├── .prettierrc
        ├── eslint.config.mjs
        ├── examples
        │   ├── ai-sdk
        │   │   ├── .env.template
        │   │   ├── index.ts
        │   │   ├── package.json
        │   │   ├── README.md
        │   │   └── tsconfig.json
        │   ├── cloudflare
        │   │   ├── .dev.vars.example
        │   │   ├── .gitignore
        │   │   ├── biome.json
        │   │   ├── package.json
        │   │   ├── README.md
        │   │   ├── src
        │   │   │   ├── app.ts
        │   │   │   ├── imageGenerator.ts
        │   │   │   ├── index.ts
        │   │   │   ├── oauth.ts
        │   │   │   └── utils.ts
        │   │   ├── tsconfig.json
        │   │   ├── worker-configuration.d.ts
        │   │   └── wrangler.jsonc
        │   ├── langchain
        │   │   ├── .env.template
        │   │   ├── index.ts
        │   │   ├── package.json
        │   │   ├── README.md
        │   │   └── tsconfig.json
        │   └── openai
        │       ├── .env.template
        │       ├── index.ts
        │       ├── package.json
        │       ├── README.md
        │       └── tsconfig.json
        ├── jest.config.ts
        ├── LICENSE
        ├── package.json
        ├── pnpm-lock.yaml
        ├── pnpm-workspace.yaml
        ├── README.md
        ├── src
        │   ├── ai-sdk
        │   │   ├── index.ts
        │   │   ├── tool.ts
        │   │   └── toolkit.ts
        │   ├── cloudflare
        │   │   ├── index.ts
        │   │   └── README.md
        │   ├── langchain
        │   │   ├── index.ts
        │   │   ├── tool.ts
        │   │   └── toolkit.ts
        │   ├── modelcontextprotocol
        │   │   ├── index.ts
        │   │   ├── README.md
        │   │   ├── register-paid-tool.ts
        │   │   └── toolkit.ts
        │   ├── openai
        │   │   ├── index.ts
        │   │   └── toolkit.ts
        │   ├── shared
        │   │   ├── api.ts
        │   │   ├── balance
        │   │   │   └── retrieveBalance.ts
        │   │   ├── configuration.ts
        │   │   ├── coupons
        │   │   │   ├── createCoupon.ts
        │   │   │   └── listCoupons.ts
        │   │   ├── customers
        │   │   │   ├── createCustomer.ts
        │   │   │   └── listCustomers.ts
        │   │   ├── disputes
        │   │   │   ├── listDisputes.ts
        │   │   │   └── updateDispute.ts
        │   │   ├── documentation
        │   │   │   └── searchDocumentation.ts
        │   │   ├── invoiceItems
        │   │   │   └── createInvoiceItem.ts
        │   │   ├── invoices
        │   │   │   ├── createInvoice.ts
        │   │   │   ├── finalizeInvoice.ts
        │   │   │   └── listInvoices.ts
        │   │   ├── paymentIntents
        │   │   │   └── listPaymentIntents.ts
        │   │   ├── paymentLinks
        │   │   │   └── createPaymentLink.ts
        │   │   ├── prices
        │   │   │   ├── createPrice.ts
        │   │   │   └── listPrices.ts
        │   │   ├── products
        │   │   │   ├── createProduct.ts
        │   │   │   └── listProducts.ts
        │   │   ├── refunds
        │   │   │   └── createRefund.ts
        │   │   ├── subscriptions
        │   │   │   ├── cancelSubscription.ts
        │   │   │   ├── listSubscriptions.ts
        │   │   │   └── updateSubscription.ts
        │   │   └── tools.ts
        │   └── test
        │       ├── modelcontextprotocol
        │       │   └── register-paid-tool.test.ts
        │       └── shared
        │           ├── balance
        │           │   ├── functions.test.ts
        │           │   └── parameters.test.ts
        │           ├── configuration.test.ts
        │           ├── customers
        │           │   ├── functions.test.ts
        │           │   └── parameters.test.ts
        │           ├── disputes
        │           │   └── functions.test.ts
        │           ├── documentation
        │           │   ├── functions.test.ts
        │           │   └── parameters.test.ts
        │           ├── invoiceItems
        │           │   ├── functions.test.ts
        │           │   ├── parameters.test.ts
        │           │   └── prompts.test.ts
        │           ├── invoices
        │           │   ├── functions.test.ts
        │           │   ├── parameters.test.ts
        │           │   └── prompts.test.ts
        │           ├── paymentIntents
        │           │   ├── functions.test.ts
        │           │   ├── parameters.test.ts
        │           │   └── prompts.test.ts
        │           ├── paymentLinks
        │           │   ├── functions.test.ts
        │           │   ├── parameters.test.ts
        │           │   └── prompts.test.ts
        │           ├── prices
        │           │   ├── functions.test.ts
        │           │   └── parameters.test.ts
        │           ├── products
        │           │   ├── functions.test.ts
        │           │   └── parameters.test.ts
        │           ├── refunds
        │           │   ├── functions.test.ts
        │           │   └── parameters.test.ts
        │           └── subscriptions
        │               ├── functions.test.ts
        │               ├── parameters.test.ts
        │               └── prompts.test.ts
        ├── tsconfig.json
        └── tsup.config.ts
```

# Files

--------------------------------------------------------------------------------
/llm/ai-sdk/provider/tests/utils.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for Stripe provider utility functions
 */

import {
  convertToOpenAIMessages,
  mapOpenAIFinishReason,
  normalizeModelId,
} from '../utils';

describe('Stripe Provider Utils', () => {
  describe('convertToOpenAIMessages', () => {
    it('should convert system message', () => {
      const messages = convertToOpenAIMessages([
        {role: 'system', content: 'You are a helpful assistant.'},
      ]);

      expect(messages).toHaveLength(1);
      expect(messages[0]).toEqual({
        role: 'system',
        content: 'You are a helpful assistant.',
      });
    });

    it('should convert user text message', () => {
      const messages = convertToOpenAIMessages([
        {role: 'user', content: [{type: 'text', text: 'Hello!'}]},
      ]);

      expect(messages).toHaveLength(1);
      expect(messages[0].role).toBe('user');
      // A lone text part collapses to a plain string (Anthropic compatibility).
      expect(messages[0].content).toBe('Hello!');
    });

    it('should convert user message with file URL', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'user',
          content: [
            {type: 'text', text: 'What is this?'},
            {
              type: 'file',
              data: 'https://example.com/image.jpg',
              mediaType: 'image/jpeg',
            },
          ],
        },
      ]);

      expect(messages).toHaveLength(1);
      // Messages with several parts stay in array form.
      expect(Array.isArray(messages[0].content)).toBe(true);
      const parts = messages[0].content as any[];
      expect(parts).toHaveLength(2);
      expect(parts[1].type).toBe('image_url');
      expect(parts[1].image_url.url).toBe('https://example.com/image.jpg');
    });

    it('should convert user message with file Uint8Array to base64', () => {
      // First four bytes of a PNG file.
      const pngHeader = new Uint8Array([137, 80, 78, 71]);

      const messages = convertToOpenAIMessages([
        {
          role: 'user',
          content: [{type: 'file', data: pngHeader, mediaType: 'image/png'}],
        },
      ]);

      expect(messages).toHaveLength(1);
      // Even a single file part is wrapped in a one-element array.
      expect(Array.isArray(messages[0].content)).toBe(true);
      const parts = messages[0].content as any[];
      expect(parts).toHaveLength(1);
      expect(parts[0].type).toBe('image_url');
      expect(parts[0].image_url.url).toMatch(/^data:image\/png;base64,/);
    });

    it('should convert assistant message with text', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'assistant',
          content: [{type: 'text', text: 'Hello! How can I help?'}],
        },
      ]);

      expect(messages).toHaveLength(1);
      expect(messages[0]).toEqual({
        role: 'assistant',
        content: 'Hello! How can I help?',
        tool_calls: undefined,
      });
    });

    it('should convert assistant message with tool calls', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'assistant',
          content: [
            {
              type: 'tool-call',
              toolCallId: 'call_123',
              toolName: 'getWeather',
              input: {location: 'San Francisco'},
            },
          ],
        },
      ]);

      expect(messages).toHaveLength(1);
      expect(messages[0].role).toBe('assistant');
      // Content is an empty string when the message carries only tool calls.
      expect(messages[0].content).toBe('');
      expect(messages[0].tool_calls).toHaveLength(1);
      expect(messages[0].tool_calls![0]).toEqual({
        id: 'call_123',
        type: 'function',
        function: {
          name: 'getWeather',
          arguments: '{"location":"San Francisco"}',
        },
      });
    });

    it('should convert assistant message with text and tool calls', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'assistant',
          content: [
            {type: 'text', text: 'Let me check the weather.'},
            {
              type: 'tool-call',
              toolCallId: 'call_123',
              toolName: 'getWeather',
              input: {location: 'Paris'},
            },
          ],
        },
      ]);

      expect(messages).toHaveLength(1);
      expect(messages[0].role).toBe('assistant');
      expect(messages[0].content).toBe('Let me check the weather.');
      expect(messages[0].tool_calls).toHaveLength(1);
    });

    it('should convert tool message', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'tool',
          content: [
            {
              type: 'tool-result',
              toolCallId: 'call_123',
              toolName: 'getWeather',
              output: {
                type: 'json',
                value: {temperature: 72, condition: 'Sunny'},
              },
            },
          ],
        },
      ]);

      expect(messages).toHaveLength(1);
      expect(messages[0]).toEqual({
        role: 'tool',
        tool_call_id: 'call_123',
        name: 'getWeather',
        content: '{"temperature":72,"condition":"Sunny"}',
      });
    });

    it('should handle string tool call args', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'assistant',
          content: [
            {
              type: 'tool-call',
              toolCallId: 'call_123',
              toolName: 'test',
              input: '{"key":"value"}',
            },
          ],
        },
      ]);

      // Pre-serialized argument strings pass through untouched.
      expect(messages[0].tool_calls![0].function.arguments).toBe(
        '{"key":"value"}'
      );
    });

    it('should handle string tool result', () => {
      const messages = convertToOpenAIMessages([
        {
          role: 'tool',
          content: [
            {
              type: 'tool-result',
              toolCallId: 'call_123',
              toolName: 'test',
              output: {type: 'text', value: 'Simple string result'},
            },
          ],
        },
      ]);

      // Text outputs are forwarded verbatim rather than JSON-encoded.
      expect(messages[0].content).toBe('Simple string result');
    });

    it('should convert multiple messages', () => {
      const messages = convertToOpenAIMessages([
        {role: 'system', content: 'You are helpful.'},
        {role: 'user', content: [{type: 'text', text: 'Hello'}]},
        {role: 'assistant', content: [{type: 'text', text: 'Hi!'}]},
      ]);

      expect(messages).toHaveLength(3);
      expect(messages[0].role).toBe('system');
      expect(messages[1].role).toBe('user');
      expect(messages[2].role).toBe('assistant');
    });

    it('should throw error for unsupported message role', () => {
      const convert = () =>
        convertToOpenAIMessages([
          // @ts-expect-error - Testing invalid role
          {role: 'invalid', content: 'test'},
        ]);

      expect(convert).toThrow('Unsupported message role');
    });

    it('should throw error for unsupported part type', () => {
      const convert = () =>
        convertToOpenAIMessages([
          {
            role: 'user',
            // @ts-expect-error - Testing invalid part type
            content: [{type: 'unsupported', data: 'test'}],
          },
        ]);

      expect(convert).toThrow('Unsupported user message part type');
    });
  });

  describe('mapOpenAIFinishReason', () => {
    it('should map "stop" to "stop"', () => {
      const mapped = mapOpenAIFinishReason('stop');
      expect(mapped).toBe('stop');
    });

    it('should map "length" to "length"', () => {
      const mapped = mapOpenAIFinishReason('length');
      expect(mapped).toBe('length');
    });

    it('should map "content_filter" to "content-filter"', () => {
      const mapped = mapOpenAIFinishReason('content_filter');
      expect(mapped).toBe('content-filter');
    });

    it('should map "tool_calls" to "tool-calls"', () => {
      const mapped = mapOpenAIFinishReason('tool_calls');
      expect(mapped).toBe('tool-calls');
    });

    it('should map "function_call" to "tool-calls"', () => {
      // Legacy function-calling finish reason folds into tool-calls.
      const mapped = mapOpenAIFinishReason('function_call');
      expect(mapped).toBe('tool-calls');
    });

    it('should map null to "unknown"', () => {
      const mapped = mapOpenAIFinishReason(null);
      expect(mapped).toBe('unknown');
    });

    it('should map undefined to "unknown"', () => {
      const mapped = mapOpenAIFinishReason(undefined);
      expect(mapped).toBe('unknown');
    });

    it('should map unknown reason to "unknown"', () => {
      const mapped = mapOpenAIFinishReason('some_other_reason');
      expect(mapped).toBe('unknown');
    });
  });

  describe('normalizeModelId', () => {
    describe('Anthropic models', () => {
      it('should remove date suffix (YYYYMMDD format)', () => {
        // Note: The date is removed AND version dashes are converted to dots
        expect(normalizeModelId('anthropic/claude-3-5-sonnet-20241022')).toBe(
          'anthropic/claude-3.5-sonnet'
        );
        expect(normalizeModelId('anthropic/claude-sonnet-4-20250101')).toBe(
          'anthropic/claude-sonnet-4'
        );
        expect(normalizeModelId('anthropic/claude-opus-4-20241231')).toBe(
          'anthropic/claude-opus-4'
        );
      });

      it('should remove -latest suffix', () => {
        expect(normalizeModelId('anthropic/claude-sonnet-4-latest')).toBe(
          'anthropic/claude-sonnet-4'
        );
        expect(normalizeModelId('anthropic/claude-opus-4-latest')).toBe(
          'anthropic/claude-opus-4'
        );
      });

      it('should convert version dashes to dots (claude-3-5 → claude-3.5)', () => {
        expect(normalizeModelId('anthropic/claude-3-5-sonnet')).toBe(
          'anthropic/claude-3.5-sonnet'
        );
        expect(normalizeModelId('anthropic/claude-3-7-sonnet')).toBe(
          'anthropic/claude-3.7-sonnet'
        );
      });

      it('should handle version numbers without model names (sonnet-4-5 → sonnet-4.5)', () => {
        expect(normalizeModelId('anthropic/sonnet-4-5')).toBe(
          'anthropic/sonnet-4.5'
        );
        expect(normalizeModelId('anthropic/opus-4-1')).toBe(
          'anthropic/opus-4.1'
        );
      });

      it('should handle combined date suffix and version conversion', () => {
        expect(normalizeModelId('anthropic/claude-3-5-sonnet-20241022')).toBe(
          'anthropic/claude-3.5-sonnet'
        );
        expect(normalizeModelId('anthropic/claude-3-7-sonnet-20250115')).toBe(
          'anthropic/claude-3.7-sonnet'
        );
      });

      it('should handle -latest suffix with version conversion', () => {
        expect(normalizeModelId('anthropic/claude-3-5-sonnet-latest')).toBe(
          'anthropic/claude-3.5-sonnet'
        );
        expect(normalizeModelId('anthropic/sonnet-4-5-latest')).toBe(
          'anthropic/sonnet-4.5'
        );
      });

      it('should handle models without dates or versions', () => {
        expect(normalizeModelId('anthropic/claude-sonnet')).toBe(
          'anthropic/claude-sonnet'
        );
        expect(normalizeModelId('anthropic/claude-opus')).toBe(
          'anthropic/claude-opus'
        );
      });

      it('should handle case-insensitive provider names', () => {
        expect(normalizeModelId('Anthropic/claude-3-5-sonnet-20241022')).toBe(
          'Anthropic/claude-3.5-sonnet'
        );
        expect(normalizeModelId('ANTHROPIC/claude-3-5-sonnet-20241022')).toBe(
          'ANTHROPIC/claude-3.5-sonnet'
        );
      });
    });

    describe('OpenAI models', () => {
      it('should remove date suffix in YYYY-MM-DD format', () => {
        expect(normalizeModelId('openai/gpt-4-turbo-2024-04-09')).toBe(
          'openai/gpt-4-turbo'
        );
        expect(normalizeModelId('openai/gpt-4-2024-12-31')).toBe(
          'openai/gpt-4'
        );
      });

      it('should keep gpt-4o-2024-05-13 as an exception', () => {
        expect(normalizeModelId('openai/gpt-4o-2024-05-13')).toBe(
          'openai/gpt-4o-2024-05-13'
        );
      });

      it('should handle models without dates', () => {
        expect(normalizeModelId('openai/gpt-5')).toBe('openai/gpt-5');
        expect(normalizeModelId('openai/gpt-4.1')).toBe('openai/gpt-4.1');
        expect(normalizeModelId('openai/o3')).toBe('openai/o3');
      });

      it('should handle case-insensitive provider names', () => {
        expect(normalizeModelId('OpenAI/gpt-4-2024-12-31')).toBe(
          'OpenAI/gpt-4'
        );
        expect(normalizeModelId('OPENAI/gpt-4-turbo-2024-04-09')).toBe(
          'OPENAI/gpt-4-turbo'
        );
      });

      it('should not affect YYYYMMDD format (only YYYY-MM-DD)', () => {
        // OpenAI only removes YYYY-MM-DD format, not YYYYMMDD
        expect(normalizeModelId('openai/gpt-4-20241231')).toBe(
          'openai/gpt-4-20241231'
        );
      });
    });

    describe('Google/Gemini models', () => {
      it('should keep models as-is', () => {
        expect(normalizeModelId('google/gemini-2.5-pro')).toBe(
          'google/gemini-2.5-pro'
        );
        expect(normalizeModelId('google/gemini-2.0-flash')).toBe(
          'google/gemini-2.0-flash'
        );
        expect(normalizeModelId('google/gemini-1.5-pro')).toBe(
          'google/gemini-1.5-pro'
        );
      });

      it('should not remove any suffixes', () => {
        expect(normalizeModelId('google/gemini-2.5-pro-20250101')).toBe(
          'google/gemini-2.5-pro-20250101'
        );
        expect(normalizeModelId('google/gemini-2.5-pro-latest')).toBe(
          'google/gemini-2.5-pro-latest'
        );
      });
    });

    describe('Other providers', () => {
      it('should keep unknown provider models as-is', () => {
        expect(normalizeModelId('bedrock/claude-3-5-sonnet')).toBe(
          'bedrock/claude-3-5-sonnet'
        );
        expect(normalizeModelId('azure/gpt-4-2024-12-31')).toBe(
          'azure/gpt-4-2024-12-31'
        );
        expect(normalizeModelId('custom/my-model-1-2-3')).toBe(
          'custom/my-model-1-2-3'
        );
      });
    });

    describe('Edge cases', () => {
      it('should handle model IDs without provider prefix', () => {
        // If no slash, return as-is
        expect(normalizeModelId('gpt-5')).toBe('gpt-5');
        expect(normalizeModelId('claude-sonnet-4')).toBe('claude-sonnet-4');
      });

      it('should handle model IDs with multiple slashes', () => {
        // If more than one slash, return as-is
        expect(normalizeModelId('provider/category/model')).toBe(
          'provider/category/model'
        );
      });

      it('should handle empty strings', () => {
        expect(normalizeModelId('')).toBe('');
      });
    });
  });
});


```

--------------------------------------------------------------------------------
/llm/token-meter/tests/type-detection.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for type detection utilities
 */

import {
  detectResponse,
  isGeminiStream,
  extractUsageFromChatStream,
  extractUsageFromResponseStream,
  extractUsageFromAnthropicStream,
} from '../utils/type-detection';

describe('detectResponse - OpenAI Chat Completions', () => {
  // Builds a minimal chat-completion payload; pass `usage` to attach usage data.
  const buildChatCompletion = (usage?: object) => ({
    id: 'chatcmpl-123',
    object: 'chat.completion',
    created: Date.now(),
    model: 'gpt-4',
    choices: [
      {
        index: 0,
        message: {role: 'assistant', content: 'Hello!'},
        finish_reason: 'stop',
      },
    ],
    ...(usage ? {usage} : {}),
  });

  it('should detect OpenAI chat completion response', () => {
    const info = detectResponse(
      buildChatCompletion({
        prompt_tokens: 10,
        completion_tokens: 5,
        total_tokens: 15,
      })
    );

    expect(info).not.toBeNull();
    expect(info?.provider).toBe('openai');
    expect(info?.type).toBe('chat_completion');
    expect(info?.model).toBe('gpt-4');
    expect(info?.inputTokens).toBe(10);
    expect(info?.outputTokens).toBe(5);
  });

  it('should handle missing usage data in chat completion', () => {
    const info = detectResponse(buildChatCompletion());

    // With no usage block, token counts default to zero.
    expect(info).not.toBeNull();
    expect(info?.inputTokens).toBe(0);
    expect(info?.outputTokens).toBe(0);
  });

  it('should handle partial usage data', () => {
    const info = detectResponse(buildChatCompletion({prompt_tokens: 10}));

    // Missing completion_tokens falls back to zero.
    expect(info).not.toBeNull();
    expect(info?.inputTokens).toBe(10);
    expect(info?.outputTokens).toBe(0);
  });
});

describe('detectResponse - OpenAI Responses API', () => {
  it('should detect OpenAI responses API response', () => {
    const payload = {
      id: 'resp_123',
      object: 'response',
      created: Date.now(),
      model: 'gpt-4',
      output: 'Hello!',
      usage: {input_tokens: 10, output_tokens: 5},
    };

    const info = detectResponse(payload);

    expect(info).not.toBeNull();
    expect(info?.provider).toBe('openai');
    expect(info?.type).toBe('response_api');
    expect(info?.model).toBe('gpt-4');
    expect(info?.inputTokens).toBe(10);
    expect(info?.outputTokens).toBe(5);
  });

  it('should return null for responses API with empty usage', () => {
    const payload = {
      id: 'resp_123',
      object: 'response',
      created: Date.now(),
      model: 'gpt-4',
      output: 'Hello!',
      usage: {},
    };

    // An empty usage object fails the type guard, so nothing is detected.
    expect(detectResponse(payload)).toBeNull();
  });
});

describe('detectResponse - OpenAI Embeddings', () => {
  it('should detect OpenAI embedding response', () => {
    const payload = {
      object: 'list',
      data: [{object: 'embedding', embedding: [0.1, 0.2, 0.3], index: 0}],
      model: 'text-embedding-ada-002',
      usage: {prompt_tokens: 8, total_tokens: 8},
    };

    const info = detectResponse(payload);

    expect(info).not.toBeNull();
    expect(info?.provider).toBe('openai');
    expect(info?.type).toBe('embedding');
    expect(info?.model).toBe('text-embedding-ada-002');
    expect(info?.inputTokens).toBe(8);
    // Embedding responses report no output tokens.
    expect(info?.outputTokens).toBe(0);
  });

  it('should handle missing usage data in embeddings', () => {
    const payload = {
      object: 'list',
      data: [{object: 'embedding', embedding: [0.1, 0.2, 0.3], index: 0}],
      model: 'text-embedding-ada-002',
    };

    const info = detectResponse(payload);

    // With no usage block, both token counts default to zero.
    expect(info).not.toBeNull();
    expect(info?.inputTokens).toBe(0);
    expect(info?.outputTokens).toBe(0);
  });
});

describe('detectResponse - Anthropic Messages', () => {
  it('should detect Anthropic message response', () => {
    const payload = {
      id: 'msg_123',
      type: 'message',
      role: 'assistant',
      content: [{type: 'text', text: 'Hello!'}],
      model: 'claude-3-5-sonnet-20241022',
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {input_tokens: 10, output_tokens: 5},
    };

    const info = detectResponse(payload);

    expect(info).not.toBeNull();
    expect(info?.provider).toBe('anthropic');
    expect(info?.type).toBe('chat_completion');
    expect(info?.model).toBe('claude-3-5-sonnet-20241022');
    expect(info?.inputTokens).toBe(10);
    expect(info?.outputTokens).toBe(5);
  });

  it('should return null for Anthropic messages with empty usage', () => {
    const payload = {
      id: 'msg_123',
      type: 'message',
      role: 'assistant',
      content: [{type: 'text', text: 'Hello!'}],
      model: 'claude-3-opus-20240229',
      stop_reason: 'end_turn',
      stop_sequence: null,
      usage: {},
    };

    // An empty usage object fails the type guard, so nothing is detected.
    expect(detectResponse(payload)).toBeNull();
  });
});

describe('detectResponse - Gemini', () => {
  it('should detect Gemini response', () => {
    const payload = {
      response: {
        text: () => 'Hello!',
        usageMetadata: {
          promptTokenCount: 10,
          candidatesTokenCount: 5,
          totalTokenCount: 15,
        },
        modelVersion: 'gemini-1.5-pro',
      },
    };

    const info = detectResponse(payload);

    expect(info).not.toBeNull();
    expect(info?.provider).toBe('google');
    expect(info?.type).toBe('chat_completion');
    expect(info?.model).toBe('gemini-1.5-pro');
    expect(info?.inputTokens).toBe(10);
    expect(info?.outputTokens).toBe(5);
  });

  it('should include reasoning tokens in output for extended thinking models', () => {
    const payload = {
      response: {
        text: () => 'Hello!',
        usageMetadata: {
          promptTokenCount: 10,
          candidatesTokenCount: 5,
          // Reasoning ("thinking") tokens reported separately by Gemini.
          thoughtsTokenCount: 3,
          totalTokenCount: 18,
        },
        modelVersion: 'gemini-1.5-pro',
      },
    };

    const info = detectResponse(payload);

    expect(info).not.toBeNull();
    // Output combines candidate tokens (5) with reasoning tokens (3).
    expect(info?.outputTokens).toBe(8);
  });

  it('should return null when usageMetadata is missing', () => {
    const payload = {
      response: {
        text: () => 'Hello!',
      },
    };

    // Without usageMetadata the type guard rejects the payload.
    expect(detectResponse(payload)).toBeNull();
  });

  it('should use default model name when modelVersion is missing', () => {
    const payload = {
      response: {
        text: () => 'Hello!',
        usageMetadata: {
          promptTokenCount: 10,
          candidatesTokenCount: 5,
          totalTokenCount: 15,
        },
      },
    };

    const info = detectResponse(payload);

    expect(info).not.toBeNull();
    // Falls back to the generic 'gemini' model name.
    expect(info?.model).toBe('gemini');
  });
});

// Payloads that match no provider shape — including nullish inputs — must be
// rejected (null) rather than misclassified as some provider's response.
describe('detectResponse - Unknown types', () => {
  it('should return null for unknown response types', () => {
    const unrecognized = {
      some: 'data',
      that: 'does not match any provider',
    };

    expect(detectResponse(unrecognized)).toBeNull();
  });

  it('should return null for null input', () => {
    expect(detectResponse(null)).toBeNull();
  });

  it('should return null for undefined input', () => {
    expect(detectResponse(undefined)).toBeNull();
  });
});

// Structural check that distinguishes Gemini streaming results (a
// {stream, response} pair) from other providers' stream objects.
describe('isGeminiStream', () => {
  it('should detect Gemini stream structure', () => {
    const candidate = {
      stream: {
        [Symbol.asyncIterator]: function* () {
          yield {text: () => 'test'};
        },
      },
      response: Promise.resolve({}),
    };

    expect(isGeminiStream(candidate)).toBe(true);
  });

  it('should return false for OpenAI-style streams', () => {
    // OpenAI streams expose tee()/toReadableStream() but no {stream, response} pair.
    const openaiShaped = {
      tee: () => [{}, {}],
      toReadableStream: () => {},
    };

    expect(isGeminiStream(openaiShaped)).toBe(false);
  });

  it('should return false for non-stream objects', () => {
    expect(isGeminiStream({})).toBe(false);
    // null and undefined return falsy values which coerce to false in boolean context
    expect(isGeminiStream(null)).toBeFalsy();
    expect(isGeminiStream(undefined)).toBeFalsy();
  });
});

// OpenAI chat-completions streaming: usage (if requested) arrives only on the
// final chunk's `usage` field; earlier chunks carry content deltas.
describe('extractUsageFromChatStream', () => {
  it('should extract usage from OpenAI chat stream', async () => {
    const chunks = [
      {
        id: 'chatcmpl-123',
        object: 'chat.completion.chunk',
        created: Date.now(),
        model: 'gpt-4',
        choices: [
          {
            index: 0,
            delta: {content: 'Hello'},
            finish_reason: null,
          },
        ],
      },
      {
        id: 'chatcmpl-123',
        object: 'chat.completion.chunk',
        created: Date.now(),
        model: 'gpt-4',
        choices: [
          {
            index: 0,
            delta: {},
            finish_reason: 'stop',
          },
        ],
        // Final chunk carries the aggregate token usage.
        usage: {
          prompt_tokens: 10,
          completion_tokens: 5,
          total_tokens: 15,
        },
      },
    ];

    const mockStream = {
      async *[Symbol.asyncIterator]() {
        for (const chunk of chunks) {
          yield chunk;
        }
      },
    };

    const detected = await extractUsageFromChatStream(mockStream as any);

    expect(detected).not.toBeNull();
    expect(detected?.provider).toBe('openai');
    expect(detected?.model).toBe('gpt-4');
    expect(detected?.inputTokens).toBe(10);
    expect(detected?.outputTokens).toBe(5);
  });

  it('should handle streams without usage data', async () => {
    const chunks = [
      {
        id: 'chatcmpl-123',
        object: 'chat.completion.chunk',
        created: Date.now(),
        model: 'gpt-4',
        choices: [
          {
            index: 0,
            delta: {content: 'Hello'},
            finish_reason: 'stop',
          },
        ],
      },
    ];

    const mockStream = {
      async *[Symbol.asyncIterator]() {
        for (const chunk of chunks) {
          yield chunk;
        }
      },
    };

    const detected = await extractUsageFromChatStream(mockStream as any);

    // No usage chunk present: counts default to zero rather than failing.
    expect(detected).not.toBeNull();
    expect(detected?.inputTokens).toBe(0);
    expect(detected?.outputTokens).toBe(0);
  });

  it('should handle stream errors gracefully', async () => {
    const mockStream = {
      async *[Symbol.asyncIterator]() {
        throw new Error('Stream error');
      },
    };

    // A throwing stream yields null instead of propagating the error.
    const detected = await extractUsageFromChatStream(mockStream as any);

    expect(detected).toBeNull();
  });
});

// OpenAI Responses API streaming: usage is reported on the terminal
// `response.done` event's nested `response.usage` object.
describe('extractUsageFromResponseStream', () => {
  it('should extract usage from OpenAI Responses API stream', async () => {
    const chunks = [
      {
        type: 'response.output_text.delta',
        delta: 'Hello',
      },
      {
        type: 'response.done',
        response: {
          id: 'resp_123',
          model: 'gpt-4',
          usage: {
            input_tokens: 10,
            output_tokens: 5,
          },
        },
      },
    ];

    const mockStream = {
      async *[Symbol.asyncIterator]() {
        for (const chunk of chunks) {
          yield chunk;
        }
      },
    };

    const detected = await extractUsageFromResponseStream(mockStream as any);

    expect(detected).not.toBeNull();
    expect(detected?.provider).toBe('openai');
    expect(detected?.type).toBe('response_api');
    expect(detected?.model).toBe('gpt-4');
    expect(detected?.inputTokens).toBe(10);
    expect(detected?.outputTokens).toBe(5);
  });

  it('should handle streams without usage data', async () => {
    const chunks = [
      {
        type: 'response.output_text.delta',
        delta: 'Hello',
      },
      {
        type: 'response.done',
        response: {
          id: 'resp_123',
          model: 'gpt-4',
        },
      },
    ];

    const mockStream = {
      async *[Symbol.asyncIterator]() {
        for (const chunk of chunks) {
          yield chunk;
        }
      },
    };

    const detected = await extractUsageFromResponseStream(mockStream as any);

    // Missing usage on response.done: counts default to zero.
    expect(detected).not.toBeNull();
    expect(detected?.inputTokens).toBe(0);
    expect(detected?.outputTokens).toBe(0);
  });
});

// Anthropic streaming splits usage across events: input_tokens arrive on
// `message_start`, output_tokens on the trailing `message_delta`.
describe('extractUsageFromAnthropicStream', () => {
  it('should extract usage from Anthropic stream', async () => {
    const chunks = [
      {
        type: 'message_start',
        message: {
          id: 'msg_123',
          type: 'message',
          role: 'assistant',
          content: [],
          model: 'claude-3-opus-20240229',
          usage: {
            input_tokens: 10,
            output_tokens: 0,
          },
        },
      },
      {
        type: 'content_block_start',
        index: 0,
        content_block: {type: 'text', text: ''},
      },
      {
        type: 'content_block_delta',
        index: 0,
        delta: {type: 'text_delta', text: 'Hello'},
      },
      {
        type: 'message_delta',
        delta: {stop_reason: 'end_turn'},
        usage: {
          output_tokens: 5,
        },
      },
    ];

    const mockStream = {
      async *[Symbol.asyncIterator]() {
        for (const chunk of chunks) {
          yield chunk;
        }
      },
    };

    const detected = await extractUsageFromAnthropicStream(mockStream as any);

    expect(detected).not.toBeNull();
    expect(detected?.provider).toBe('anthropic');
    expect(detected?.model).toBe('claude-3-opus-20240229');
    expect(detected?.inputTokens).toBe(10);
    expect(detected?.outputTokens).toBe(5);
  });

  it('should handle streams without usage data', async () => {
    const chunks = [
      {
        type: 'message_start',
        message: {
          id: 'msg_123',
          model: 'claude-3-opus-20240229',
          usage: {},
        },
      },
    ];

    const mockStream = {
      async *[Symbol.asyncIterator]() {
        for (const chunk of chunks) {
          yield chunk;
        }
      },
    };

    const detected = await extractUsageFromAnthropicStream(mockStream as any);

    // Empty usage object: counts default to zero rather than failing.
    expect(detected).not.toBeNull();
    expect(detected?.inputTokens).toBe(0);
    expect(detected?.outputTokens).toBe(0);
  });
});


```

--------------------------------------------------------------------------------
/llm/token-meter/tests/token-meter-anthropic.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for TokenMeter - Anthropic Provider
 */

import Stripe from 'stripe';
import {createTokenMeter} from '../token-meter';
import type {MeterConfig} from '../types';

// Mock Stripe
jest.mock('stripe');

// End-to-end metering suite for Anthropic responses: each tracked call is
// expected to emit exactly two Stripe meter events (input + output tokens),
// with the raw Anthropic model id normalized to 'anthropic/<family>' form.
describe('TokenMeter - Anthropic Provider', () => {
  let mockMeterEventsCreate: jest.Mock;
  let config: MeterConfig;
  const TEST_API_KEY = 'sk_test_mock_key';

  beforeEach(() => {
    jest.clearAllMocks();
    mockMeterEventsCreate = jest.fn().mockResolvedValue({});
    
    // Mock the Stripe constructor so meter.trackUsage() hits our spy at
    // stripe.v2.billing.meterEvents.create instead of the real API.
    (Stripe as unknown as jest.Mock).mockImplementation(() => ({
      v2: {
        billing: {
          meterEvents: {
            create: mockMeterEventsCreate,
          },
        },
      },
    }));
    
    config = {};
  });

  describe('Messages - Non-streaming', () => {
    it('should track usage from basic message', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_123',
        type: 'message',
        role: 'assistant',
        content: [{type: 'text', text: 'Hello, World!'}],
        model: 'claude-3-5-sonnet-20241022',
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 15,
          output_tokens: 8,
        },
      };

      meter.trackUsage(response, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      // One meter event per token type: input and output.
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          event_name: 'token-billing-tokens',
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '15',
            model: 'anthropic/claude-3.5-sonnet',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '8',
            token_type: 'output',
          }),
        })
      );
    });

    it('should track usage from message with system prompt', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_456',
        type: 'message',
        role: 'assistant',
        content: [{type: 'text', text: 'I am a helpful assistant.'}],
        model: 'claude-3-5-sonnet-20241022',
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 50,
          output_tokens: 12,
        },
      };

      meter.trackUsage(response, 'cus_456');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '50',
            model: 'anthropic/claude-3.5-sonnet',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from message with tool use', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_789',
        type: 'message',
        role: 'assistant',
        content: [
          {
            type: 'tool_use',
            id: 'toolu_123',
            name: 'get_weather',
            input: {location: 'San Francisco'},
          },
        ],
        model: 'claude-3-5-sonnet-20241022',
        stop_reason: 'tool_use',
        stop_sequence: null,
        usage: {
          input_tokens: 100,
          output_tokens: 45,
        },
      };

      meter.trackUsage(response, 'cus_789');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '100',
            model: 'anthropic/claude-3.5-sonnet',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from multi-turn conversation', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_conv',
        type: 'message',
        role: 'assistant',
        content: [{type: 'text', text: 'The weather is sunny today.'}],
        model: 'claude-3-opus-20240229',
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 200, // Includes conversation history
          output_tokens: 15,
        },
      };

      meter.trackUsage(response, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '200',
            model: 'anthropic/claude-3-opus',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from message with mixed content', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_mixed',
        type: 'message',
        role: 'assistant',
        content: [
          {type: 'text', text: 'Let me check the weather for you.'},
          {
            type: 'tool_use',
            id: 'toolu_456',
            name: 'get_weather',
            input: {location: 'New York'},
          },
        ],
        model: 'claude-3-5-haiku-20241022',
        stop_reason: 'tool_use',
        stop_sequence: null,
        usage: {
          input_tokens: 80,
          output_tokens: 60,
        },
      };

      meter.trackUsage(response, 'cus_999');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '80',
            model: 'anthropic/claude-3.5-haiku',
            token_type: 'input',
          }),
        })
      );
    });
  });

  // Streaming path: input tokens come from message_start, output tokens from
  // the trailing message_delta; the wrapper must also pass chunks through.
  describe('Messages - Streaming', () => {
    it('should track usage from basic streaming message', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          type: 'message_start',
          message: {
            id: 'msg_123',
            type: 'message',
            role: 'assistant',
            content: [],
            model: 'claude-3-5-sonnet-20241022',
            usage: {
              input_tokens: 15,
              output_tokens: 0,
            },
          },
        },
        {
          type: 'content_block_start',
          index: 0,
          content_block: {type: 'text', text: ''},
        },
        {
          type: 'content_block_delta',
          index: 0,
          delta: {type: 'text_delta', text: 'Hello, World!'},
        },
        {
          type: 'content_block_stop',
          index: 0,
        },
        {
          type: 'message_delta',
          delta: {stop_reason: 'end_turn'},
          usage: {
            output_tokens: 8,
          },
        },
        {
          type: 'message_stop',
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_123');

      for await (const _chunk of wrappedStream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '15',
            model: 'anthropic/claude-3.5-sonnet',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from streaming message with tool use', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          type: 'message_start',
          message: {
            id: 'msg_456',
            type: 'message',
            role: 'assistant',
            content: [],
            model: 'claude-3-5-sonnet-20241022',
            usage: {
              input_tokens: 100,
              output_tokens: 0,
            },
          },
        },
        {
          type: 'content_block_start',
          index: 0,
          content_block: {
            type: 'tool_use',
            id: 'toolu_789',
            name: 'get_weather',
            input: {},
          },
        },
        {
          type: 'content_block_delta',
          index: 0,
          delta: {
            type: 'input_json_delta',
            partial_json: '{"location": "San Francisco"}',
          },
        },
        {
          type: 'content_block_stop',
          index: 0,
        },
        {
          type: 'message_delta',
          delta: {stop_reason: 'tool_use'},
          usage: {
            output_tokens: 45,
          },
        },
        {
          type: 'message_stop',
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_456');

      for await (const _chunk of wrappedStream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '100',
            model: 'anthropic/claude-3.5-sonnet',
            token_type: 'input',
          }),
        })
      );
    });

    it('should properly tee the stream', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          type: 'message_start',
          message: {
            id: 'msg_123',
            usage: {
              input_tokens: 10,
              output_tokens: 0,
            },
          },
        },
        {
          type: 'content_block_delta',
          index: 0,
          delta: {type: 'text_delta', text: 'Hello'},
        },
        {
          type: 'content_block_delta',
          index: 0,
          delta: {type: 'text_delta', text: ' World'},
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_123');

      // The caller-facing branch of the tee must see every original chunk.
      const receivedChunks: any[] = [];
      for await (const chunk of wrappedStream) {
        receivedChunks.push(chunk);
      }

      expect(receivedChunks).toHaveLength(3);
      expect(receivedChunks[0].type).toBe('message_start');
      expect(receivedChunks[1].delta.text).toBe('Hello');
      expect(receivedChunks[2].delta.text).toBe(' World');
    });

    it('should extract input tokens from message_start', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          type: 'message_start',
          message: {
            id: 'msg_123',
            model: 'claude-3-opus-20240229',
            usage: {
              input_tokens: 250,
              output_tokens: 0,
            },
          },
        },
        {
          type: 'message_delta',
          delta: {stop_reason: 'end_turn'},
          usage: {
            output_tokens: 20,
          },
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamAnthropic(mockStream, 'cus_789');

      for await (const _chunk of wrappedStream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '250',
            model: 'anthropic/claude-3-opus',
            token_type: 'input',
          }),
        })
      );
    });
  });

  // Model-name normalization for different Claude families.
  describe('Model Variants', () => {
    it('should track claude-3-opus', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_opus',
        type: 'message',
        role: 'assistant',
        content: [{type: 'text', text: 'Response from Opus'}],
        model: 'claude-3-opus-20240229',
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 20,
          output_tokens: 10,
        },
      };

      meter.trackUsage(response, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '20',
            model: 'anthropic/claude-3-opus',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track claude-3-5-haiku', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'msg_haiku',
        type: 'message',
        role: 'assistant',
        content: [{type: 'text', text: 'Response from Haiku'}],
        model: 'claude-3-5-haiku-20241022',
        stop_reason: 'end_turn',
        stop_sequence: null,
        usage: {
          input_tokens: 15,
          output_tokens: 8,
        },
      };

      meter.trackUsage(response, 'cus_456');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '15',
            model: 'anthropic/claude-3.5-haiku',
            token_type: 'input',
          }),
        })
      );
    });
  });
});

// Helper function to create mock streams with tee()
function createMockStreamWithTee(chunks: any[]) {
  return {
    tee() {
      const stream1 = {
        async *[Symbol.asyncIterator]() {
          for (const chunk of chunks) {
            yield chunk;
          }
        },
        tee() {
          const s1 = {
            async *[Symbol.asyncIterator]() {
              for (const chunk of chunks) {
                yield chunk;
              }
            },
          };
          const s2 = {
            async *[Symbol.asyncIterator]() {
              for (const chunk of chunks) {
                yield chunk;
              }
            },
          };
          return [s1, s2];
        },
      };
      const stream2 = {
        async *[Symbol.asyncIterator]() {
          for (const chunk of chunks) {
            yield chunk;
          }
        },
      };
      return [stream1, stream2];
    },
    async *[Symbol.asyncIterator]() {
      for (const chunk of chunks) {
        yield chunk;
      }
    },
  };
}


```

--------------------------------------------------------------------------------
/llm/ai-sdk/provider/stripe-language-model.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Stripe Language Model implementation for AI SDK V2
 */

import {
  LanguageModelV2,
  LanguageModelV2CallOptions,
  LanguageModelV2CallWarning,
  LanguageModelV2Content,
  LanguageModelV2FinishReason,
  LanguageModelV2StreamPart,
} from '@ai-sdk/provider';
import {
  ParseResult,
  createEventSourceResponseHandler,
  createJsonResponseHandler,
  createStatusCodeErrorResponseHandler,
  postJsonToApi,
} from '@ai-sdk/provider-utils';
import {z} from 'zod';
import {StripeLanguageModelSettings, StripeProviderOptions} from './types';
import {convertToOpenAIMessages, mapOpenAIFinishReason} from './utils';

/**
 * OpenAI-compatible chat completion response schema
 *
 * Validates only the subset of the payload this provider reads: per-choice
 * message content / tool calls, the finish reason, and token usage. All
 * usage fields are optional so partial payloads still parse.
 */
const openAIResponseSchema = z.object({
  choices: z.array(
    z.object({
      message: z.object({
        content: z.string().nullable().optional(),
        tool_calls: z
          .array(
            z.object({
              id: z.string(),
              type: z.literal('function'),
              function: z.object({
                name: z.string(),
                arguments: z.string(),
              }),
            })
          )
          .optional(),
      }),
      finish_reason: z.string().nullable(),
    })
  ),
  usage: z
    .object({
      prompt_tokens: z.number().optional(),
      completion_tokens: z.number().optional(),
      total_tokens: z.number().optional(),
    })
    .optional(),
});

// Parsed shape of a non-streaming chat completion response.
type OpenAIResponse = z.infer<typeof openAIResponseSchema>;

/**
 * OpenAI-compatible streaming chunk schema
 * Note: The event source handler may also return '[DONE]' string or null
 *
 * Accepts a delta chunk, the '[DONE]' sentinel, or null; the trailing
 * `.catch(null)` maps any chunk that fails validation to null instead of
 * throwing, so one malformed SSE event cannot abort the whole stream.
 */
const openAIStreamChunkSchema = z
  .union([
    z.object({
      choices: z
        .array(
          z.object({
            delta: z.object({
              content: z.string().optional(),
              tool_calls: z
                .array(
                  z.object({
                    index: z.number(),
                    id: z.string().optional(),
                    function: z
                      .object({
                        name: z.string().optional(),
                        arguments: z.string().optional(),
                      })
                      .optional(),
                  })
                )
                .optional(),
            }),
            finish_reason: z.string().nullable().optional(),
          })
        )
        .optional(),
      usage: z
        .object({
          prompt_tokens: z.number().optional(),
          completion_tokens: z.number().optional(),
          total_tokens: z.number().optional(),
        })
        .optional(),
    }),
    z.literal('[DONE]'),
    z.null(),
  ])
  .catch(null);

// Parsed shape of one streaming event ('[DONE]'/null included).
type OpenAIStreamChunk = z.infer<typeof openAIStreamChunkSchema>;

/**
 * Enhanced error class for Stripe AI SDK Provider access issues
 *
 * Wraps an underlying API error with onboarding guidance for the Stripe AI
 * SDK Provider Private Preview. The wrapped error is preserved on `cause`
 * for downstream inspection.
 */
export class StripeProviderAccessError extends Error {
  constructor(originalError: any) {
    // Use optional chaining so a null/undefined or non-Error value cannot
    // make the error constructor itself throw; `||` keeps the original
    // fallback behavior for empty messages.
    const message = [
      'Stripe AI SDK Provider Access Required',
      '',
      'You are probably seeing this error because you have not been granted access to the Stripe AI SDK Provider Private Preview.',
      '',
      'To request access, please fill out the form here:',
      'https://docs.stripe.com/billing/token-billing',
      '',
      '---',
      'Original error: ' + (originalError?.message || 'Unknown error'),
    ].join('\n');

    super(message);
    this.name = 'StripeProviderAccessError';

    // Preserve the original error
    this.cause = originalError;
  }
}

// Internal wiring shared by every model created from one provider instance.
interface StripeProviderConfig {
  provider: string; // value exposed as LanguageModelV2.provider
  baseURL: string; // root URL that API requests are posted to
  headers: () => Record<string, string>; // evaluated per request to build headers
}

/**
 * Stripe Language Model that implements the AI SDK V2 specification
 */
export class StripeLanguageModel implements LanguageModelV2 {
  readonly specificationVersion = 'v2' as const;
  readonly provider: string;
  readonly modelId: string;

  private readonly settings: StripeLanguageModelSettings;
  private readonly config: StripeProviderConfig;

  constructor(
    modelId: string,
    settings: StripeLanguageModelSettings,
    config: StripeProviderConfig
  ) {
    this.provider = config.provider;
    this.modelId = modelId;
    this.settings = settings;
    this.config = config;
  }

  /**
   * Stripe proxy doesn't require special URL handling - it accepts standard base64 data
   */
  get supportedUrls() {
    return {};
  }

  /**
   * Check if an error is due to lack of access to the Stripe AI SDK Provider
   */
  private isAccessDeniedError(error: any): boolean {
    // Check for the specific "Unrecognized request URL" error
    if (error.statusCode === 400 && error.responseBody) {
      try {
        const body = typeof error.responseBody === 'string' 
          ? JSON.parse(error.responseBody)
          : error.responseBody;
        
        if (body.error?.type === 'invalid_request_error' && 
            body.error?.message?.includes('Unrecognized request URL')) {
          return true;
        }
      } catch {
        // If we can't parse the response, it's not the error we're looking for
      }
    }
    return false;
  }

  /**
   * Wrap API call errors with helpful messaging for access issues
   */
  private handleApiError(error: any): never {
    if (this.isAccessDeniedError(error)) {
      throw new StripeProviderAccessError(error);
    }
    throw error;
  }

  /**
   * Get model-specific default max output tokens for Anthropic models
   * Based on the official Anthropic provider implementation
   * @see https://github.com/vercel/ai/blob/main/packages/anthropic/src/anthropic-messages-language-model.ts
   */
  private getDefaultMaxTokens(modelId: string): number | undefined {
    if (!modelId.startsWith('anthropic/')) {
      return undefined; // No default for non-Anthropic models
    }

    // Extract model name after 'anthropic/' prefix
    const model = modelId.substring('anthropic/'.length);

    // Claude Sonnet 4 models (including variants like sonnet-4-1) and 3.7 Sonnet
    if (model.includes('sonnet-4') || 
        model.includes('claude-3-7-sonnet') || 
        model.includes('haiku-4-5')) {
      return 64000; // 64K tokens
    } 
    // Claude Opus 4 models (including variants like opus-4-1)
    else if (model.includes('opus-4')) {
      return 32000; // 32K tokens
    } 
    // Claude 3.5 Haiku
    else if (model.includes('claude-3-5-haiku')) {
      return 8192; // 8K tokens
    } 
    // Default fallback for other Anthropic models
    else {
      return 4096;
    }
  }

  /**
   * Build the HTTP headers for a request, merging (lowest to highest
   * precedence): provider config headers, settings headers, per-call
   * providerOptions headers, and the required X-Stripe-Customer-ID.
   *
   * @throws Error when no customer ID is available from either
   *   providerOptions or settings.
   */
  private getHeaders(
    options: LanguageModelV2CallOptions
  ): Record<string, string> {
    const baseHeaders = this.config.headers();
    const settingsHeaders = this.settings.headers || {};

    // Get provider-specific options
    const stripeOptions = (options.providerOptions?.stripe ||
      {}) as StripeProviderOptions;

    // Determine customer ID (priority: providerOptions > settings > error)
    const customerId =
      stripeOptions.customerId || this.settings.customerId || '';

    if (!customerId) {
      throw new Error(
        'Stripe customer ID is required. Provide it via provider settings or providerOptions.'
      );
    }

    return {
      ...baseHeaders,
      ...settingsHeaders,
      ...(stripeOptions.headers || {}),
      'X-Stripe-Customer-ID': customerId,
    };
  }

  /**
   * Translate AI SDK call options into an OpenAI-compatible request body.
   * Tool definitions are rejected outright (the Stripe API does not support
   * tool calling); a bare toolChoice, if supplied, is still forwarded.
   */
  private getArgs(options: LanguageModelV2CallOptions) {
    const warnings: LanguageModelV2CallWarning[] = [];

    // Convert AI SDK prompt to OpenAI-compatible format
    const messages = convertToOpenAIMessages(options.prompt);

    // Tool calling is not supported by the Stripe API: fail fast with a
    // clear message instead of silently dropping the tools.
    if (options.tools && options.tools.length > 0) {
      throw new Error(
        'Tool calling is not supported by the Stripe AI SDK Provider. ' +
        'The llm.stripe.com API does not currently support function calling or tool use. ' +
        'Please remove the tools parameter from your request.'
      );
    }

    // Map tool choice. Requests with tools are rejected above, but a
    // caller-supplied toolChoice is still forwarded unchanged.
    let toolChoice: string | {type: string; function?: {name: string}} | undefined;
    if (options.toolChoice) {
      if (options.toolChoice.type === 'tool') {
        toolChoice = {
          type: 'function',
          function: {name: options.toolChoice.toolName},
        };
      } else {
        toolChoice = options.toolChoice.type; // 'auto', 'none', 'required'
      }
    }

    // Build request body, only including defined values
    const body: Record<string, any> = {
      model: this.modelId,
      messages,
    };

    // Add optional parameters only if they're defined
    if (options.temperature !== undefined) body.temperature = options.temperature;
    
    // Handle max_tokens with model-specific defaults for Anthropic
    const maxTokens = options.maxOutputTokens ?? this.getDefaultMaxTokens(this.modelId);
    if (maxTokens !== undefined) body.max_tokens = maxTokens;
    
    if (options.topP !== undefined) body.top_p = options.topP;
    if (options.frequencyPenalty !== undefined) body.frequency_penalty = options.frequencyPenalty;
    if (options.presencePenalty !== undefined) body.presence_penalty = options.presencePenalty;
    if (options.stopSequences !== undefined) body.stop = options.stopSequences;
    if (options.seed !== undefined) body.seed = options.seed;
    if (toolChoice !== undefined) body.tool_choice = toolChoice;

    return {args: body, warnings};
  }

  async doGenerate(options: LanguageModelV2CallOptions) {
    const {args, warnings} = this.getArgs(options);
    const headers = this.getHeaders(options);

    let response: OpenAIResponse;
    try {
      const result = await postJsonToApi({
        url: `${this.config.baseURL}/chat/completions`,
        headers,
        body: args,
        failedResponseHandler: createStatusCodeErrorResponseHandler(),
        successfulResponseHandler: createJsonResponseHandler(openAIResponseSchema),
        abortSignal: options.abortSignal,
      });
      response = result.value;
    } catch (error) {
      // handleApiError returns `never`, so `response` is definitely assigned below
      this.handleApiError(error);
    }

    const choice = response.choices[0];

    // Convert response to AI SDK V2 format
    const content: LanguageModelV2Content[] = [];

    // Add text content if present
    if (choice.message.content) {
      content.push({
        type: 'text',
        text: choice.message.content,
      });
    }

    // Add tool calls if present
    if (choice.message.tool_calls) {
      for (const toolCall of choice.message.tool_calls) {
        content.push({
          type: 'tool-call',
          toolCallId: toolCall.id,
          toolName: toolCall.function.name,
          input: toolCall.function.arguments,
        });
      }
    }

    return {
      content,
      finishReason: mapOpenAIFinishReason(choice.finish_reason),
      usage: {
        inputTokens: response.usage?.prompt_tokens,
        outputTokens: response.usage?.completion_tokens,
        totalTokens: response.usage?.total_tokens,
      },
      request: {body: args},
      response: {
        headers: {},
        body: response,
      },
      warnings,
    };
  }

  async doStream(options: LanguageModelV2CallOptions) {
    const {args, warnings} = this.getArgs(options);
    const headers = this.getHeaders(options);

    let response: ReadableStream<ParseResult<OpenAIStreamChunk>>;
    try {
      const result = await postJsonToApi({
        url: `${this.config.baseURL}/chat/completions`,
        headers,
        body: {
          ...args,
          stream: true,
          stream_options: {include_usage: true},
        },
        failedResponseHandler: createStatusCodeErrorResponseHandler(),
        successfulResponseHandler: createEventSourceResponseHandler(openAIStreamChunkSchema),
        abortSignal: options.abortSignal,
      });
      response = result.value as ReadableStream<ParseResult<OpenAIStreamChunk>>;
    } catch (error) {
      this.handleApiError(error);
    }

    let finishReason: LanguageModelV2FinishReason = 'unknown';
    let usage = {
      inputTokens: undefined as number | undefined,
      outputTokens: undefined as number | undefined,
      totalTokens: undefined as number | undefined,
    };

    // Track tool calls during streaming
    const toolCallDeltas: Record<
      number,
      {
        id: string;
        name: string;
        input: string;
      }
    > = {};

    // Track text chunks with IDs
    let currentTextId = '';

    return {
      stream: response.pipeThrough(
        new TransformStream<ParseResult<OpenAIStreamChunk>, LanguageModelV2StreamPart>({
          transform(chunk, controller) {
            if (!chunk.success) {
              controller.enqueue({type: 'error', error: chunk.error});
              return;
            }

            // The value is already parsed as an object by the event source handler
            // If value is null (schema validation failed), use rawValue
            const data = chunk.value ?? (chunk.rawValue as OpenAIStreamChunk);
            
            // Skip empty or [DONE] events
            if (!data || data === '[DONE]') {
              return;
            }

            try {
              // Type guard: at this point we know data is not null or '[DONE]'
              if (typeof data === 'object' && 'choices' in data && data.choices && data.choices.length > 0) {
                const delta = data.choices[0].delta;

                // Handle text content
                // Check if content exists (including empty string "") rather than checking truthiness
                if ('content' in delta && delta.content !== null && delta.content !== undefined) {
                  if (!currentTextId) {
                    currentTextId = `text-${Date.now()}`;
                    controller.enqueue({
                      type: 'text-start',
                      id: currentTextId,
                    });
                  }
                  // Only emit text-delta if content is not empty
                  if (delta.content !== '') {
                    controller.enqueue({
                      type: 'text-delta',
                      id: currentTextId,
                      delta: delta.content,
                    });
                  }
                }

                // Handle tool calls
                if (delta.tool_calls) {
                  for (const toolCall of delta.tool_calls) {
                    const index = toolCall.index;

                    // Initialize or update tool call
                    if (!toolCallDeltas[index]) {
                      const id = toolCall.id || `tool-${Date.now()}-${index}`;
                      toolCallDeltas[index] = {
                        id,
                        name: toolCall.function?.name || '',
                        input: '',
                      };

                      // Emit tool-input-start
                      controller.enqueue({
                        type: 'tool-input-start',
                        id,
                        toolName: toolCallDeltas[index].name,
                      });
                    }

                    if (toolCall.id) {
                      toolCallDeltas[index].id = toolCall.id;
                    }
                    if (toolCall.function?.name) {
                      toolCallDeltas[index].name = toolCall.function.name;
                    }
                    if (toolCall.function?.arguments) {
                      toolCallDeltas[index].input += toolCall.function.arguments;

                      // Emit the delta
                      controller.enqueue({
                        type: 'tool-input-delta',
                        id: toolCallDeltas[index].id,
                        delta: toolCall.function.arguments,
                      });
                    }
                  }
                }

                // Handle finish reason
                if (data.choices[0].finish_reason) {
                  finishReason = mapOpenAIFinishReason(
                    data.choices[0].finish_reason
                  );
                }
              }

              // Handle usage (typically comes in final chunk)
              if (typeof data === 'object' && 'usage' in data && data.usage) {
                usage = {
                  inputTokens: data.usage.prompt_tokens || undefined,
                  outputTokens: data.usage.completion_tokens || undefined,
                  totalTokens: data.usage.total_tokens || undefined,
                };
              }
            } catch (error) {
              controller.enqueue({
                type: 'error',
                error: error,
              });
            }
          },

          flush(controller) {
            // End current text if any
            if (currentTextId) {
              controller.enqueue({
                type: 'text-end',
                id: currentTextId,
              });
            }

            // Emit final tool calls
            for (const toolCall of Object.values(toolCallDeltas)) {
              controller.enqueue({
                type: 'tool-input-end',
                id: toolCall.id,
              });
              controller.enqueue({
                type: 'tool-call',
                toolCallId: toolCall.id,
                toolName: toolCall.name,
                input: toolCall.input,
              });
            }

            // Emit finish event
            controller.enqueue({
              type: 'finish',
              finishReason,
              usage,
            });
          },
        })
      ),
      request: {body: args},
      response: {headers: {}},
      warnings,
    };
  }
}

```

--------------------------------------------------------------------------------
/llm/token-meter/tests/meter-event-logging.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for meter event logging utilities
 */

import Stripe from 'stripe';
import {logUsageEvent, sendMeterEventsToStripe} from '../meter-event-logging';
import type {UsageEvent, MeterConfig} from '../types';

// Mock Stripe
jest.mock('stripe');

describe('sendMeterEventsToStripe', () => {
  let mockStripe: jest.Mocked<any>;
  let consoleErrorSpy: jest.SpyInstance;
  let consoleLogSpy: jest.SpyInstance;

  beforeEach(() => {
    jest.clearAllMocks();

    mockStripe = {
      v2: {
        billing: {
          meterEvents: {
            create: jest.fn().mockResolvedValue({}),
          },
        },
      },
    };

    (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);

    consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
    consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
  });

  afterEach(() => {
    consoleErrorSpy.mockRestore();
    consoleLogSpy.mockRestore();
  });

  it('should send meter events to Stripe', async () => {
    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    await sendMeterEventsToStripe(mockStripe, config, event);

    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(2);
  });

  it('should send separate events for input and output tokens', async () => {
    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    await sendMeterEventsToStripe(mockStripe, config, event);

    const calls = mockStripe.v2.billing.meterEvents.create.mock.calls;
    expect(calls[0][0]).toMatchObject({
      event_name: 'token-billing-tokens',
      payload: {
        stripe_customer_id: 'cus_123',
        value: '100',
        model: 'openai/gpt-4',
        token_type: 'input',
      },
    });
    expect(calls[1][0]).toMatchObject({
      event_name: 'token-billing-tokens',
      payload: {
        stripe_customer_id: 'cus_123',
        value: '50',
        model: 'openai/gpt-4',
        token_type: 'output',
      },
    });
  });

  it('should handle zero input tokens', async () => {
    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 0,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    await sendMeterEventsToStripe(mockStripe, config, event);

    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
    const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
    expect(call.payload.token_type).toBe('output');
  });

  it('should handle zero output tokens', async () => {
    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 0,
      },
      stripeCustomerId: 'cus_123',
    };

    await sendMeterEventsToStripe(mockStripe, config, event);

    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
    const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
    expect(call.payload.token_type).toBe('input');
  });

  it('should handle Stripe API errors gracefully', async () => {
    mockStripe.v2.billing.meterEvents.create.mockRejectedValue(
      new Error('API Error')
    );

    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    await sendMeterEventsToStripe(mockStripe, config, event);

    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'Error sending meter events to Stripe:',
      expect.any(Error)
    );
  });

  it('should include proper timestamp format', async () => {
    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    await sendMeterEventsToStripe(mockStripe, config, event);

    const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
    expect(call.timestamp).toMatch(
      /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/
    );
  });

  describe('Model Name Normalization - Anthropic', () => {
    it('should remove date suffix (YYYYMMDD)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-opus-20240229',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3-opus');
    });

    it('should remove -latest suffix', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-opus-latest',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3-opus');
    });

    it('should convert version numbers (3-5 to 3.5)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-5-sonnet-20241022',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
    });

    it('should handle latest suffix before date suffix', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-opus-latest-20240229',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3-opus');
    });

    it('should handle version numbers + date suffix', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-5-sonnet-20241022',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
    });

    it('should handle version numbers + latest suffix', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-5-sonnet-latest',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3.5-sonnet');
    });

    it('should handle haiku model', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-5-haiku-20241022',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3.5-haiku');
    });

    it('should handle model without any suffixes', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-3-opus',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-3-opus');
    });

    it('should handle claude-2 models', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-2-1-20231120',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-2.1');
    });

    it('should handle future version numbers (4-0)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'claude-4-0-sonnet-20251231',
        provider: 'anthropic',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('anthropic/claude-4.0-sonnet');
    });
  });

  describe('Model Name Normalization - OpenAI', () => {
    it('should keep gpt-4o-2024-05-13 as-is (special exception)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-4o-2024-05-13',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/gpt-4o-2024-05-13');
    });

    it('should remove date suffix from gpt-4-turbo', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-4-turbo-2024-04-09',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/gpt-4-turbo');
    });

    it('should remove date suffix from gpt-4o-mini', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-4o-mini-2024-07-18',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/gpt-4o-mini');
    });

    it('should NOT remove short date codes (MMDD format)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-4-0613',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      // Short date codes like -0613 are NOT in YYYY-MM-DD format, so they stay
      expect(call.payload.model).toBe('openai/gpt-4-0613');
    });

    it('should keep gpt-4 without date as-is', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-4',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/gpt-4');
    });

    it('should keep gpt-3.5-turbo without date as-is', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-3.5-turbo',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/gpt-3.5-turbo');
    });

    it('should NOT remove short date codes from gpt-3.5-turbo', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-3.5-turbo-0125',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      // Short date codes like -0125 are NOT in YYYY-MM-DD format, so they stay
      expect(call.payload.model).toBe('openai/gpt-3.5-turbo-0125');
    });

    it('should handle o1-preview model', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'o1-preview-2024-09-12',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/o1-preview');
    });

    it('should handle o1-mini model', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'o1-mini-2024-09-12',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/o1-mini');
    });

    it('should NOT remove 4-digit dates (not in YYYY-MM-DD format)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gpt-4-0314',
        provider: 'openai',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('openai/gpt-4-0314');
    });
  });

  describe('Model Name Normalization - Google', () => {
    it('should keep gemini-1.5-pro as-is', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gemini-1.5-pro',
        provider: 'google',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('google/gemini-1.5-pro');
    });

    it('should keep gemini-2.5-flash as-is', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gemini-2.5-flash',
        provider: 'google',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('google/gemini-2.5-flash');
    });

    it('should keep gemini-pro as-is', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gemini-pro',
        provider: 'google',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('google/gemini-pro');
    });

    it('should keep any Google model name as-is (even with dates)', async () => {
      const config: MeterConfig = {};
      const event: UsageEvent = {
        model: 'gemini-1.5-pro-20241201',
        provider: 'google',
        usage: {inputTokens: 100, outputTokens: 50},
        stripeCustomerId: 'cus_123',
      };

      await sendMeterEventsToStripe(mockStripe, config, event);

      const call = mockStripe.v2.billing.meterEvents.create.mock.calls[0][0];
      expect(call.payload.model).toBe('google/gemini-1.5-pro-20241201');
    });
  });
});

describe('logUsageEvent', () => {
  let stripeMock: jest.Mocked<any>;

  beforeEach(() => {
    jest.clearAllMocks();

    // Minimal Stripe client stub exposing only the meter-event endpoint.
    stripeMock = {
      v2: {
        billing: {
          meterEvents: {
            create: jest.fn().mockResolvedValue({}),
          },
        },
      },
    };

    (Stripe as unknown as jest.Mock).mockImplementation(() => stripeMock);
  });

  it('should call sendMeterEventsToStripe', () => {
    const meterConfig: MeterConfig = {};

    const usageEvent: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    // logUsageEvent is fire-and-forget: invoking it must not throw synchronously.
    const invoke = () => logUsageEvent(stripeMock, meterConfig, usageEvent);
    expect(invoke).not.toThrow();
  });
});


```

--------------------------------------------------------------------------------
/llm/ai-sdk/meter/tests/meter-event-logging.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for meter event logging utilities
 */

import Stripe from 'stripe';
import {logUsageEvent, sendMeterEventsToStripe} from '../meter-event-logging';
import type {UsageEvent, MeterConfig} from '../meter-event-types';

// Mock Stripe
jest.mock('stripe');

describe('sendMeterEventsToStripe', () => {
  let mockStripe: jest.Mocked<any>;
  let consoleErrorSpy: jest.SpyInstance;
  let consoleLogSpy: jest.SpyInstance;

  /**
   * Builds a UsageEvent with sensible defaults; individual tests override
   * only the fields they care about.
   */
  const makeEvent = (overrides: Partial<UsageEvent> = {}): UsageEvent => ({
    model: 'gpt-4',
    provider: 'openai',
    usage: {inputTokens: 100, outputTokens: 50},
    stripeCustomerId: 'cus_123',
    ...overrides,
  });

  /** Returns the argument object passed to the nth meterEvents.create call. */
  const createArg = (n = 0) =>
    mockStripe.v2.billing.meterEvents.create.mock.calls[n][0];

  beforeEach(() => {
    jest.clearAllMocks();

    // Minimal Stripe client stub exposing only the meter-event endpoint.
    mockStripe = {
      v2: {
        billing: {
          meterEvents: {
            create: jest.fn().mockResolvedValue({}),
          },
        },
      },
    };

    (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);

    // Silence (and capture) console output produced by the logger.
    consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation(() => {});
    consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
  });

  afterEach(() => {
    consoleErrorSpy.mockRestore();
    consoleLogSpy.mockRestore();
  });

  it('should send meter events to Stripe', async () => {
    await sendMeterEventsToStripe(mockStripe, {}, makeEvent());

    // One event for input tokens plus one for output tokens.
    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(2);
  });

  it('should send separate events for input and output tokens', async () => {
    await sendMeterEventsToStripe(mockStripe, {}, makeEvent());

    expect(createArg(0)).toMatchObject({
      event_name: 'token-billing-tokens',
      payload: {
        stripe_customer_id: 'cus_123',
        value: '100',
        model: 'openai/gpt-4',
        token_type: 'input',
      },
    });
    expect(createArg(1)).toMatchObject({
      event_name: 'token-billing-tokens',
      payload: {
        stripe_customer_id: 'cus_123',
        value: '50',
        model: 'openai/gpt-4',
        token_type: 'output',
      },
    });
  });

  it('should handle zero input tokens', async () => {
    await sendMeterEventsToStripe(
      mockStripe,
      {},
      makeEvent({usage: {inputTokens: 0, outputTokens: 50}})
    );

    // Zero-valued token counts are skipped, so only the output event is sent.
    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
    expect(createArg().payload.token_type).toBe('output');
  });

  it('should handle zero output tokens', async () => {
    await sendMeterEventsToStripe(
      mockStripe,
      {},
      makeEvent({usage: {inputTokens: 100, outputTokens: 0}})
    );

    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalledTimes(1);
    expect(createArg().payload.token_type).toBe('input');
  });

  it('should handle Stripe API errors gracefully', async () => {
    mockStripe.v2.billing.meterEvents.create.mockRejectedValue(
      new Error('API Error')
    );

    // Must resolve (not reject) and report the failure via console.error.
    await sendMeterEventsToStripe(mockStripe, {}, makeEvent());

    expect(consoleErrorSpy).toHaveBeenCalledWith(
      'Error sending meter events to Stripe:',
      expect.any(Error)
    );
  });

  it('should include proper timestamp format', async () => {
    await sendMeterEventsToStripe(mockStripe, {}, makeEvent());

    // ISO 8601 with millisecond precision, e.g. 2024-01-01T00:00:00.000Z.
    expect(createArg().timestamp).toMatch(
      /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/
    );
  });

  describe('Model Name Normalization', () => {
    /**
     * Sends an event for (provider, model) and asserts the normalized model
     * name that ends up in the meter-event payload.
     */
    const expectNormalized = async (
      provider: UsageEvent['provider'],
      model: string,
      expected: string
    ) => {
      await sendMeterEventsToStripe(
        mockStripe,
        {},
        makeEvent({model, provider})
      );
      expect(createArg().payload.model).toBe(expected);
    };

    describe('Anthropic', () => {
      // [test name, raw model id, expected normalized name]
      // NOTE: the original suite tested 'claude-3-5-sonnet-20241022' twice
      // under two different names; the duplicate is merged into one case.
      const cases: Array<[string, string, string]> = [
        ['should remove date suffix (YYYYMMDD)', 'claude-3-opus-20240229', 'anthropic/claude-3-opus'],
        ['should remove -latest suffix', 'claude-3-opus-latest', 'anthropic/claude-3-opus'],
        ['should handle latest suffix before date suffix', 'claude-3-opus-latest-20240229', 'anthropic/claude-3-opus'],
        ['should convert version numbers + date suffix (3-5 to 3.5)', 'claude-3-5-sonnet-20241022', 'anthropic/claude-3.5-sonnet'],
        ['should handle version numbers + latest suffix', 'claude-3-5-sonnet-latest', 'anthropic/claude-3.5-sonnet'],
        ['should handle haiku model', 'claude-3-5-haiku-20241022', 'anthropic/claude-3.5-haiku'],
        ['should handle model without any suffixes', 'claude-3-opus', 'anthropic/claude-3-opus'],
        ['should handle claude-2 models', 'claude-2-1-20231120', 'anthropic/claude-2.1'],
        ['should handle future version numbers (4-0)', 'claude-4-0-sonnet-20251231', 'anthropic/claude-4.0-sonnet'],
      ];

      it.each(cases)('%s', async (_name, model, expected) => {
        await expectNormalized('anthropic', model, expected);
      });
    });

    describe('OpenAI', () => {
      const cases: Array<[string, string, string]> = [
        ['should keep gpt-4o-2024-05-13 as-is (special exception)', 'gpt-4o-2024-05-13', 'openai/gpt-4o-2024-05-13'],
        ['should remove date suffix from gpt-4-turbo', 'gpt-4-turbo-2024-04-09', 'openai/gpt-4-turbo'],
        ['should remove date suffix from gpt-4o-mini', 'gpt-4o-mini-2024-07-18', 'openai/gpt-4o-mini'],
        // Short date codes (-0613, -0125, -0314) are NOT YYYY-MM-DD, so they stay.
        ['should NOT remove short date codes (MMDD format)', 'gpt-4-0613', 'openai/gpt-4-0613'],
        ['should keep gpt-4 without date as-is', 'gpt-4', 'openai/gpt-4'],
        ['should keep gpt-3.5-turbo without date as-is', 'gpt-3.5-turbo', 'openai/gpt-3.5-turbo'],
        ['should NOT remove short date codes from gpt-3.5-turbo', 'gpt-3.5-turbo-0125', 'openai/gpt-3.5-turbo-0125'],
        ['should handle o1-preview model', 'o1-preview-2024-09-12', 'openai/o1-preview'],
        ['should handle o1-mini model', 'o1-mini-2024-09-12', 'openai/o1-mini'],
        ['should NOT remove 4-digit dates (not in YYYY-MM-DD format)', 'gpt-4-0314', 'openai/gpt-4-0314'],
      ];

      it.each(cases)('%s', async (_name, model, expected) => {
        await expectNormalized('openai', model, expected);
      });
    });

    describe('Google', () => {
      // Google model names are never rewritten, even when they contain dates.
      const cases: Array<[string, string, string]> = [
        ['should keep gemini-1.5-pro as-is', 'gemini-1.5-pro', 'google/gemini-1.5-pro'],
        ['should keep gemini-2.5-flash as-is', 'gemini-2.5-flash', 'google/gemini-2.5-flash'],
        ['should keep gemini-pro as-is', 'gemini-pro', 'google/gemini-pro'],
        ['should keep any Google model name as-is (even with dates)', 'gemini-1.5-pro-20241201', 'google/gemini-1.5-pro-20241201'],
      ];

      it.each(cases)('%s', async (_name, model, expected) => {
        await expectNormalized('google', model, expected);
      });
    });
  });
});

describe('logUsageEvent', () => {
  let mockStripe: jest.Mocked<any>;

  beforeEach(() => {
    jest.clearAllMocks();

    // Minimal Stripe client stub exposing only the meter-event endpoint.
    mockStripe = {
      v2: {
        billing: {
          meterEvents: {
            create: jest.fn().mockResolvedValue({}),
          },
        },
      },
    };

    (Stripe as unknown as jest.Mock).mockImplementation(() => mockStripe);
  });

  it('should call sendMeterEventsToStripe', async () => {
    const config: MeterConfig = {};

    const event: UsageEvent = {
      model: 'gpt-4',
      provider: 'openai',
      usage: {
        inputTokens: 100,
        outputTokens: 50,
      },
      stripeCustomerId: 'cus_123',
    };

    // logUsageEvent is fire-and-forget, so it must not throw synchronously.
    expect(() => logUsageEvent(mockStripe, config, event)).not.toThrow();

    // ...but after pending async work drains, the underlying meter-event call
    // should have been made. Without this assertion the test would pass even
    // if logUsageEvent silently did nothing.
    await new Promise((resolve) => setImmediate(resolve));
    expect(mockStripe.v2.billing.meterEvents.create).toHaveBeenCalled();
  });
});


```

--------------------------------------------------------------------------------
/llm/ai-sdk/provider/tests/stripe-language-model.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for Stripe Language Model implementation
 */

import {StripeLanguageModel, StripeProviderAccessError} from '../stripe-language-model';
import type {LanguageModelV2CallOptions} from '@ai-sdk/provider';

describe('StripeLanguageModel', () => {
  let model: StripeLanguageModel;

  beforeEach(() => {
    // Fresh model per test: Stripe-hosted GPT-5 bound to a test customer,
    // with an API-key Authorization header supplied by the config callback.
    const settings = {customerId: 'cus_test123'};
    const config = {
      provider: 'stripe',
      baseURL: 'https://llm.stripe.com',
      headers: () => ({
        'Content-Type': 'application/json',
        Authorization: 'Bearer sk_test_123',
      }),
    };

    model = new StripeLanguageModel('openai/gpt-5', settings, config);
  });

  describe('constructor', () => {
    it('should initialize with correct properties', () => {
      // The shared fixture from beforeEach should expose the v2 spec,
      // the 'stripe' provider name, and the model ID it was built with.
      expect(model.specificationVersion).toBe('v2');
      expect(model.provider).toBe('stripe');
      expect(model.modelId).toBe('openai/gpt-5');
    });

    it('should support different model IDs', () => {
      const modelIds = [
        'openai/gpt-5',
        'google/gemini-2.5-pro',
        'anthropic/claude-sonnet-4',
      ];

      for (const modelId of modelIds) {
        const candidate = new StripeLanguageModel(
          modelId,
          {customerId: 'cus_test'},
          {
            provider: 'stripe',
            baseURL: 'https://llm.stripe.com',
            headers: () => ({}),
          }
        );
        expect(candidate.modelId).toBe(modelId);
      }
    });
  });

  describe('supportedUrls', () => {
    it('should return empty object (no native URL support)', () => {
      // No URL patterns are handled natively by this provider.
      const urls = model.supportedUrls;
      expect(urls).toEqual({});
    });
  });

  describe('getHeaders', () => {
    it('should throw error when customer ID is not provided', () => {
      const modelWithoutCustomer = new StripeLanguageModel(
        'openai/gpt-5',
        {}, // No customer ID
        {
          provider: 'stripe',
          baseURL: 'https://llm.stripe.com',
          headers: () => ({
            Authorization: 'Bearer sk_test_123',
          }),
        }
      );

      const options: LanguageModelV2CallOptions = {
        prompt: [],
      };

      expect(() => {
        // @ts-expect-error - Accessing private method for testing
        modelWithoutCustomer.getHeaders(options);
      }).toThrow('Stripe customer ID is required');
    });

    it('should use customer ID from settings', () => {
      const options: LanguageModelV2CallOptions = {
        prompt: [],
      };

      // @ts-expect-error - Accessing private method for testing
      const headers = model.getHeaders(options);

      expect(headers['X-Stripe-Customer-ID']).toBe('cus_test123');
    });

    it('should override customer ID from providerOptions', () => {
      const options: LanguageModelV2CallOptions = {
        prompt: [],
        providerOptions: {
          stripe: {
            customerId: 'cus_override',
          },
        },
      };

      // @ts-expect-error - Accessing private method for testing
      const headers = model.getHeaders(options);

      expect(headers['X-Stripe-Customer-ID']).toBe('cus_override');
    });

    it('should merge custom headers', () => {
      const modelWithHeaders = new StripeLanguageModel(
        'openai/gpt-5',
        {
          customerId: 'cus_test',
          headers: {'X-Custom-Header': 'custom-value'},
        },
        {
          provider: 'stripe',
          baseURL: 'https://llm.stripe.com',
          headers: () => ({
            Authorization: 'Bearer sk_test_123',
          }),
        }
      );

      const options: LanguageModelV2CallOptions = {
        prompt: [],
        providerOptions: {
          stripe: {
            headers: {'X-Runtime-Header': 'runtime-value'},
          },
        },
      };

      // @ts-expect-error - Accessing private method for testing
      const headers = modelWithHeaders.getHeaders(options);

      expect(headers['X-Custom-Header']).toBe('custom-value');
      expect(headers['X-Runtime-Header']).toBe('runtime-value');
      expect(headers['X-Stripe-Customer-ID']).toBe('cus_test');
    });
  });

  describe('getArgs', () => {
    /** Invokes the private getArgs method with the given call options. */
    const argsFor = (options: LanguageModelV2CallOptions) =>
      // @ts-expect-error - Accessing private method for testing
      model.getArgs(options);

    it('should convert basic prompt to OpenAI format', () => {
      const {args, warnings} = argsFor({
        prompt: [{role: 'user', content: [{type: 'text', text: 'Hello'}]}],
      });

      expect(args.model).toBe('openai/gpt-5');
      expect(args.messages).toHaveLength(1);
      expect(args.messages[0].role).toBe('user');
      expect(warnings).toEqual([]);
    });

    it('should include temperature setting', () => {
      const {args} = argsFor({prompt: [], temperature: 0.7});

      expect(args.temperature).toBe(0.7);
    });

    it('should include max_tokens setting', () => {
      // maxOutputTokens maps onto the OpenAI-style max_tokens field.
      const {args} = argsFor({prompt: [], maxOutputTokens: 100});

      expect(args.max_tokens).toBe(100);
    });

    it('should include stop sequences', () => {
      const {args} = argsFor({prompt: [], stopSequences: ['\n', 'END']});

      expect(args.stop).toEqual(['\n', 'END']);
    });

    it('should include topP, frequencyPenalty, and presencePenalty', () => {
      // camelCase AI SDK options are translated to snake_case API fields.
      const {args} = argsFor({
        prompt: [],
        topP: 0.9,
        frequencyPenalty: 0.5,
        presencePenalty: 0.3,
      });

      expect(args.top_p).toBe(0.9);
      expect(args.frequency_penalty).toBe(0.5);
      expect(args.presence_penalty).toBe(0.3);
    });

    it('should include seed when provided', () => {
      const {args} = argsFor({prompt: [], seed: 12345});

      expect(args.seed).toBe(12345);
    });
  });

  describe('tools support', () => {
    /** Returns a thunk that invokes the private getArgs with these options. */
    const callGetArgs = (options: LanguageModelV2CallOptions) => () =>
      // @ts-expect-error - Accessing private method for testing
      model.getArgs(options);

    it('should throw error when tools are provided', () => {
      const options: LanguageModelV2CallOptions = {
        prompt: [],
        tools: [
          {
            type: 'function',
            name: 'getWeather',
            description: 'Get weather for a location',
            inputSchema: {
              type: 'object',
              properties: {
                location: {type: 'string'},
              },
              required: ['location'],
            },
          },
        ],
      };

      expect(callGetArgs(options)).toThrow(
        'Tool calling is not supported by the Stripe AI SDK Provider'
      );
    });

    it('should throw error when tool choice is provided with tools', () => {
      const options: LanguageModelV2CallOptions = {
        prompt: [],
        tools: [
          {
            type: 'function',
            name: 'test',
            inputSchema: {type: 'object', properties: {}},
          },
        ],
        toolChoice: {type: 'auto'},
      };

      expect(callGetArgs(options)).toThrow(
        'Tool calling is not supported by the Stripe AI SDK Provider'
      );
    });

    it('should not throw error when no tools are provided', () => {
      const options: LanguageModelV2CallOptions = {
        prompt: [{role: 'user', content: [{type: 'text', text: 'Hello'}]}],
      };

      expect(callGetArgs(options)).not.toThrow();
    });
  });

  describe('error handling', () => {
    it('should handle missing customer ID gracefully', () => {
      // Built without a customerId, so header construction must fail loudly.
      const modelWithoutCustomer = new StripeLanguageModel(
        'openai/gpt-5',
        {},
        {
          provider: 'stripe',
          baseURL: 'https://llm.stripe.com',
          headers: () => ({Authorization: 'Bearer sk_test_123'}),
        }
      );

      const options: LanguageModelV2CallOptions = {
        prompt: [{role: 'user', content: [{type: 'text', text: 'Hi'}]}],
      };

      expect(() =>
        // @ts-expect-error - Accessing private method for testing
        modelWithoutCustomer.getHeaders(options)
      ).toThrow('Stripe customer ID is required');
    });
  });

  describe('anthropic max_tokens defaults', () => {
    /** Builds a StripeLanguageModel for the given model ID with minimal config. */
    const makeModel = (modelId: string) =>
      new StripeLanguageModel(
        modelId,
        {customerId: 'cus_test'},
        {
          provider: 'stripe',
          baseURL: 'https://llm.stripe.com',
          headers: () => ({}),
        }
      );

    /** Returns the max_tokens value that getArgs produces for modelId + options. */
    const maxTokensFor = (
      modelId: string,
      options: LanguageModelV2CallOptions = {prompt: []}
    ) => {
      // @ts-expect-error - Accessing private method for testing
      const {args} = makeModel(modelId).getArgs(options);
      return args.max_tokens;
    };

    it('should apply 64K default for Claude Sonnet 4 models', () => {
      expect(maxTokensFor('anthropic/claude-sonnet-4')).toBe(64000);
    });

    it('should apply 32K default for Claude Opus 4 models', () => {
      expect(maxTokensFor('anthropic/claude-opus-4')).toBe(32000);
    });

    it('should apply 8K default for Claude 3.5 Haiku', () => {
      expect(maxTokensFor('anthropic/claude-3-5-haiku')).toBe(8192);
    });

    it('should apply 4K default for other Anthropic models', () => {
      expect(maxTokensFor('anthropic/claude-3-haiku')).toBe(4096);
    });

    it('should not apply default for non-Anthropic models', () => {
      // Non-Anthropic models get no implicit max_tokens at all.
      expect(maxTokensFor('openai/gpt-5')).toBeUndefined();
    });

    it('should allow user-provided maxOutputTokens to override default', () => {
      expect(
        maxTokensFor('anthropic/claude-sonnet-4', {
          prompt: [],
          maxOutputTokens: 1000, // user override beats the 64K default
        })
      ).toBe(1000);
    });
  });

  describe('access denied error handling', () => {
    // Exercises the private isAccessDeniedError/handleApiError helpers that
    // detect Stripe's "Unrecognized request URL" 400 response (what the API
    // returns when the account lacks access to the LLM endpoint).
    it('should throw StripeProviderAccessError for "Unrecognized request URL" errors', () => {
      // Create a mock error that looks like the access denied error
      const mockError = {
        statusCode: 400,
        responseBody: JSON.stringify({
          error: {
            type: 'invalid_request_error',
            message: 'Unrecognized request URL. Please see https://stripe.com/docs or we can help at https://support.stripe.com/.',
          },
        }),
        message: 'Bad Request',
      };

      // Access the private method for testing
      const isAccessDenied = (model as any).isAccessDeniedError(mockError);
      expect(isAccessDenied).toBe(true);

      // Capture the thrown error instead of calling the jasmine-only global
      // fail(), which is not defined under jest-circus (Jest's default runner
      // since v27). If handleApiError does not throw, `thrown` stays
      // undefined and the assertions below fail.
      let thrown: unknown;
      try {
        (model as any).handleApiError(mockError);
      } catch (error) {
        thrown = error;
      }
      expect(thrown).toBeInstanceOf(StripeProviderAccessError);
      expect((thrown as Error).message).toContain('Stripe AI SDK Provider Access Required');
      expect((thrown as Error).message).toContain('Private Preview');
      expect((thrown as Error).message).toContain('https://docs.stripe.com/billing/token-billing');
      expect((thrown as any).cause).toBe(mockError);
    });

    it('should not throw StripeProviderAccessError for other 400 errors', () => {
      const mockError = {
        statusCode: 400,
        responseBody: JSON.stringify({
          error: {
            type: 'invalid_request_error',
            message: 'Some other error message',
          },
        }),
        message: 'Bad Request',
      };

      const isAccessDenied = (model as any).isAccessDeniedError(mockError);
      expect(isAccessDenied).toBe(false);

      // handleApiError should re-throw the original error untouched.
      let thrown: unknown;
      try {
        (model as any).handleApiError(mockError);
      } catch (error) {
        thrown = error;
      }
      expect(thrown).not.toBeInstanceOf(StripeProviderAccessError);
      expect(thrown).toBe(mockError);
    });

    it('should handle errors with parsed responseBody', () => {
      // responseBody may already be a parsed object rather than a JSON string.
      const mockError = {
        statusCode: 400,
        responseBody: {
          error: {
            type: 'invalid_request_error',
            message: 'Unrecognized request URL. Please see https://stripe.com/docs',
          },
        },
        message: 'Bad Request',
      };

      const isAccessDenied = (model as any).isAccessDeniedError(mockError);
      expect(isAccessDenied).toBe(true);
    });

    it('should handle malformed responseBody gracefully', () => {
      // Unparseable body must not throw; it is simply not an access error.
      const mockError = {
        statusCode: 400,
        responseBody: 'Not valid JSON {{{',
        message: 'Bad Request',
      };

      const isAccessDenied = (model as any).isAccessDeniedError(mockError);
      expect(isAccessDenied).toBe(false);
    });

    it('should not match non-400 errors', () => {
      // Only 400-status errors are classified as access denied.
      const mockError = {
        statusCode: 500,
        responseBody: JSON.stringify({
          error: {
            type: 'invalid_request_error',
            message: 'Unrecognized request URL',
          },
        }),
        message: 'Internal Server Error',
      };

      const isAccessDenied = (model as any).isAccessDeniedError(mockError);
      expect(isAccessDenied).toBe(false);
    });
  });

  describe('streaming error conditions', () => {
    it('should handle errors mid-stream', async () => {
      // Mock postJsonToApi to return a stream that emits an error
      const mockStream = new ReadableStream({
        start(controller) {
          // First emit a successful chunk
          controller.enqueue({
            success: true,
            value: {
              choices: [
                {
                  delta: {content: 'Hello '},
                  finish_reason: null,
                },
              ],
            },
          });

          // Then emit an error chunk
          controller.enqueue({
            success: false,
            error: new Error('Stream error occurred'),
          });

          controller.close();
        },
      });

      // Mock the postJsonToApi function
      // NOTE(review): jest.mock() called inside a test body is not hoisted
      // and does not replace modules already imported at the top of this
      // file, so this factory (and mockStream) is likely never used by
      // doStream — confirm, and consider a module-level jest.mock or
      // jest.spyOn instead.
      jest.mock('@ai-sdk/provider-utils', () => ({
        postJsonToApi: jest.fn().mockResolvedValue({value: mockStream}),
      }));

      const options: LanguageModelV2CallOptions = {
        prompt: [{role: 'user', content: [{type: 'text', text: 'Hi'}]}],
      };

      // NOTE(review): the try/catch below accepts both a successful stream
      // and a thrown error, so this test passes either way — it is a smoke
      // test rather than a regression guard.
      try {
        const result = await model.doStream(options);
        const parts: any[] = [];

        for await (const part of result.stream) {
          parts.push(part);
        }

        // Should have text-delta and error parts
        const textDeltas = parts.filter((p) => p.type === 'text-delta');
        const errors = parts.filter((p) => p.type === 'error');

        expect(textDeltas.length).toBeGreaterThan(0);
        expect(errors.length).toBeGreaterThan(0);
        expect(errors[0].error).toBeDefined();
      } catch (error) {
        // Alternatively, the stream might throw
        expect(error).toBeDefined();
      }
    });

    it('should handle abort signal during streaming', async () => {
      const abortController = new AbortController();

      const options: LanguageModelV2CallOptions = {
        prompt: [{role: 'user', content: [{type: 'text', text: 'Hi'}]}],
        abortSignal: abortController.signal,
      };

      // Abort immediately
      abortController.abort();

      // Should handle the aborted request gracefully
      // The actual API call should throw or return an error
      try {
        await model.doStream(options);
        // If it doesn't throw, that's also acceptable
      } catch (error: any) {
        // Expect an abort-related error
        expect(
          error.name === 'AbortError' ||
            error.message?.includes('abort') ||
            error.statusCode !== undefined
        ).toBe(true);
      }
    });
  });
});


```

--------------------------------------------------------------------------------
/llm/token-meter/tests/token-meter-openai.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for TokenMeter - OpenAI Provider
 */

import Stripe from 'stripe';
import {createTokenMeter} from '../token-meter';
import type {MeterConfig} from '../types';

// Mock Stripe
jest.mock('stripe');

describe('TokenMeter - OpenAI Provider', () => {
  // Shared across all tests below: a jest mock capturing meter-event
  // creation calls, plus the meter config passed to createTokenMeter.
  let mockMeterEventsCreate: jest.Mock;
  let config: MeterConfig;
  const TEST_API_KEY = 'sk_test_mock_key';

  beforeEach(() => {
    jest.clearAllMocks();
    // Resolves to {} so fire-and-forget logging settles without error.
    mockMeterEventsCreate = jest.fn().mockResolvedValue({});
    
    // Mock the Stripe constructor so createTokenMeter's
    // v2.billing.meterEvents.create calls land in the jest mock above.
    (Stripe as unknown as jest.Mock).mockImplementation(() => ({
      v2: {
        billing: {
          meterEvents: {
            create: mockMeterEventsCreate,
          },
        },
      },
    }));
    
    config = {};
  });

  describe('Chat Completions - Non-streaming', () => {
    // Each tracked completion should emit two meter events — token_type
    // 'input' (prompt_tokens) and 'output' (completion_tokens) — with the
    // count stringified in payload.value and the model prefixed 'openai/'.
    it('should track usage from basic chat completion', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'chatcmpl-123',
        object: 'chat.completion',
        created: Date.now(),
        model: 'gpt-4o-mini',
        choices: [
          {
            index: 0,
            message: {
              role: 'assistant',
              content: 'Hello, World!',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 12,
          completion_tokens: 5,
          total_tokens: 17,
        },
      };

      meter.trackUsage(response as any, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          event_name: 'token-billing-tokens',
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '12',
            model: 'openai/gpt-4o-mini',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '5',
            token_type: 'output',
          }),
        })
      );
    });

    it('should track usage from chat completion with tools', async () => {
      // Tool-call responses carry usage the same way as plain text ones.
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'chatcmpl-123',
        object: 'chat.completion',
        created: Date.now(),
        model: 'gpt-4o',
        choices: [
          {
            index: 0,
            message: {
              role: 'assistant',
              content: null,
              tool_calls: [
                {
                  id: 'call_123',
                  type: 'function',
                  function: {
                    name: 'get_weather',
                    arguments: '{"location":"San Francisco"}',
                  },
                },
              ],
            },
            finish_reason: 'tool_calls',
          },
        ],
        usage: {
          prompt_tokens: 100,
          completion_tokens: 30,
          total_tokens: 130,
        },
      };

      meter.trackUsage(response as any, 'cus_456');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '100',
            model: 'openai/gpt-4o',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '30',
            token_type: 'output',
          }),
        })
      );
    });

    it('should handle missing usage data', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // No `usage` field at all — the meter must not emit zero-value events.
      const response = {
        id: 'chatcmpl-123',
        object: 'chat.completion',
        created: Date.now(),
        model: 'gpt-4',
        choices: [
          {
            index: 0,
            message: {
              role: 'assistant',
              content: 'Hello!',
            },
            finish_reason: 'stop',
          },
        ],
      };

      meter.trackUsage(response as any, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      // Should not create events with 0 tokens (code only sends when > 0)
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(0);
    });

    it('should handle multi-turn conversations', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        id: 'chatcmpl-789',
        object: 'chat.completion',
        created: Date.now(),
        model: 'gpt-4',
        choices: [
          {
            index: 0,
            message: {
              role: 'assistant',
              content: 'The weather is sunny.',
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 150, // Includes conversation history
          completion_tokens: 10,
          total_tokens: 160,
        },
      };

      meter.trackUsage(response as any, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '150',
            model: 'openai/gpt-4',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '10',
            token_type: 'output',
          }),
        })
      );
    });
  });

  describe('Chat Completions - Streaming', () => {
    // Streaming chunks carry usage only on the final chunk; the meter must
    // tee the stream, pass chunks through untouched, and bill from that
    // final usage block.
    it('should track usage from basic streaming chat', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          id: 'chatcmpl-123',
          object: 'chat.completion.chunk',
          created: Date.now(),
          model: 'gpt-4o-mini',
          choices: [
            {
              index: 0,
              delta: {content: 'Hello'},
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-123',
          object: 'chat.completion.chunk',
          created: Date.now(),
          model: 'gpt-4o-mini',
          choices: [
            {
              index: 0,
              delta: {content: ', World!'},
              finish_reason: 'stop',
            },
          ],
          usage: {
            prompt_tokens: 12,
            completion_tokens: 5,
            total_tokens: 17,
          },
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamOpenAI(mockStream as any, 'cus_123');

      for await (const _chunk of wrappedStream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '12',
            model: 'openai/gpt-4o-mini',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '5',
            token_type: 'output',
          }),
        })
      );
    });

    it('should track usage from streaming chat with tools', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          id: 'chatcmpl-123',
          object: 'chat.completion.chunk',
          created: Date.now(),
          model: 'gpt-4o',
          choices: [
            {
              index: 0,
              delta: {
                tool_calls: [
                  {
                    index: 0,
                    id: 'call_123',
                    type: 'function',
                    function: {
                      name: 'get_weather',
                      arguments: '{"location":',
                    },
                  },
                ],
              },
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-123',
          object: 'chat.completion.chunk',
          created: Date.now(),
          model: 'gpt-4o',
          choices: [
            {
              index: 0,
              delta: {},
              finish_reason: 'tool_calls',
            },
          ],
          usage: {
            prompt_tokens: 100,
            completion_tokens: 30,
            total_tokens: 130,
          },
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamOpenAI(mockStream as any, 'cus_456');

      for await (const _chunk of wrappedStream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '100',
            model: 'openai/gpt-4o',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '30',
            token_type: 'output',
          }),
        })
      );
    });

    it('should properly tee the stream', async () => {
      // The consumer-facing half of the tee must still yield every chunk.
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          id: 'chatcmpl-123',
          object: 'chat.completion.chunk',
          created: Date.now(),
          model: 'gpt-4',
          choices: [
            {
              index: 0,
              delta: {content: 'Hello'},
              finish_reason: null,
            },
          ],
        },
        {
          id: 'chatcmpl-123',
          object: 'chat.completion.chunk',
          created: Date.now(),
          model: 'gpt-4',
          choices: [
            {
              index: 0,
              delta: {content: ' world'},
              finish_reason: 'stop',
            },
          ],
          usage: {
            prompt_tokens: 10,
            completion_tokens: 5,
            total_tokens: 15,
          },
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamOpenAI(mockStream as any, 'cus_123');

      const receivedChunks: any[] = [];
      for await (const chunk of wrappedStream) {
        receivedChunks.push(chunk);
      }

      expect(receivedChunks).toHaveLength(2);
      expect(receivedChunks[0].choices[0].delta.content).toBe('Hello');
      expect(receivedChunks[1].choices[0].delta.content).toBe(' world');
    });
  });

  describe('Responses API - Non-streaming', () => {
    // Meter logging is fire-and-forget; yield to the macrotask queue so
    // pending meter-event calls settle before asserting.
    const flushPendingWork = () => new Promise((resolve) => setImmediate(resolve));

    // Assert that a meter event whose payload contains the given fields was
    // recorded (extra payload fields are allowed).
    const expectEventWithPayload = (fields: Record<string, string>) => {
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining(fields),
        })
      );
    };

    it('should track usage from basic responses API', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const apiResponse = {
        id: 'resp_123',
        object: 'response',
        created: Date.now(),
        model: 'gpt-4o-mini',
        output: 'Hello, World!',
        usage: {input_tokens: 15, output_tokens: 8},
      };

      meter.trackUsage(apiResponse as any, 'cus_123');
      await flushPendingWork();

      // One event per token type: input and output.
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expectEventWithPayload({
        stripe_customer_id: 'cus_123',
        value: '15',
        model: 'openai/gpt-4o-mini',
        token_type: 'input',
      });
      expectEventWithPayload({value: '8', token_type: 'output'});
    });

    it('should track usage from responses API parse', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const apiResponse = {
        id: 'resp_456',
        object: 'response',
        created: Date.now(),
        model: 'gpt-4o',
        output: {parsed: {city: 'San Francisco', temperature: 72}},
        usage: {input_tokens: 50, output_tokens: 20},
      };

      meter.trackUsage(apiResponse as any, 'cus_789');
      await flushPendingWork();

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expectEventWithPayload({
        stripe_customer_id: 'cus_789',
        value: '50',
        model: 'openai/gpt-4o',
        token_type: 'input',
      });
      expectEventWithPayload({value: '20', token_type: 'output'});
    });
  });

  describe('Responses API - Streaming', () => {
    it('should track usage from streaming responses API', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // NOTE(review): the terminal chunk here is typed 'response.done'; the
      // public OpenAI Responses streaming API emits 'response.completed'.
      // Presumably the meter's parser matches on 'response.done' — confirm
      // against the implementation in token-meter.
      const chunks = [
        {
          type: 'response.output_text.delta',
          delta: 'Hello',
        },
        {
          type: 'response.output_text.delta',
          delta: ', World!',
        },
        {
          type: 'response.done',
          response: {
            id: 'resp_123',
            model: 'gpt-4o-mini',
            usage: {
              input_tokens: 15,
              output_tokens: 8,
            },
          },
        },
      ];

      const mockStream = createMockStreamWithTee(chunks);
      const wrappedStream = meter.trackUsageStreamOpenAI(mockStream as any, 'cus_123');

      for await (const _chunk of wrappedStream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '15',
            model: 'openai/gpt-4o-mini',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '8',
            token_type: 'output',
          }),
        })
      );
    });
  });

  describe('Embeddings', () => {
    // Embedding responses report only prompt_tokens, so the meter should
    // emit a single 'input' event and no 'output' event.
    it('should track usage from single text embedding', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        object: 'list',
        data: [
          {
            object: 'embedding',
            embedding: new Array(1536).fill(0.1),
            index: 0,
          },
        ],
        model: 'text-embedding-ada-002',
        usage: {
          prompt_tokens: 8,
          total_tokens: 8,
        },
      };

      meter.trackUsage(response as any, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      // Embeddings only have input tokens, no output tokens
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(1);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '8',
            model: 'openai/text-embedding-ada-002',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from batch embeddings', async () => {
      // Batch requests report a single aggregate prompt_tokens count.
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        object: 'list',
        data: [
          {
            object: 'embedding',
            embedding: new Array(1536).fill(0.1),
            index: 0,
          },
          {
            object: 'embedding',
            embedding: new Array(1536).fill(0.2),
            index: 1,
          },
          {
            object: 'embedding',
            embedding: new Array(1536).fill(0.3),
            index: 2,
          },
        ],
        model: 'text-embedding-3-small',
        usage: {
          prompt_tokens: 24,
          total_tokens: 24,
        },
      };

      meter.trackUsage(response as any, 'cus_456');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      // Embeddings only have input tokens, no output tokens
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(1);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '24',
            model: 'openai/text-embedding-3-small',
            token_type: 'input',
          }),
        })
      );
    });

    it('should handle missing usage data in embeddings', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // No `usage` field — the meter must not emit zero-value events.
      const response = {
        object: 'list',
        data: [
          {
            object: 'embedding',
            embedding: new Array(1536).fill(0.1),
            index: 0,
          },
        ],
        model: 'text-embedding-ada-002',
      };

      meter.trackUsage(response as any, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      // Should not create events with 0 tokens
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(0);
    });
  });
});

// Helper to build a mock OpenAI-SDK-like stream over a fixed chunk list.
// Every stream produced (the root and both halves of any tee) is async
// iterable AND tee-able to arbitrary depth, unlike the previous hand-unrolled
// version where only one branch of the first tee could be teed again.
// Each iteration replays the full `chunks` array from the start.
function createMockStreamWithTee(chunks: any[]) {
  // Recursive factory: every stream it returns supports both protocols.
  const makeStream = (): any => ({
    async *[Symbol.asyncIterator]() {
      for (const chunk of chunks) {
        yield chunk;
      }
    },
    // tee() yields two independent streams over the same chunks, each of
    // which can be teed again.
    tee() {
      return [makeStream(), makeStream()];
    },
  });
  return makeStream();
}


```

--------------------------------------------------------------------------------
/llm/token-meter/tests/token-meter-gemini.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for TokenMeter - Gemini Provider
 */

import Stripe from 'stripe';
import {createTokenMeter} from '../token-meter';
import type {MeterConfig} from '../types';

// Mock Stripe
jest.mock('stripe');

describe('TokenMeter - Gemini Provider', () => {
  // Shared across all tests below: a jest mock capturing meter-event
  // creation calls, plus the meter config passed to createTokenMeter.
  let mockMeterEventsCreate: jest.Mock;
  let config: MeterConfig;
  const TEST_API_KEY = 'sk_test_mock_key';

  beforeEach(() => {
    jest.clearAllMocks();
    // Resolves to {} so fire-and-forget logging settles without error.
    mockMeterEventsCreate = jest.fn().mockResolvedValue({});
    
    // Mock the Stripe constructor so createTokenMeter's
    // v2.billing.meterEvents.create calls land in the jest mock above.
    (Stripe as unknown as jest.Mock).mockImplementation(() => ({
      v2: {
        billing: {
          meterEvents: {
            create: mockMeterEventsCreate,
          },
        },
      },
    }));
    
    config = {};
  });

  describe('GenerateContent - Non-streaming', () => {
    // Gemini usage comes from response.usageMetadata; the meter should map
    // promptTokenCount -> 'input' and candidates (+thoughts) -> 'output',
    // with the model name prefixed 'google/'.
    it('should track usage from basic text generation', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        response: {
          text: () => 'Hello, World!',
          usageMetadata: {
            promptTokenCount: 12,
            candidatesTokenCount: 8,
            totalTokenCount: 20,
          },
          modelVersion: 'gemini-2.0-flash-exp',
        },
      };

      meter.trackUsage(response, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '12',
            model: 'google/gemini-2.0-flash-exp',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage with reasoning tokens for extended thinking models', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        response: {
          text: () => 'Detailed response after thinking',
          usageMetadata: {
            promptTokenCount: 20,
            candidatesTokenCount: 15,
            thoughtsTokenCount: 50, // Reasoning/thinking tokens
            totalTokenCount: 85,
          },
          modelVersion: 'gemini-2.0-flash-thinking-exp',
        },
      };

      meter.trackUsage(response, 'cus_456');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '20',
            model: 'google/gemini-2.0-flash-thinking-exp',
            token_type: 'input',
          }),
        })
      );
      // Thinking tokens are billed as output alongside candidate tokens.
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '65', // 15 candidates + 50 thoughts
            token_type: 'output',
          }),
        })
      );
    });

    it('should track usage from generation with function calling', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        response: {
          text: () => '',
          functionCalls: () => [
            {
              name: 'get_weather',
              args: {location: 'San Francisco'},
            },
          ],
          usageMetadata: {
            promptTokenCount: 100,
            candidatesTokenCount: 30,
            totalTokenCount: 130,
          },
          modelVersion: 'gemini-1.5-pro',
        },
      };

      meter.trackUsage(response, 'cus_789');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '100',
            model: 'google/gemini-1.5-pro',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage with system instructions', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        response: {
          text: () => 'I am following the system instructions.',
          usageMetadata: {
            promptTokenCount: 50, // Includes system instruction tokens
            candidatesTokenCount: 12,
            totalTokenCount: 62,
          },
          modelVersion: 'gemini-2.5-flash',
        },
      };

      meter.trackUsage(response, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '50',
            model: 'google/gemini-2.5-flash',
            token_type: 'input',
          }),
        })
      );
    });

    it('should use default model name when modelVersion is missing', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // No modelVersion — the meter falls back to the generic 'gemini' name.
      const response = {
        response: {
          text: () => 'Hello',
          usageMetadata: {
            promptTokenCount: 5,
            candidatesTokenCount: 3,
            totalTokenCount: 8,
          },
        },
      };

      meter.trackUsage(response, 'cus_999');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '5',
            model: 'google/gemini',
            token_type: 'input',
          }),
        })
      );
    });
  });

  describe('GenerateContent - Streaming', () => {
    it('should require model name parameter', () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const mockGeminiStream = {
        stream: {
          async *[Symbol.asyncIterator]() {
            yield {
              text: () => 'Hello',
              usageMetadata: {
                promptTokenCount: 10,
                candidatesTokenCount: 5,
                totalTokenCount: 15,
              },
            };
          },
        },
        response: Promise.resolve({
          text: () => 'Hello',
          modelVersion: 'gemini-1.5-pro',
        }),
      };

      // TypeScript will enforce model name parameter at compile time
      // NOTE(review): this is a compile-time-only check, but the call still
      // executes at runtime with an undefined model name and its returned
      // stream is never consumed — confirm this cannot leak an unhandled
      // rejection or emit a meter event.
      // @ts-expect-error - Testing that TypeScript requires model name
      meter.trackUsageStreamGemini(mockGeminiStream, 'cus_123');
    });

    it('should track usage from basic streaming generation', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          text: () => 'Hello',
          usageMetadata: null,
        },
        {
          text: () => ', World!',
          usageMetadata: {
            promptTokenCount: 12,
            candidatesTokenCount: 8,
            totalTokenCount: 20,
          },
        },
      ];

      const mockGeminiStream = {
        stream: {
          async *[Symbol.asyncIterator]() {
            for (const chunk of chunks) {
              yield chunk;
            }
          },
        },
        response: Promise.resolve({
          text: () => 'Hello, World!',
          modelVersion: 'gemini-2.0-flash-exp',
        }),
      };

      const wrappedStream = meter.trackUsageStreamGemini(
        mockGeminiStream,
        'cus_123',
        'gemini-2.0-flash-exp'
      );

      for await (const _chunk of wrappedStream.stream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_123',
            value: '12',
            model: 'google/gemini-2.0-flash-exp',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from streaming with reasoning tokens', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {
          text: () => 'Thinking...',
          usageMetadata: null,
        },
        {
          text: () => 'After consideration, here is my answer.',
          usageMetadata: {
            promptTokenCount: 20,
            candidatesTokenCount: 15,
            thoughtsTokenCount: 50,
            totalTokenCount: 85,
          },
        },
      ];

      const mockGeminiStream = {
        stream: {
          async *[Symbol.asyncIterator]() {
            for (const chunk of chunks) {
              yield chunk;
            }
          },
        },
        response: Promise.resolve({
          text: () => 'Complete response',
          modelVersion: 'gemini-2.0-flash-thinking-exp',
        }),
      };

      const wrappedStream = meter.trackUsageStreamGemini(
        mockGeminiStream,
        'cus_456',
        'gemini-2.0-flash-thinking-exp'
      );

      for await (const _chunk of wrappedStream.stream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            stripe_customer_id: 'cus_456',
            value: '20',
            model: 'google/gemini-2.0-flash-thinking-exp',
            token_type: 'input',
          }),
        })
      );
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '65', // 15 candidates + 50 thoughts
            token_type: 'output',
          }),
        })
      );
    });

    it('should preserve the response promise in wrapped stream', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const mockGeminiStream = {
        stream: {
          async *[Symbol.asyncIterator]() {
            yield {
              text: () => 'Hello',
              usageMetadata: {
                promptTokenCount: 10,
                candidatesTokenCount: 5,
                totalTokenCount: 15,
              },
            };
          },
        },
        response: Promise.resolve({
          text: () => 'Hello',
          modelVersion: 'gemini-1.5-pro',
        }),
      };

      const wrappedStream = meter.trackUsageStreamGemini(
        mockGeminiStream,
        'cus_123',
        'gemini-1.5-pro'
      );

      expect(wrappedStream).toHaveProperty('stream');
      expect(wrappedStream).toHaveProperty('response');

      const response = await wrappedStream.response;
      expect(response.text()).toBe('Hello');
    });

    it('should properly wrap the stream generator', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const chunks = [
        {text: () => 'First', usageMetadata: null},
        {text: () => ' Second', usageMetadata: null},
        {
          text: () => ' Third',
          usageMetadata: {
            promptTokenCount: 20,
            candidatesTokenCount: 15,
            totalTokenCount: 35,
          },
        },
      ];

      const mockGeminiStream = {
        stream: {
          async *[Symbol.asyncIterator]() {
            for (const chunk of chunks) {
              yield chunk;
            }
          },
        },
        response: Promise.resolve({
          text: () => 'First Second Third',
          modelVersion: 'gemini-2.0-flash-exp',
        }),
      };

      const wrappedStream = meter.trackUsageStreamGemini(
        mockGeminiStream,
        'cus_123',
        'gemini-2.0-flash-exp'
      );

      const receivedChunks: any[] = [];
      for await (const chunk of wrappedStream.stream) {
        receivedChunks.push(chunk);
      }

      expect(receivedChunks).toHaveLength(3);
      expect(receivedChunks[0].text()).toBe('First');
      expect(receivedChunks[1].text()).toBe(' Second');
      expect(receivedChunks[2].text()).toBe(' Third');
    });
  });

  // Multi-turn chat sessions: responses have the same shape as
  // generateContent, but promptTokenCount grows with conversation history.
  describe('Multi-turn Chat (ChatSession)', () => {
    it('should track usage from chat session message', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // ChatSession.sendMessage() returns the same structure as generateContent
      const response = {
        response: {
          text: () => 'This is my second response.',
          usageMetadata: {
            promptTokenCount: 80, // Includes conversation history
            candidatesTokenCount: 12,
            totalTokenCount: 92,
          },
          modelVersion: 'gemini-2.5-flash',
        },
      };

      meter.trackUsage(response, 'cus_123');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      // Two meter events: one for input tokens, one for output tokens.
      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '80',
            model: 'google/gemini-2.5-flash',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from streaming chat session', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // Only the final chunk carries usageMetadata.
      const chunks = [
        {
          text: () => 'Continuing',
          usageMetadata: null,
        },
        {
          text: () => ' our conversation.',
          usageMetadata: {
            promptTokenCount: 100, // Includes full conversation context
            candidatesTokenCount: 10,
            totalTokenCount: 110,
          },
        },
      ];

      const mockGeminiStream = {
        stream: {
          async *[Symbol.asyncIterator]() {
            for (const chunk of chunks) {
              yield chunk;
            }
          },
        },
        response: Promise.resolve({
          text: () => 'Continuing our conversation.',
          modelVersion: 'gemini-1.5-pro',
        }),
      };

      const wrappedStream = meter.trackUsageStreamGemini(
        mockGeminiStream,
        'cus_456',
        'gemini-1.5-pro'
      );

      for await (const _chunk of wrappedStream.stream) {
        // Consume stream
      }

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '100',
            model: 'google/gemini-1.5-pro',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track usage from long conversation with history', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      const response = {
        response: {
          text: () => 'Based on our previous discussion...',
          usageMetadata: {
            promptTokenCount: 500, // Large context from history
            candidatesTokenCount: 25,
            totalTokenCount: 525,
          },
          modelVersion: 'gemini-1.5-pro',
        },
      };

      meter.trackUsage(response, 'cus_789');

      // Wait for fire-and-forget logging to complete
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '500',
            model: 'google/gemini-1.5-pro',
            token_type: 'input',
          }),
        })
      );
    });
  });

  // Each supported Gemini model variant should be reported to Stripe under a
  // "google/<modelVersion>" model label.
  describe('Model Variants', () => {
    // Builds a mock non-streaming Gemini result with the given model/usage.
    const makeResult = (
      modelVersion: string,
      text: string,
      usageMetadata: {
        promptTokenCount: number;
        candidatesTokenCount: number;
        thoughtsTokenCount?: number;
        totalTokenCount: number;
      }
    ) => ({
      response: {
        text: () => text,
        usageMetadata,
        modelVersion,
      },
    });

    it('should track gemini-1.5-pro', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      meter.trackUsage(
        makeResult('gemini-1.5-pro', 'Pro model response', {
          promptTokenCount: 15,
          candidatesTokenCount: 10,
          totalTokenCount: 25,
        }),
        'cus_123'
      );

      // Flush fire-and-forget logging.
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '15',
            model: 'google/gemini-1.5-pro',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track gemini-2.5-flash', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      meter.trackUsage(
        makeResult('gemini-2.5-flash', 'Flash model response', {
          promptTokenCount: 12,
          candidatesTokenCount: 8,
          totalTokenCount: 20,
        }),
        'cus_456'
      );

      // Flush fire-and-forget logging.
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '12',
            model: 'google/gemini-2.5-flash',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track gemini-2.0-flash-exp', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      meter.trackUsage(
        makeResult('gemini-2.0-flash-exp', 'Gemini 2.0 response', {
          promptTokenCount: 10,
          candidatesTokenCount: 5,
          totalTokenCount: 15,
        }),
        'cus_789'
      );

      // Flush fire-and-forget logging.
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '10',
            model: 'google/gemini-2.0-flash-exp',
            token_type: 'input',
          }),
        })
      );
    });

    it('should track gemini-2.0-flash-thinking-exp with reasoning tokens', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      meter.trackUsage(
        makeResult('gemini-2.0-flash-thinking-exp', 'Thought-through response', {
          promptTokenCount: 25,
          candidatesTokenCount: 20,
          thoughtsTokenCount: 100, // Extended thinking
          totalTokenCount: 145,
        }),
        'cus_999'
      );

      // Flush fire-and-forget logging.
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '25',
            model: 'google/gemini-2.0-flash-thinking-exp',
            token_type: 'input',
          }),
        })
      );
      // Thoughts tokens are folded into the output total.
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            value: '120', // 20 + 100
            token_type: 'output',
          }),
        })
      );
    });
  });

  describe('Edge Cases', () => {
    it('should handle zero reasoning tokens gracefully', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // An explicit thoughtsTokenCount of 0 must not break tracking.
      meter.trackUsage(
        {
          response: {
            text: () => 'No reasoning tokens',
            usageMetadata: {
              promptTokenCount: 10,
              candidatesTokenCount: 5,
              thoughtsTokenCount: 0,
              totalTokenCount: 15,
            },
            modelVersion: 'gemini-2.0-flash-thinking-exp',
          },
        },
        'cus_123'
      );

      // Flush fire-and-forget logging.
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            model: 'google/gemini-2.0-flash-thinking-exp',
            token_type: 'input',
            value: '10',
          }),
        })
      );
    });

    it('should handle missing thoughtsTokenCount field', async () => {
      const meter = createTokenMeter(TEST_API_KEY, config);

      // Standard (non-thinking) models omit thoughtsTokenCount entirely.
      meter.trackUsage(
        {
          response: {
            text: () => 'Standard model without thoughts',
            usageMetadata: {
              promptTokenCount: 10,
              candidatesTokenCount: 5,
              totalTokenCount: 15,
            },
            modelVersion: 'gemini-1.5-pro',
          },
        },
        'cus_123'
      );

      // Flush fire-and-forget logging.
      await new Promise(resolve => setImmediate(resolve));

      expect(mockMeterEventsCreate).toHaveBeenCalledTimes(2);
      expect(mockMeterEventsCreate).toHaveBeenCalledWith(
        expect.objectContaining({
          payload: expect.objectContaining({
            model: 'google/gemini-1.5-pro',
            token_type: 'input',
            value: '10',
          }),
        })
      );
    });
  });
});


```

--------------------------------------------------------------------------------
/tools/python/tests/test_functions.py:
--------------------------------------------------------------------------------

```python
import unittest
import stripe
from unittest import mock
from stripe_agent_toolkit.functions import (
    create_customer,
    list_customers,
    create_product,
    list_products,
    create_price,
    list_prices,
    create_payment_link,
    list_invoices,
    create_invoice,
    create_invoice_item,
    finalize_invoice,
    retrieve_balance,
    create_refund,
    list_payment_intents,
    create_billing_portal_session,
)


class TestStripeFunctions(unittest.TestCase):
    def test_create_customer(self):
        """create_customer forwards name/email and returns only the id."""
        with mock.patch("stripe.Customer.create") as create_mock:
            create_mock.return_value = stripe.Customer.construct_from(
                {"id": "cus_123"}, "sk_test_123"
            )

            result = create_customer(
                context={}, name="Test User", email="[email protected]"
            )

            create_mock.assert_called_with(
                name="Test User", email="[email protected]"
            )
            self.assertEqual(result, {"id": "cus_123"})

    def test_create_customer_with_context(self):
        """A connected-account context adds stripe_account to the API call."""
        with mock.patch("stripe.Customer.create") as create_mock:
            create_mock.return_value = stripe.Customer.construct_from(
                {"id": "cus_123"}, "sk_test_123"
            )

            result = create_customer(
                context={"account": "acct_123"},
                name="Test User",
                email="[email protected]",
            )

            create_mock.assert_called_with(
                name="Test User",
                email="[email protected]",
                stripe_account="acct_123",
            )
            self.assertEqual(result, {"id": "cus_123"})

    def test_list_customers(self):
        """list_customers with no filters calls Customer.list with no args.

        NOTE(review): the assertion compares the returned objects against
        id-only dicts, so list_customers presumably projects each customer
        down to its id (or StripeObject equality ignores extra keys) —
        confirm against the implementation.
        """
        with mock.patch("stripe.Customer.list") as mock_function:
            # Expected projection of the two customers below.
            mock_customers = [{"id": "cus_123"}, {"id": "cus_456"}]

            mock_function.return_value = stripe.ListObject.construct_from(
                {
                    "object": "list",
                    "data": [
                        stripe.Customer.construct_from(
                            {
                                "id": "cus_123",
                                "email": "[email protected]",
                                "name": "Customer One",
                            },
                            "sk_test_123",
                        ),
                        stripe.Customer.construct_from(
                            {
                                "id": "cus_456",
                                "email": "[email protected]",
                                "name": "Customer Two",
                            },
                            "sk_test_123",
                        ),
                    ],
                    "has_more": False,
                    "url": "/v1/customers",
                },
                "sk_test_123",
            )

            result = list_customers(context={})

            # No context -> no extra keyword arguments are forwarded.
            mock_function.assert_called_with()

            self.assertEqual(result, mock_customers)

    def test_list_customers_with_context(self):
        """A connected-account context adds stripe_account to Customer.list."""
        with mock.patch("stripe.Customer.list") as mock_function:
            mock_customers = [{"id": "cus_123"}, {"id": "cus_456"}]

            mock_function.return_value = stripe.ListObject.construct_from(
                {
                    "object": "list",
                    "data": [
                        stripe.Customer.construct_from(
                            {
                                "id": "cus_123",
                                "email": "[email protected]",
                                "name": "Customer One",
                            },
                            "sk_test_123",
                        ),
                        stripe.Customer.construct_from(
                            {
                                "id": "cus_456",
                                "email": "[email protected]",
                                "name": "Customer Two",
                            },
                            "sk_test_123",
                        ),
                    ],
                    "has_more": False,
                    "url": "/v1/customers",
                },
                "sk_test_123",
            )

            result = list_customers(context={"account": "acct_123"})

            mock_function.assert_called_with(
                stripe_account="acct_123",
            )

            self.assertEqual(result, mock_customers)

    def test_create_product(self):
        """create_product forwards the name and returns only the id."""
        with mock.patch("stripe.Product.create") as create_mock:
            create_mock.return_value = stripe.Product.construct_from(
                {"id": "prod_123"}, "sk_test_123"
            )

            result = create_product(context={}, name="Test Product")

            create_mock.assert_called_with(name="Test Product")
            self.assertEqual(result, {"id": "prod_123"})

    def test_create_product_with_context(self):
        """A connected-account context adds stripe_account to the API call."""
        with mock.patch("stripe.Product.create") as create_mock:
            create_mock.return_value = stripe.Product.construct_from(
                {"id": "prod_123"}, "sk_test_123"
            )

            result = create_product(
                context={"account": "acct_123"}, name="Test Product"
            )

            create_mock.assert_called_with(
                name="Test Product", stripe_account="acct_123"
            )
            self.assertEqual(result, {"id": "prod_123"})

    def test_list_products(self):
        """list_products with no filters calls Product.list with no args.

        The expected result keeps the id and name of each product.
        """
        with mock.patch("stripe.Product.list") as mock_function:
            mock_products = [
                {"id": "prod_123", "name": "Product One"},
                {"id": "prod_456", "name": "Product Two"},
            ]

            mock_function.return_value = stripe.ListObject.construct_from(
                {
                    "object": "list",
                    "data": [
                        stripe.Product.construct_from(
                            {
                                "id": "prod_123",
                                "name": "Product One",
                            },
                            "sk_test_123",
                        ),
                        stripe.Product.construct_from(
                            {
                                "id": "prod_456",
                                "name": "Product Two",
                            },
                            "sk_test_123",
                        ),
                    ],
                    "has_more": False,
                    "url": "/v1/products",
                },
                "sk_test_123",
            )

            result = list_products(context={})

            # No context -> no extra keyword arguments are forwarded.
            mock_function.assert_called_with()

            self.assertEqual(result, mock_products)

    def test_create_price(self):
        """create_price forwards product/currency/amount, returns the id."""
        with mock.patch("stripe.Price.create") as create_mock:
            create_mock.return_value = stripe.Price.construct_from(
                {"id": "price_123"}, "sk_test_123"
            )

            result = create_price(
                context={},
                product="prod_123",
                currency="usd",
                unit_amount=1000,
            )

            create_mock.assert_called_with(
                product="prod_123", currency="usd", unit_amount=1000
            )
            self.assertEqual(result, {"id": "price_123"})

    def test_create_price_with_context(self):
        """A connected-account context adds stripe_account to the API call."""
        with mock.patch("stripe.Price.create") as create_mock:
            create_mock.return_value = stripe.Price.construct_from(
                {"id": "price_123"}, "sk_test_123"
            )

            result = create_price(
                context={"account": "acct_123"},
                product="prod_123",
                currency="usd",
                unit_amount=1000,
            )

            create_mock.assert_called_with(
                product="prod_123",
                currency="usd",
                unit_amount=1000,
                stripe_account="acct_123",
            )
            self.assertEqual(result, {"id": "price_123"})

    def test_list_prices(self):
        """list_prices with no filters calls Price.list with no arguments.

        Passes ``context=`` by keyword for consistency with every other
        test in this suite (previously positional).
        """
        with mock.patch("stripe.Price.list") as mock_function:
            mock_prices = [
                {"id": "price_123", "product": "prod_123"},
                {"id": "price_456", "product": "prod_456"},
            ]

            mock_function.return_value = stripe.ListObject.construct_from(
                {
                    "object": "list",
                    "data": [
                        stripe.Price.construct_from(
                            {
                                "id": "price_123",
                                "product": "prod_123",
                            },
                            "sk_test_123",
                        ),
                        stripe.Price.construct_from(
                            {
                                "id": "price_456",
                                "product": "prod_456",
                            },
                            "sk_test_123",
                        ),
                    ],
                    "has_more": False,
                    "url": "/v1/prices",
                },
                "sk_test_123",
            )

            result = list_prices(context={})

            # No context -> no extra keyword arguments are forwarded.
            mock_function.assert_called_with()

            self.assertEqual(result, mock_prices)

    def test_list_prices_with_context(self):
        """A connected-account context adds stripe_account to Price.list."""
        with mock.patch("stripe.Price.list") as mock_function:
            mock_prices = [
                {"id": "price_123", "product": "prod_123"},
                {"id": "price_456", "product": "prod_456"},
            ]

            mock_function.return_value = stripe.ListObject.construct_from(
                {
                    "object": "list",
                    "data": [
                        stripe.Price.construct_from(
                            {
                                "id": "price_123",
                                "product": "prod_123",
                            },
                            "sk_test_123",
                        ),
                        stripe.Price.construct_from(
                            {
                                "id": "price_456",
                                "product": "prod_456",
                            },
                            "sk_test_123",
                        ),
                    ],
                    "has_more": False,
                    "url": "/v1/prices",
                },
                "sk_test_123",
            )

            result = list_prices(context={"account": "acct_123"})

            mock_function.assert_called_with(stripe_account="acct_123")

            self.assertEqual(result, mock_prices)

    def test_create_payment_link(self):
        """create_payment_link builds a single line item from price/quantity."""
        with mock.patch("stripe.PaymentLink.create") as create_mock:
            fake_link = {"id": "pl_123", "url": "https://example.com"}
            create_mock.return_value = stripe.PaymentLink.construct_from(
                fake_link, "sk_test_123"
            )

            result = create_payment_link(
                context={}, price="price_123", quantity=1
            )

            create_mock.assert_called_with(
                line_items=[{"price": "price_123", "quantity": 1}],
            )
            self.assertEqual(result, fake_link)

    def test_create_payment_link_with_redirect_url(self):
        """A redirect_url is mapped onto the after_completion redirect."""
        with mock.patch("stripe.PaymentLink.create") as create_mock:
            fake_link = {"id": "pl_123", "url": "https://example.com"}
            create_mock.return_value = stripe.PaymentLink.construct_from(
                fake_link, "sk_test_123"
            )

            result = create_payment_link(
                context={},
                price="price_123",
                quantity=1,
                redirect_url="https://example.com",
            )

            create_mock.assert_called_with(
                line_items=[{"price": "price_123", "quantity": 1}],
                after_completion={
                    "type": "redirect",
                    "redirect": {"url": "https://example.com"},
                },
            )
            self.assertEqual(result, fake_link)

    def test_create_payment_link_with_context(self):
        """A connected-account context adds stripe_account to the API call."""
        with mock.patch("stripe.PaymentLink.create") as create_mock:
            fake_link = {"id": "pl_123", "url": "https://example.com"}
            create_mock.return_value = stripe.PaymentLink.construct_from(
                fake_link, "sk_test_123"
            )

            result = create_payment_link(
                context={"account": "acct_123"}, price="price_123", quantity=1
            )

            create_mock.assert_called_with(
                line_items=[{"price": "price_123", "quantity": 1}],
                stripe_account="acct_123",
            )
            self.assertEqual(result, fake_link)

    def test_list_invoices(self):
        """list_invoices projects each invoice to id/url/customer/status.

        The list fixture is built with ``stripe.ListObject.construct_from``
        (previously ``stripe.Invoice.construct_from``) for consistency with
        the other list tests — the payload is a list object, not an invoice.
        """
        with mock.patch("stripe.Invoice.list") as mock_function:
            mock_invoice = {
                "id": "in_123",
                "hosted_invoice_url": "https://example.com",
                "customer": "cus_123",
                "status": "open",
            }
            mock_invoices = {
                "object": "list",
                "data": [
                    stripe.Invoice.construct_from(
                        mock_invoice,
                        "sk_test_123",
                    ),
                ],
                "has_more": False,
                "url": "/v1/invoices",
            }

            mock_function.return_value = stripe.ListObject.construct_from(
                mock_invoices, "sk_test_123"
            )

            result = list_invoices(context={})

            # No context/filters -> no extra keyword arguments forwarded.
            mock_function.assert_called_with()

            self.assertEqual(
                result,
                [
                    {
                        "id": mock_invoice["id"],
                        "hosted_invoice_url": mock_invoice["hosted_invoice_url"],
                        "customer": mock_invoice["customer"],
                        "status": mock_invoice["status"],
                    }
                ],
            )

    def test_list_invoices_with_customer(self):
        """A customer filter is forwarded to Invoice.list."""
        with mock.patch("stripe.Invoice.list") as mock_function:
            mock_invoice = {
                "id": "in_123",
                "hosted_invoice_url": "https://example.com",
                "customer": "cus_123",
                "status": "open",
            }
            mock_invoices = {
                "object": "list",
                "data": [
                    stripe.Invoice.construct_from(
                        mock_invoice,
                        "sk_test_123",
                    ),
                ],
                "has_more": False,
                "url": "/v1/invoices",
            }

            mock_function.return_value = stripe.ListObject.construct_from(
                mock_invoices, "sk_test_123"
            )

            result = list_invoices(context={}, customer="cus_123")

            mock_function.assert_called_with(
                customer="cus_123",
            )

            self.assertEqual(
                result,
                [
                    {
                        "id": mock_invoice["id"],
                        "hosted_invoice_url": mock_invoice["hosted_invoice_url"],
                        "customer": mock_invoice["customer"],
                        "status": mock_invoice["status"],
                    }
                ],
            )

    def test_list_invoices_with_customer_and_limit(self):
        """Both the customer filter and limit are forwarded to Invoice.list."""
        with mock.patch("stripe.Invoice.list") as mock_function:
            mock_invoice = {
                "id": "in_123",
                "hosted_invoice_url": "https://example.com",
                "customer": "cus_123",
                "status": "open",
            }
            mock_invoices = {
                "object": "list",
                "data": [
                    stripe.Invoice.construct_from(
                        mock_invoice,
                        "sk_test_123",
                    ),
                ],
                "has_more": False,
                "url": "/v1/invoices",
            }

            mock_function.return_value = stripe.ListObject.construct_from(
                mock_invoices, "sk_test_123"
            )

            result = list_invoices(context={}, customer="cus_123", limit=100)

            mock_function.assert_called_with(
                customer="cus_123",
                limit=100,
            )

            self.assertEqual(
                result,
                [
                    {
                        "id": mock_invoice["id"],
                        "hosted_invoice_url": mock_invoice["hosted_invoice_url"],
                        "customer": mock_invoice["customer"],
                        "status": mock_invoice["status"],
                    }
                ],
            )

    def test_list_invoices_with_context(self):
        """A connected-account context is translated to stripe_account."""
        invoice_fields = {
            "id": "in_123",
            "hosted_invoice_url": "https://example.com",
            "customer": "cus_123",
            "status": "open",
        }
        list_payload = {
            "object": "list",
            "data": [
                stripe.Invoice.construct_from(invoice_fields, "sk_test_123"),
            ],
            "has_more": False,
            "url": "/v1/invoices",
        }
        with mock.patch("stripe.Invoice.list") as list_mock:
            list_mock.return_value = stripe.Invoice.construct_from(
                list_payload, "sk_test_123"
            )

            trimmed = list_invoices(
                context={"account": "acct_123"}, customer="cus_123"
            )

            # context["account"] becomes the stripe_account keyword.
            list_mock.assert_called_with(
                customer="cus_123",
                stripe_account="acct_123",
            )

            self.assertEqual(trimmed, [invoice_fields])

    def test_create_invoice(self):
        """create_invoice defaults to send_invoice collection with a
        30-day due window and returns the trimmed invoice fields."""
        invoice_fields = {
            "id": "in_123",
            "hosted_invoice_url": "https://example.com",
            "customer": "cus_123",
            "status": "open",
        }
        with mock.patch("stripe.Invoice.create") as create_mock:
            create_mock.return_value = stripe.Invoice.construct_from(
                invoice_fields, "sk_test_123"
            )

            created = create_invoice(context={}, customer="cus_123")

            # Defaults the tool layer is expected to supply on every create.
            create_mock.assert_called_with(
                customer="cus_123",
                collection_method="send_invoice",
                days_until_due=30,
            )

            # Same four keys as invoice_fields, so the dicts compare equal.
            self.assertEqual(created, invoice_fields)

    def test_create_invoice_with_context(self):
        """create_invoice adds stripe_account when a context account is set."""
        invoice_fields = {
            "id": "in_123",
            "hosted_invoice_url": "https://example.com",
            "customer": "cus_123",
            "status": "open",
        }
        with mock.patch("stripe.Invoice.create") as create_mock:
            create_mock.return_value = stripe.Invoice.construct_from(
                invoice_fields, "sk_test_123"
            )

            created = create_invoice(
                context={"account": "acct_123"}, customer="cus_123"
            )

            create_mock.assert_called_with(
                customer="cus_123",
                collection_method="send_invoice",
                days_until_due=30,
                stripe_account="acct_123",
            )

            self.assertEqual(created, invoice_fields)

    def test_create_invoice_item(self):
        """create_invoice_item forwards customer, price, and invoice, and
        returns only the item id and its parent invoice."""
        item_fields = {"id": "ii_123", "invoice": "in_123"}
        with mock.patch("stripe.InvoiceItem.create") as create_mock:
            create_mock.return_value = stripe.InvoiceItem.construct_from(
                item_fields, "sk_test_123"
            )

            created = create_invoice_item(
                context={},
                customer="cus_123",
                price="price_123",
                invoice="in_123",
            )

            create_mock.assert_called_with(
                customer="cus_123", price="price_123", invoice="in_123"
            )

            # Same two keys as item_fields, so the dicts compare equal.
            self.assertEqual(created, item_fields)

    def test_create_invoice_item_with_context(self):
        """create_invoice_item adds stripe_account from the context account."""
        item_fields = {"id": "ii_123", "invoice": "in_123"}
        with mock.patch("stripe.InvoiceItem.create") as create_mock:
            create_mock.return_value = stripe.InvoiceItem.construct_from(
                item_fields, "sk_test_123"
            )

            created = create_invoice_item(
                context={"account": "acct_123"},
                customer="cus_123",
                price="price_123",
                invoice="in_123",
            )

            create_mock.assert_called_with(
                customer="cus_123",
                price="price_123",
                invoice="in_123",
                stripe_account="acct_123",
            )

            self.assertEqual(created, item_fields)

    def test_finalize_invoice(self):
        """finalize_invoice finalizes by id and returns the trimmed invoice."""
        invoice_fields = {
            "id": "in_123",
            "hosted_invoice_url": "https://example.com",
            "customer": "cus_123",
            "status": "open",
        }
        with mock.patch("stripe.Invoice.finalize_invoice") as finalize_mock:
            finalize_mock.return_value = stripe.Invoice.construct_from(
                invoice_fields, "sk_test_123"
            )

            finalized = finalize_invoice(context={}, invoice="in_123")

            finalize_mock.assert_called_with(invoice="in_123")

            # Same four keys as invoice_fields, so the dicts compare equal.
            self.assertEqual(finalized, invoice_fields)

    def test_finalize_invoice_with_context(self):
        """finalize_invoice adds stripe_account from the context account."""
        invoice_fields = {
            "id": "in_123",
            "hosted_invoice_url": "https://example.com",
            "customer": "cus_123",
            "status": "open",
        }
        with mock.patch("stripe.Invoice.finalize_invoice") as finalize_mock:
            finalize_mock.return_value = stripe.Invoice.construct_from(
                invoice_fields, "sk_test_123"
            )

            finalized = finalize_invoice(
                context={"account": "acct_123"}, invoice="in_123"
            )

            finalize_mock.assert_called_with(
                invoice="in_123", stripe_account="acct_123"
            )

            self.assertEqual(finalized, invoice_fields)

    def test_retrieve_balance(self):
        """retrieve_balance calls Stripe with no arguments and returns the
        balance payload unmodified."""
        balance_payload = {"available": [{"amount": 1000, "currency": "usd"}]}
        with mock.patch("stripe.Balance.retrieve") as retrieve_mock:
            retrieve_mock.return_value = stripe.Balance.construct_from(
                balance_payload, "sk_test_123"
            )

            balance = retrieve_balance(context={})

            retrieve_mock.assert_called_with()
            self.assertEqual(balance, balance_payload)

    def test_retrieve_balance_with_context(self):
        """retrieve_balance passes only stripe_account when the context
        carries a connected account."""
        balance_payload = {"available": [{"amount": 1000, "currency": "usd"}]}
        with mock.patch("stripe.Balance.retrieve") as retrieve_mock:
            retrieve_mock.return_value = stripe.Balance.construct_from(
                balance_payload, "sk_test_123"
            )

            balance = retrieve_balance(context={"account": "acct_123"})

            retrieve_mock.assert_called_with(stripe_account="acct_123")
            self.assertEqual(balance, balance_payload)

    def test_create_refund(self):
        """create_refund refunds a payment intent in full and returns the id."""
        refund_fields = {"id": "re_123"}
        with mock.patch("stripe.Refund.create") as refund_mock:
            refund_mock.return_value = stripe.Refund.construct_from(
                refund_fields, "sk_test_123"
            )

            refund = create_refund(context={}, payment_intent="pi_123")

            # No amount given, so none is forwarded (full refund).
            refund_mock.assert_called_with(payment_intent="pi_123")
            self.assertEqual(refund, refund_fields)

    def test_create_partial_refund(self):
        """create_refund forwards an explicit amount for partial refunds."""
        refund_fields = {"id": "re_123"}
        with mock.patch("stripe.Refund.create") as refund_mock:
            refund_mock.return_value = stripe.Refund.construct_from(
                refund_fields, "sk_test_123"
            )

            refund = create_refund(
                context={}, payment_intent="pi_123", amount=1000
            )

            refund_mock.assert_called_with(payment_intent="pi_123", amount=1000)
            self.assertEqual(refund, refund_fields)

    def test_create_refund_with_context(self):
        """create_refund adds stripe_account from the context account."""
        refund_fields = {"id": "re_123"}
        with mock.patch("stripe.Refund.create") as refund_mock:
            refund_mock.return_value = stripe.Refund.construct_from(
                refund_fields, "sk_test_123"
            )

            refund = create_refund(
                context={"account": "acct_123"},
                payment_intent="pi_123",
                amount=1000,
            )

            refund_mock.assert_called_with(
                payment_intent="pi_123", amount=1000, stripe_account="acct_123"
            )
            self.assertEqual(refund, refund_fields)

    def test_list_payment_intents(self):
        """list_payment_intents returns Stripe's data array unchanged."""
        intent_rows = [{"id": "pi_123"}, {"id": "pi_456"}]
        with mock.patch("stripe.PaymentIntent.list") as list_mock:
            list_mock.return_value = stripe.ListObject.construct_from(
                {"data": intent_rows}, "sk_test_123"
            )

            listed = list_payment_intents(context={})

            list_mock.assert_called_with()
            self.assertEqual(listed, intent_rows)

    def test_list_payment_intents_with_context(self):
        """list_payment_intents adds stripe_account from the context account."""
        intent_rows = [{"id": "pi_123"}, {"id": "pi_456"}]
        with mock.patch("stripe.PaymentIntent.list") as list_mock:
            list_mock.return_value = stripe.ListObject.construct_from(
                {"data": intent_rows}, "sk_test_123"
            )

            listed = list_payment_intents(context={"account": "acct_123"})

            list_mock.assert_called_with(stripe_account="acct_123")
            self.assertEqual(listed, intent_rows)


    def test_create_billing_portal_session(self):
        """create_billing_portal_session returns only id, url, and customer,
        dropping other session fields such as configuration."""
        session_fields = {
            "id": "bps_123",
            "url": "https://example.com",
            "customer": "cus_123",
            "configuration": "bpc_123",
        }
        with mock.patch("stripe.billing_portal.Session.create") as session_mock:
            session_mock.return_value = (
                stripe.billing_portal.Session.construct_from(
                    session_fields, "sk_test_123"
                )
            )

            session = create_billing_portal_session(
                context={}, customer="cus_123"
            )

            session_mock.assert_called_with(customer="cus_123")

            self.assertEqual(
                session,
                {
                    "id": session_fields["id"],
                    "url": session_fields["url"],
                    "customer": session_fields["customer"],
                },
            )

    def test_create_billing_portal_session_with_return_url(self):
        """create_billing_portal_session forwards an optional return_url."""
        session_fields = {
            "id": "bps_123",
            "url": "https://example.com",
            "customer": "cus_123",
            "configuration": "bpc_123",
        }
        with mock.patch("stripe.billing_portal.Session.create") as session_mock:
            session_mock.return_value = (
                stripe.billing_portal.Session.construct_from(
                    session_fields, "sk_test_123"
                )
            )

            session = create_billing_portal_session(
                context={},
                customer="cus_123",
                return_url="http://example.com",
            )

            session_mock.assert_called_with(
                customer="cus_123",
                return_url="http://example.com",
            )

            self.assertEqual(
                session,
                {
                    "id": session_fields["id"],
                    "url": session_fields["url"],
                    "customer": session_fields["customer"],
                },
            )

    def test_create_billing_portal_session_with_context(self):
        """create_billing_portal_session adds stripe_account from the
        context account alongside the other arguments."""
        session_fields = {
            "id": "bps_123",
            "url": "https://example.com",
            "customer": "cus_123",
            "configuration": "bpc_123",
        }
        with mock.patch("stripe.billing_portal.Session.create") as session_mock:
            session_mock.return_value = (
                stripe.billing_portal.Session.construct_from(
                    session_fields, "sk_test_123"
                )
            )

            session = create_billing_portal_session(
                context={"account": "acct_123"},
                customer="cus_123",
                return_url="http://example.com",
            )

            session_mock.assert_called_with(
                customer="cus_123",
                return_url="http://example.com",
                stripe_account="acct_123",
            )

            self.assertEqual(
                session,
                {
                    "id": session_fields["id"],
                    "url": session_fields["url"],
                    "customer": session_fields["customer"],
                },
            )

# Allow running this test module directly (`python <file>.py`) in addition
# to discovery via a test runner.
if __name__ == "__main__":
    unittest.main()

```
Page 4/5FirstPrevNextLast