diff --git a/config-schema.json b/config-schema.json index 336ba24d..1eafef7b 100644 --- a/config-schema.json +++ b/config-schema.json @@ -655,14 +655,20 @@ }, "tokenLimit": { "type": "string", - "description": "Token limit for quota warnings (e.g., 500000 or \"max\")", - "markdownDescription": "Token limit for quota warnings (e.g., 500000 or \"max\")" + "description": "Token limit for quota warnings (number: e.g. 50000, \"max\", \"avg\", or \"median\")", + "markdownDescription": "Token limit for quota warnings (number: e.g. 50000, \"max\", \"avg\", or \"median\")" }, "sessionLength": { "type": "number", "description": "Session block duration in hours (default: 5)", "markdownDescription": "Session block duration in hours (default: 5)", "default": 5 + }, + "tokenLimitSessions": { + "type": "number", + "description": "Number of recent completed sessions to use for token limit calculation (default: 10)", + "markdownDescription": "Number of recent completed sessions to use for token limit calculation (default: 10)", + "default": 10 } }, "additionalProperties": false diff --git a/docs/guide/blocks-reports.md b/docs/guide/blocks-reports.md index a9ae603e..22492062 100644 --- a/docs/guide/blocks-reports.md +++ b/docs/guide/blocks-reports.md @@ -83,16 +83,21 @@ Perfect for understanding recent usage patterns without scrolling through all hi ### Token Limit Tracking -Set token limits to monitor quota usage: +Set token limits to monitor quota usage with flexible calculation methods: ```bash # Set explicit token limit ccusage blocks --token-limit 500000 -# Use highest previous block as limit -ccusage blocks --token-limit max -# or short form: +# Use automatic calculation methods +ccusage blocks --token-limit max # Highest previous block (default) +ccusage blocks --token-limit avg # Average of all blocks +ccusage blocks --token-limit median # Median of all blocks + +# Short forms ccusage blocks -t max +ccusage blocks -t avg +ccusage blocks -t median ``` When limits are set, 
blocks display: @@ -101,6 +106,149 @@ When limits are set, blocks display: - 🚨 **Alert indicators** when exceeding limits - **Progress bars** showing usage relative to limit +#### Token Limit Calculation Methods + +Choose how automatic token limits are calculated from your usage history using the `--token-limit` option with method keywords: + +```bash +# Maximum tokens from any block (default - conservative) +ccusage blocks --token-limit max + +# Average tokens across all blocks (balanced) +ccusage blocks --token-limit avg + +# Median tokens across all blocks (robust against outliers) +ccusage blocks --token-limit median +``` + +#### Limiting Calculation to Recent Sessions + +Control how many recent blocks are used for automatic limit calculation: + +```bash +# Use only the 10 most recent blocks (default) +ccusage blocks --token-limit-sessions 10 + +# Use only the 5 most recent blocks (more responsive to patterns) +ccusage blocks --token-limit-sessions 5 + +# Use only the last block +ccusage blocks --token-limit-sessions 1 +``` + +#### Advanced Token Limit Examples + +Combine calculation methods with session limits for precise control: + +```bash +# Conservative: Maximum of last 5 blocks +ccusage blocks --token-limit max --token-limit-sessions 5 + +# Balanced: Average of last 10 blocks +ccusage blocks --token-limit avg --token-limit-sessions 10 + +# Robust: Median of last 15 blocks (ignores outliers) +ccusage blocks --token-limit median --token-limit-sessions 15 + +# Recent pattern focus: Average of last 3 blocks +ccusage blocks --token-limit avg --token-limit-sessions 3 +``` + +#### Understanding Calculation Methods + +**Maximum (max)** - Most conservative approach: +```bash +ccusage blocks --token-limit max --token-limit-sessions 10 +``` +- **Best for**: Cost control, avoiding budget overruns +- **Behavior**: Uses your highest usage block as the limit +- **Result**: Most restrictive warnings, earliest alerts +- **Use when**: You want strict budget management + 
+**Average (avg)** - Balanced approach: +```bash +ccusage blocks --token-limit avg --token-limit-sessions 10 +``` +- **Best for**: Typical usage patterns, consistent workflows +- **Behavior**: Uses your average block usage as the limit +- **Result**: Moderate warnings based on normal usage +- **Use when**: You have consistent usage patterns + +**Median (median)** - Robust against outliers: +```bash +ccusage blocks --token-limit median --token-limit-sessions 10 +``` +- **Best for**: Variable usage with occasional large sessions +- **Behavior**: Ignores extreme high/low usage blocks +- **Result**: Warnings based on typical (not average) usage +- **Use when**: You have mixed session types or occasional spikes + +#### Practical Token Limit Scenarios + +**Scenario 1: New User Learning Patterns** +```bash +# Start conservative, adjust based on experience +ccusage blocks --token-limit max --token-limit-sessions 3 +``` +- **Why**: Uses maximum of last 3 sessions for safety +- **Benefit**: Prevents accidental overuse while learning +- **Adjustment**: Switch to `avg` method once patterns emerge + +**Scenario 2: Consistent Daily Developer** +```bash +# Balanced approach based on typical usage +ccusage blocks --token-limit avg --token-limit-sessions 10 +``` +- **Why**: Average of last 10 sessions reflects normal workflow +- **Benefit**: Warnings at appropriate levels for usual work +- **Adjustment**: Increase sessions count during routine changes + +**Scenario 3: Project-Based Usage (Variable Intensity)** +```bash +# Median handles both light and heavy project phases +ccusage blocks --token-limit median --token-limit-sessions 15 +``` +- **Why**: Median ignores occasional large refactoring sessions +- **Benefit**: Consistent warnings despite usage variability +- **Adjustment**: Use `max` during intense project phases + +**Scenario 4: Budget-Conscious Team Lead** +```bash +# Conservative limits with recent pattern awareness +ccusage blocks --token-limit max 
--token-limit-sessions 7 +``` +- **Why**: Maximum of last week prevents budget overruns +- **Benefit**: Early warnings help maintain cost discipline +- **Adjustment**: Combine with explicit limits during month-end + +**Scenario 5: Experimental Developer (High Variance)** +```bash +# Robust calculation ignoring outliers +ccusage blocks --token-limit median --token-limit-sessions 20 +``` +- **Why**: Large sample size with outlier protection +- **Benefit**: Stable warnings despite experimental sessions +- **Adjustment**: Switch to `avg` during stable development phases + +#### Understanding Calculation Methods with Session Limits + +The unified `--token-limit` option supports flexible calculation methods: + +**Current Unified Approach**: +```bash +# Choose your calculation method with a single option +ccusage blocks --token-limit avg --token-limit-sessions 10 +ccusage blocks --token-limit median --token-limit-sessions 15 +ccusage blocks --token-limit max --token-limit-sessions 5 +``` + +**Key Benefits**: +- **Better Accuracy**: Methods reflect actual usage patterns +- **Outlier Handling**: Median method ignores extreme sessions +- **Recent Focus**: Session limits consider recent workflow changes +- **Flexibility**: Multiple methods for different use cases +- **Simplified Syntax**: Single `--token-limit` option with method keywords or explicit limits + ### Live Monitoring Real-time dashboard with automatic updates: diff --git a/docs/guide/cli-options.md b/docs/guide/cli-options.md index 2b581504..3cd2f07f 100644 --- a/docs/guide/cli-options.md +++ b/docs/guide/cli-options.md @@ -209,15 +209,49 @@ ccusage blocks -r # Set token limit for warnings ccusage blocks --token-limit 500000 ccusage blocks --token-limit max +ccusage blocks --token-limit avg +ccusage blocks --token-limit median + +# Limit calculation to recent sessions +ccusage blocks --token-limit-sessions 10 # Use last 10 sessions (default) +ccusage blocks --token-limit-sessions 5 # Use last 5 sessions + +# 
Combined token limit options +ccusage blocks --token-limit avg --token-limit-sessions 10 +ccusage blocks --token-limit median --token-limit-sessions 15 # Live monitoring mode ccusage blocks --live ccusage blocks --live --refresh-interval 2 +ccusage blocks --live --token-limit avg --token-limit-sessions 10 # Customize session length ccusage blocks --session-length 5 ``` +#### Token Limit Calculation Options + +| Option | Values | Description | +|--------|---------|-------------| +| `--token-limit` | `max`, `avg`, `median`, number | Set token limit (auto-calculated or explicit) | +| `--token-limit-sessions` | number | Limit calculation to N most recent sessions | + +**Examples:** + +```bash +# Conservative: Maximum of last 5 sessions +ccusage blocks --token-limit max --token-limit-sessions 5 + +# Balanced: Average of last 10 sessions +ccusage blocks --token-limit avg --token-limit-sessions 10 + +# Robust: Median of all sessions (ignores outliers) +ccusage blocks --token-limit median + +# Explicit limit (overrides calculation methods) +ccusage blocks --token-limit 750000 +``` + ### MCP Server Options for MCP server: diff --git a/docs/guide/live-monitoring.md b/docs/guide/live-monitoring.md index f6b6e4ed..96bd940c 100644 --- a/docs/guide/live-monitoring.md +++ b/docs/guide/live-monitoring.md @@ -32,7 +32,7 @@ The dashboard refreshes every second, showing: ### Token Limits -Set custom token limits for quota warnings: +Set custom token limits for quota warnings with flexible calculation methods: ```bash # Use specific token limit @@ -41,10 +41,76 @@ ccusage blocks --live -t 500000 # Use highest previous session as limit (default) ccusage blocks --live -t max -# Explicitly set max (same as default) -ccusage blocks --live -t max +# Use average of all previous sessions +ccusage blocks --live -t avg + +# Use median of all previous sessions +ccusage blocks --live -t median +``` + +#### Token Limit Calculation Methods + +Choose how the automatic token limit is calculated from 
your usage history: + +```bash +# Maximum tokens from any session (default - conservative) +ccusage blocks --live --token-limit max + +# Average tokens across all sessions (balanced) +ccusage blocks --live --token-limit avg + +# Median tokens across all sessions (robust against outliers) +ccusage blocks --live --token-limit median ``` +#### Limiting Calculation to Recent Sessions + +Control how many recent sessions are used for automatic limit calculation: + +```bash +# Use only the 10 most recent sessions for calculation (default) +ccusage blocks --live --token-limit-sessions 10 + +# Use only the 5 most recent sessions (more responsive to recent patterns) +ccusage blocks --live --token-limit-sessions 5 + +# Use only the last session (equivalent to -t max with single session) +ccusage blocks --live --token-limit-sessions 1 +``` + +#### Combined Token Limit Options + +```bash +# Average of last 10 sessions (balanced and recent) +ccusage blocks --live --token-limit avg --token-limit-sessions 10 + +# Median of last 15 sessions (robust and comprehensive) +ccusage blocks --live --token-limit median --token-limit-sessions 15 + +# Maximum of last 5 sessions (conservative but responsive) +ccusage blocks --live --token-limit max --token-limit-sessions 5 + +# Short aliases +ccusage blocks --live --token-limit avg --token-limit-sessions 10 +``` + +#### When to Use Each Method + +**Maximum (max)** - Most conservative approach: +- Best for: Cost-conscious usage, avoiding overruns +- Behavior: Sets limit to your highest usage session +- Use when: You want strict budget control + +**Average (avg)** - Balanced approach: +- Best for: Typical usage patterns, steady workflows +- Behavior: Sets limit based on your typical usage +- Use when: You have consistent usage patterns + +**Median (median)** - Robust against outliers: +- Best for: Variable usage with occasional spikes +- Behavior: Ignores extreme high/low sessions +- Use when: You have mixed session types (quick questions + long 
projects) + ### Refresh Interval Control update frequency: diff --git a/src/_consts.ts b/src/_consts.ts index 551c6382..ea77467f 100644 --- a/src/_consts.ts +++ b/src/_consts.ts @@ -31,6 +31,12 @@ export const BLOCKS_COMPACT_WIDTH_THRESHOLD = 120; */ export const BLOCKS_DEFAULT_TERMINAL_WIDTH = 120; +/** + * Default number of recent completed sessions to use for token limit calculation in the blocks command + * Used when automatic token limit detection is enabled to analyze usage patterns + */ +export const DEFAULT_TOKEN_LIMIT_SESSIONS = 10; + /** * Threshold percentage for considering costs as matching (0.1% tolerance) * Used in debug cost validation to allow for minor calculation differences diff --git a/src/commands/blocks.ts b/src/commands/blocks.ts index d21a4c93..b5e1c939 100644 --- a/src/commands/blocks.ts +++ b/src/commands/blocks.ts @@ -4,7 +4,7 @@ import { Result } from '@praha/byethrow'; import { define } from 'gunshi'; import pc from 'picocolors'; import { loadConfig, mergeConfigWithArgs } from '../_config-loader-tokens.ts'; -import { BLOCKS_COMPACT_WIDTH_THRESHOLD, BLOCKS_DEFAULT_TERMINAL_WIDTH, BLOCKS_WARNING_THRESHOLD, DEFAULT_RECENT_DAYS, DEFAULT_REFRESH_INTERVAL_SECONDS, MAX_REFRESH_INTERVAL_SECONDS, MIN_REFRESH_INTERVAL_SECONDS } from '../_consts.ts'; +import { BLOCKS_COMPACT_WIDTH_THRESHOLD, BLOCKS_DEFAULT_TERMINAL_WIDTH, BLOCKS_WARNING_THRESHOLD, DEFAULT_RECENT_DAYS, DEFAULT_REFRESH_INTERVAL_SECONDS, DEFAULT_TOKEN_LIMIT_SESSIONS, MAX_REFRESH_INTERVAL_SECONDS, MIN_REFRESH_INTERVAL_SECONDS } from '../_consts.ts'; import { processWithJq } from '../_jq-processor.ts'; import { calculateBurnRate, @@ -92,18 +92,37 @@ function formatModels(models: string[]): string { } /** - * Parses token limit argument, supporting 'max' keyword + * Parses token limit argument, supporting 'max', 'avg', and 'median' keywords * @param value - Token limit string value - * @param maxFromAll - Maximum token count found in all blocks - * @returns Parsed token limit or 
undefined if invalid + * @param calculatedLimit - Calculated token limit based on selected method and sessions + * @returns Object containing parsed limit and method */ -function parseTokenLimit(value: string | undefined, maxFromAll: number): number | undefined { - if (value == null || value === '' || value === 'max') { - return maxFromAll > 0 ? maxFromAll : undefined; +function parseTokenLimit(value: string | undefined | null, calculatedLimit: number): { + limit: number | undefined; + method: 'max' | 'avg' | 'median'; +} { + const v = typeof value === 'string' ? value.trim().toLowerCase() : ''; + + // Determine method + let method: 'max' | 'avg' | 'median' = 'max'; + if (v === 'avg') { + method = 'avg'; + } + else if (v === 'median') { + method = 'median'; } - const limit = Number.parseInt(value, 10); - return Number.isNaN(limit) ? undefined : limit; + // Determine limit + let limit: number | undefined; + if (value === null || value === undefined || value === '' || v === 'max' || v === 'avg' || v === 'median') { + limit = calculatedLimit > 0 ? calculatedLimit : undefined; + } + else { + const parsedLimit = Number.parseInt(value, 10); + limit = Number.isNaN(parsedLimit) ? undefined : parsedLimit; + } + + return { limit, method }; } export const blocksCommand = define({ @@ -126,7 +145,7 @@ export const blocksCommand = define({ tokenLimit: { type: 'string', short: 't', - description: 'Token limit for quota warnings (e.g., 500000 or "max")', + description: 'Token limit for quota warnings (number: e.g. 
50000, "max", "avg", or "median")', }, sessionLength: { type: 'number', @@ -134,6 +153,11 @@ export const blocksCommand = define({ description: `Session block duration in hours (default: ${DEFAULT_SESSION_DURATION_HOURS})`, default: DEFAULT_SESSION_DURATION_HOURS, }, + tokenLimitSessions: { + type: 'number', + description: `Number of recent completed sessions to use for token limit calculation (default: ${DEFAULT_TOKEN_LIMIT_SESSIONS})`, + default: DEFAULT_TOKEN_LIMIT_SESSIONS, + }, live: { type: 'boolean', description: 'Live monitoring mode with real-time updates', @@ -152,8 +176,8 @@ export const blocksCommand = define({ const mergedOptions = mergeConfigWithArgs(ctx, config, ctx.values.debug); // --jq implies --json - const useJson = mergedOptions.json || mergedOptions.jq != null; - if (useJson) { + const useJson = mergedOptions.json === true || mergedOptions.jq != null; + if (useJson === true) { logger.level = 0; } @@ -163,6 +187,12 @@ export const blocksCommand = define({ process.exit(1); } + // Validate token limit sessions + if (ctx.values.tokenLimitSessions != null && ctx.values.tokenLimitSessions <= 0) { + logger.error('Token limit sessions must be a positive number'); + process.exit(1); + } + let blocks = await loadSessionBlockData({ since: ctx.values.since, until: ctx.values.until, @@ -175,7 +205,7 @@ export const blocksCommand = define({ }); if (blocks.length === 0) { - if (useJson) { + if (useJson === true) { log(JSON.stringify({ blocks: [] })); } else { @@ -184,31 +214,71 @@ export const blocksCommand = define({ process.exit(0); } - // Calculate max tokens from ALL blocks before applying filters - let maxTokensFromAll = 0; - if (ctx.values.tokenLimit === 'max' || ctx.values.tokenLimit == null || ctx.values.tokenLimit === '') { + // Calculate token limit from recent completed sessions + let calculatedTokenLimit = 0; + const { method: tokenLimitMethod } = parseTokenLimit(ctx.values.tokenLimit, 0); + const tokenLimitSessions = 
ctx.values.tokenLimitSessions; // null means all sessions + + if (ctx.values.tokenLimit === 'max' || ctx.values.tokenLimit === 'avg' || ctx.values.tokenLimit === 'median' || ctx.values.tokenLimit == null || ctx.values.tokenLimit === '') { + const completedBlocks: SessionBlock[] = []; + + // Collect all completed blocks (non-gap, non-active) for (const block of blocks) { if (!(block.isGap ?? false) && !block.isActive) { - const blockTokens = getTotalTokens(block.tokenCounts); - if (blockTokens > maxTokensFromAll) { - maxTokensFromAll = blockTokens; + completedBlocks.push(block); + } + } + + // Sort by start time (most recent first) and take the specified number + completedBlocks.sort((a, b) => b.startTime.getTime() - a.startTime.getTime()); + const blocksToUse = tokenLimitSessions != null + ? completedBlocks.slice(0, tokenLimitSessions) + : completedBlocks; + + if (blocksToUse.length > 0) { + const blockTokens = blocksToUse.map(block => getTotalTokens(block.tokenCounts)); + + switch (tokenLimitMethod) { + case 'avg': { + calculatedTokenLimit = Math.round( + blockTokens.reduce((sum, tokens) => sum + tokens, 0) / blockTokens.length, + ); + break; + } + case 'median': { + const sortedTokens = [...blockTokens].sort((a, b) => a - b); + const mid = Math.floor(sortedTokens.length / 2); + calculatedTokenLimit = sortedTokens.length % 2 === 0 + ? Math.round(((sortedTokens[mid - 1] ?? 0) + (sortedTokens[mid] ?? 0)) / 2) + : (sortedTokens[mid] ?? 0); + break; } + case 'max': + default: + calculatedTokenLimit = Math.max(...blockTokens); + break; + } + + if (useJson !== true) { + const sessionsText = blocksToUse.length === 1 ? 'session' : 'sessions'; + const recentText = tokenLimitSessions != null ? 
` recent` : ''; + logger.info(`Using ${tokenLimitMethod} tokens from ${blocksToUse.length}${recentText} ${sessionsText}: ${formatNumber(calculatedTokenLimit)}`); } } - if (!useJson && maxTokensFromAll > 0) { - logger.info(`Using max tokens from previous sessions: ${formatNumber(maxTokensFromAll)}`); + else if (useJson !== true) { + logger.warn('No completed sessions found for token limit calculation'); } } // Apply filters - if (ctx.values.recent) { + if (ctx.values.recent === true) { blocks = filterRecentBlocks(blocks, DEFAULT_RECENT_DAYS); } - if (ctx.values.active) { + if (ctx.values.active === true) { blocks = blocks.filter((block: SessionBlock) => block.isActive); if (blocks.length === 0) { - if (useJson) { + if (useJson === true) { log(JSON.stringify({ blocks: [], message: 'No active block' })); } else { @@ -219,9 +289,9 @@ export const blocksCommand = define({ } // Live monitoring mode - if (ctx.values.live && !useJson) { + if (ctx.values.live === true && useJson !== true) { // Live mode only shows active blocks - if (!ctx.values.active) { + if (ctx.values.active !== true) { logger.info('Live mode automatically shows only active blocks.'); } @@ -229,8 +299,8 @@ export const blocksCommand = define({ let tokenLimitValue = ctx.values.tokenLimit; if (tokenLimitValue == null || tokenLimitValue === '') { tokenLimitValue = 'max'; - if (maxTokensFromAll > 0) { - logger.info(`No token limit specified, using max from previous sessions: ${formatNumber(maxTokensFromAll)}`); + if (calculatedTokenLimit > 0) { + logger.info(`No token limit specified, using max from previous sessions: ${formatNumber(calculatedTokenLimit)}`); } } @@ -249,7 +319,7 @@ export const blocksCommand = define({ await startLiveMonitoring({ claudePaths: paths, - tokenLimit: parseTokenLimit(tokenLimitValue, maxTokensFromAll), + tokenLimit: parseTokenLimit(tokenLimitValue, calculatedTokenLimit).limit, refreshInterval: refreshInterval * 1000, // Convert to milliseconds sessionDurationHours: 
ctx.values.sessionLength, mode: ctx.values.mode, @@ -258,7 +328,7 @@ export const blocksCommand = define({ return; // Exit early, don't show table } - if (useJson) { + if (useJson === true) { // JSON output const jsonOutput = { blocks: blocks.map((block: SessionBlock) => { @@ -281,7 +351,7 @@ export const blocksCommand = define({ projection, tokenLimitStatus: projection != null && ctx.values.tokenLimit != null ? (() => { - const limit = parseTokenLimit(ctx.values.tokenLimit, maxTokensFromAll); + const { limit } = parseTokenLimit(ctx.values.tokenLimit, calculatedTokenLimit); return limit != null ? { limit, @@ -314,7 +384,7 @@ export const blocksCommand = define({ } else { // Table output - if (ctx.values.active && blocks.length === 1) { + if (ctx.values.active === true && blocks.length === 1) { // Detailed active block view const block = blocks[0] as SessionBlock; if (block == null) { @@ -355,7 +425,7 @@ export const blocksCommand = define({ if (ctx.values.tokenLimit != null) { // Parse token limit - const limit = parseTokenLimit(ctx.values.tokenLimit, maxTokensFromAll); + const { limit } = parseTokenLimit(ctx.values.tokenLimit, calculatedTokenLimit); if (limit != null && limit > 0) { const currentTokens = getTotalTokens(block.tokenCounts); const remainingTokens = Math.max(0, limit - currentTokens); @@ -380,7 +450,7 @@ export const blocksCommand = define({ logger.box('Claude Code Token Usage Report - Session Blocks'); // Calculate token limit if "max" is specified - const actualTokenLimit = parseTokenLimit(ctx.values.tokenLimit, maxTokensFromAll); + const { limit: actualTokenLimit } = parseTokenLimit(ctx.values.tokenLimit, calculatedTokenLimit); const tableHeaders = ['Block Start', 'Duration/Status', 'Models', 'Tokens']; const tableAligns: ('left' | 'right' | 'center')[] = ['left', 'left', 'left', 'right']; @@ -404,9 +474,9 @@ export const blocksCommand = define({ // Use compact format if: // 1. User explicitly requested it with --compact flag // 2. 
Terminal width is below threshold - const terminalWidth = process.stdout.columns || BLOCKS_DEFAULT_TERMINAL_WIDTH; + const terminalWidth = process.stdout.columns ?? BLOCKS_DEFAULT_TERMINAL_WIDTH; const isNarrowTerminal = terminalWidth < BLOCKS_COMPACT_WIDTH_THRESHOLD; - const useCompactFormat = ctx.values.compact || isNarrowTerminal; + const useCompactFormat = ctx.values.compact === true || isNarrowTerminal; for (const block of blocks) { if (block.isGap ?? false) { @@ -506,3 +576,651 @@ export const blocksCommand = define({ } }, }); + +if (import.meta.vitest != null) { + /* eslint-disable ts/no-unused-vars, ts/no-unsafe-member-access, ts/no-unsafe-argument */ + + describe('parseTokenLimit', () => { + it('returns calculated limit and correct method when value is null or empty', () => { + expect(parseTokenLimit(undefined, 500)).toEqual({ limit: 500, method: 'max' }); + expect(parseTokenLimit('', 500)).toEqual({ limit: 500, method: 'max' }); + expect(parseTokenLimit(null, 500)).toEqual({ limit: 500, method: 'max' }); + }); + + it('returns calculated limit and correct method when value is method keyword', () => { + expect(parseTokenLimit('max', 500)).toEqual({ limit: 500, method: 'max' }); + expect(parseTokenLimit('avg', 500)).toEqual({ limit: 500, method: 'avg' }); + expect(parseTokenLimit('median', 500)).toEqual({ limit: 500, method: 'median' }); + }); + + it('returns undefined limit when calculated limit is 0', () => { + expect(parseTokenLimit(undefined, 0)).toEqual({ limit: undefined, method: 'max' }); + expect(parseTokenLimit('', 0)).toEqual({ limit: undefined, method: 'max' }); + expect(parseTokenLimit('max', 0)).toEqual({ limit: undefined, method: 'max' }); + expect(parseTokenLimit('avg', 0)).toEqual({ limit: undefined, method: 'avg' }); + expect(parseTokenLimit('median', 0)).toEqual({ limit: undefined, method: 'median' }); + }); + + it('parses numeric values correctly with default method', () => { + expect(parseTokenLimit('1000', 500)).toEqual({ limit: 1000, 
method: 'max' }); + expect(parseTokenLimit('0', 500)).toEqual({ limit: 0, method: 'max' }); + expect(parseTokenLimit('999999', 500)).toEqual({ limit: 999999, method: 'max' }); + }); + + it('returns undefined limit for invalid numeric values with default method', () => { + expect(parseTokenLimit('invalid', 500)).toEqual({ limit: undefined, method: 'max' }); + expect(parseTokenLimit('12.5', 500)).toEqual({ limit: 12, method: 'max' }); // parseInt parses "12.5" as 12 + expect(parseTokenLimit('-100', 500)).toEqual({ limit: -100, method: 'max' }); // parseInt parses negative numbers + }); + + it('handles whitespace and case insensitivity for methods', () => { + expect(parseTokenLimit(' MAX ', 500)).toEqual({ limit: 500, method: 'max' }); + expect(parseTokenLimit(' Avg ', 500)).toEqual({ limit: 500, method: 'avg' }); + expect(parseTokenLimit(' MEDIAN ', 500)).toEqual({ limit: 500, method: 'median' }); + }); + }); + + // Shared mock data creation function + async function createMockData(blocks: Array<{ + startTime: string; + isGap?: boolean; + isActive?: boolean; + inputTokens: number; + outputTokens: number; + model?: string; + costUSD?: number; + }>): Promise>> { + // Create directory structure to match Claude data layout + const fixtureStructure: Record = { + projects: {}, + }; + + // Create session files for each block + blocks.forEach((block, index) => { + const sessionId = `session-${index + 1}`; + const projectName = 'test-project'; + + if (fixtureStructure.projects[projectName] == null) { + fixtureStructure.projects[projectName] = {}; + } + + const entry = { + timestamp: block.startTime, + sessionId, + message: { + id: `msg_${index + 1}`, + model: block.model ?? 'claude-sonnet-4-20250514', + usage: { + input_tokens: block.inputTokens, + output_tokens: block.outputTokens, + cache_creation_input_tokens: 0, + cache_read_input_tokens: 0, + }, + }, + requestId: `req_${index + 1}`, + costUSD: block.costUSD ?? 
0.01, + version: '1.0.0', + }; + + fixtureStructure.projects[projectName][`${sessionId}.jsonl`] = `${JSON.stringify(entry)}\n`; + }); + + const { createFixture } = await import('fs-fixture'); + const fixture = await createFixture(fixtureStructure); + + // Set up environment variable to point to the fixture + vi.stubEnv('CLAUDE_CONFIG_DIR', fixture.path); + + return fixture; + } + + describe('Token Limit Calculation Logic', () => { + beforeEach(() => { + vi.stubEnv('HOME', '/test-home'); + vi.stubEnv('USERPROFILE', '/test-home'); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + }); + + it('calculates max token limit correctly', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, // total: 300 + { startTime: '2024-01-01T12:00:00Z', inputTokens: 150, outputTokens: 75 }, // total: 225 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + // Filter out gaps and active blocks + const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + expect(completedBlocks).toHaveLength(3); + + // Sort by start time (most recent first) - should be: 300, 225, 150 + completedBlocks.sort((a, b) => b.startTime.getTime() - a.startTime.getTime()); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + expect(blockTokens).toEqual([225, 300, 150]); + expect(Math.max(...blockTokens)).toBe(300); + }); + + it('calculates average token limit correctly', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, // total: 300 + { startTime: '2024-01-01T12:00:00Z', inputTokens: 250, outputTokens: 100 }, // total: 350 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + // Average of [350, 300, 150] = 800 / 3 = 266.67 -> Math.round = 267 + const average = Math.round(blockTokens.reduce((sum, tokens) => sum + tokens, 0) / blockTokens.length); + expect(average).toBe(267); + }); + + it('calculates median token limit correctly for odd count', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, // total: 300 + { startTime: '2024-01-01T12:00:00Z', inputTokens: 400, outputTokens: 100 }, // total: 500 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + // Sort tokens: [150, 300, 500], median = 300 (middle value) + const sortedTokens = [...blockTokens].sort((a, b) => a - b); + const mid = Math.floor(sortedTokens.length / 2); + expect(sortedTokens[mid]).toBe(300); + }); + + it('calculates median token limit correctly for even count', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, // total: 300 + { startTime: '2024-01-01T12:00:00Z', inputTokens: 400, outputTokens: 100 }, // total: 500 + { startTime: '2024-01-01T18:00:00Z', inputTokens: 500, outputTokens: 100 }, // total: 600 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const 
completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + // Sort tokens: [150, 300, 500, 600], median = (300 + 500) / 2 = 400 + const sortedTokens = [...blockTokens].sort((a, b) => a - b); + const mid = Math.floor(sortedTokens.length / 2); + const median = Math.round(((sortedTokens[mid - 1] ?? 0) + (sortedTokens[mid] ?? 0)) / 2); + expect(median).toBe(400); + }); + + it('limits sessions correctly when tokenLimitSessions is specified', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 0 }, // total: 100 (oldest) + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 0 }, // total: 200 + { startTime: '2024-01-01T12:00:00Z', inputTokens: 300, outputTokens: 0 }, // total: 300 + { startTime: '2024-01-01T18:00:00Z', inputTokens: 400, outputTokens: 0 }, // total: 400 + { startTime: '2024-01-02T00:00:00Z', inputTokens: 500, outputTokens: 0 }, // total: 500 (newest) + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + // Sort by start time (most recent first) + completedBlocks.sort((a, b) => b.startTime.getTime() - a.startTime.getTime()); + + // Take only 3 most recent sessions: [500, 400, 300] + const blocksToUse = completedBlocks.slice(0, 3); + const blockTokens = blocksToUse.map(block => getTotalTokens(block.tokenCounts)); + + expect(blockTokens).toEqual([500, 400, 300]); + expect(Math.max(...blockTokens)).toBe(500); + }); + + it('excludes gap blocks from calculation', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, // total: 300 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + // Manually mark one as gap for testing (simulates gap detection logic) + if (blocks.length >= 2) { + blocks[1]!.isGap = true; + } + + const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + // Expect at least 1 block (since we only marked one as gap, and others should not be active) + expect(completedBlocks.length).toBeGreaterThan(0); + + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + // Should include the block that wasn't marked as gap + expect(blockTokens).toContain(300); + }); + + it('excludes active blocks from calculation', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, // total: 300 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + // Manually mark the first block (which should be the most recent = 300 tokens) as active for testing + if (blocks.length >= 1) { + blocks[0]!.isActive = true; + } + + const completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive); + expect(completedBlocks).toHaveLength(1); + + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + // Should be 150 (the older block) since the 300 token block is marked as active + expect(Math.max(...blockTokens)).toBe(150); + }); + + it('returns 0 when no completed sessions are found', async () => { + await using _fixture = await createMockData([]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive); + expect(completedBlocks).toHaveLength(0); + + // Calculated limit should be 0 + const calculatedTokenLimit = completedBlocks.length > 0 ? 
Math.max(...completedBlocks.map(block => getTotalTokens(block.tokenCounts))) : 0;
+		expect(calculatedTokenLimit).toBe(0);
+	});
+
+	it('handles single session correctly for all methods', async () => {
+		await using _fixture = await createMockData([
+			{ startTime: '2024-01-01T00:00:00Z', inputTokens: 150, outputTokens: 75 }, // total: 225
+		]);
+
+		const blocks = await loadSessionBlockData({
+			sessionDurationHours: 5,
+			mode: 'display',
+			order: 'desc',
+			offline: true,
+			timezone: 'UTC',
+			locale: 'en-US',
+		});
+
+		const completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive);
+		expect(completedBlocks).toHaveLength(1);
+
+		const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts));
+		expect(blockTokens).toEqual([225]);
+
+		// All methods should return the same value for single session
+		expect(Math.max(...blockTokens)).toBe(225); // max
+		expect(Math.round(blockTokens.reduce((sum, tokens) => sum + tokens, 0) / blockTokens.length)).toBe(225); // avg
+
+		const sortedTokens = [...blockTokens].sort((a, b) => a - b);
+		const mid = Math.floor(sortedTokens.length / 2);
+		expect(sortedTokens[mid]).toBe(225); // median (odd count)
+	});
+
+	it('sorts sessions by most recent first', async () => {
+		await using _fixture = await createMockData([
+			{ startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 0 }, // oldest: 100
+			{ startTime: '2024-01-01T12:00:00Z', inputTokens: 300, outputTokens: 0 }, // newest: 300
+			{ startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 0 }, // middle: 200
+		]);
+
+		const blocks = await loadSessionBlockData({
+			sessionDurationHours: 5,
+			mode: 'display',
+			order: 'desc',
+			offline: true,
+			timezone: 'UTC',
+			locale: 'en-US',
+		});
+
+		const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + + // Sort by start time (most recent first) + completedBlocks.sort((a, b) => b.startTime.getTime() - a.startTime.getTime()); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + // Should be sorted as: 300 (12:00), 200 (06:00), 100 (00:00) + expect(blockTokens).toEqual([300, 200, 100]); + }); + }); + + describe('CLI Integration Tests', () => { + beforeEach(() => { + vi.stubEnv('HOME', '/test-home'); + vi.stubEnv('USERPROFILE', '/test-home'); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + }); + + // Mock the command execution context + function createMockContext(overrides: Partial = {}): { values: any; tokens: unknown[] } { + return { + values: { + tokenLimitSessions: null, + tokenLimit: undefined, + sessionLength: 5, + json: false, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + ...overrides, + }, + tokens: [], + }; + } + + it('uses default values correctly', () => { + const ctx = createMockContext(); + expect(ctx.values.tokenLimitSessions).toBeNull(); + expect(parseTokenLimit(ctx.values.tokenLimit, 0).method).toBe('max'); // default method + }); + + it('accepts valid tokenLimit method values', () => { + const maxCtx = createMockContext({ tokenLimit: 'max' }); + const avgCtx = createMockContext({ tokenLimit: 'avg' }); + const medianCtx = createMockContext({ tokenLimit: 'median' }); + + expect(parseTokenLimit(maxCtx.values.tokenLimit, 0).method).toBe('max'); + expect(parseTokenLimit(avgCtx.values.tokenLimit, 0).method).toBe('avg'); + expect(parseTokenLimit(medianCtx.values.tokenLimit, 0).method).toBe('median'); + }); + + it('accepts valid tokenLimitSessions values', () => { + const nullCtx = createMockContext({ tokenLimitSessions: null }); + const numberCtx = createMockContext({ tokenLimitSessions: 5 }); + + expect(nullCtx.values.tokenLimitSessions).toBeNull(); + expect(numberCtx.values.tokenLimitSessions).toBe(5); + }); + + it('maintains backward 
compatibility with tokenLimit="max"', () => { + const ctx = createMockContext({ tokenLimit: 'max' }); + expect(ctx.values.tokenLimit).toBe('max'); + expect(parseTokenLimit(ctx.values.tokenLimit, 0).method).toBe('max'); + + // This should trigger the calculation logic (when tokenLimit is "max") + expect(ctx.values.tokenLimit === 'max').toBe(true); + }); + + it('handles new tokenLimit method values', () => { + const avgCtx = createMockContext({ tokenLimit: 'avg' }); + const medianCtx = createMockContext({ tokenLimit: 'median' }); + + expect(avgCtx.values.tokenLimit).toBe('avg'); + expect(parseTokenLimit(avgCtx.values.tokenLimit, 0).method).toBe('avg'); + + expect(medianCtx.values.tokenLimit).toBe('median'); + expect(parseTokenLimit(medianCtx.values.tokenLimit, 0).method).toBe('median'); + }); + + it('handles explicit numeric tokenLimit values', () => { + const ctx = createMockContext({ tokenLimit: '50000' }); + expect(ctx.values.tokenLimit).toBe('50000'); + expect(parseTokenLimit(ctx.values.tokenLimit, 0).method).toBe('max'); // numeric values default to max method + + // parseTokenLimit should handle this correctly + expect(parseTokenLimit(ctx.values.tokenLimit, 0).limit).toBe(50000); + }); + }); + + describe('Edge Cases', () => { + beforeEach(() => { + vi.stubEnv('HOME', '/test-home'); + vi.stubEnv('USERPROFILE', '/test-home'); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + }); + + it('handles empty blocks list', () => { + const blocks: any[] = []; + const completedBlocks = blocks.filter((block: any) => (block.isGap ?? false) === false && block.isActive !== true); + expect(completedBlocks).toHaveLength(0); + + const calculatedTokenLimit = completedBlocks.length > 0 ? 
Math.max(...completedBlocks.map((block: any) => getTotalTokens(block.tokenCounts))) : 0; + expect(calculatedTokenLimit).toBe(0); + }); + + it('handles tokenLimitSessions greater than available sessions', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 100, outputTokens: 50 }, + { startTime: '2024-01-01T06:00:00Z', inputTokens: 200, outputTokens: 100 }, + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive); + completedBlocks.sort((a, b) => b.startTime.getTime() - a.startTime.getTime()); + + // Request DEFAULT_TOKEN_LIMIT_SESSIONS sessions but only have 2 + const blocksToUse = completedBlocks.slice(0, DEFAULT_TOKEN_LIMIT_SESSIONS); // Should just return all 2 + expect(blocksToUse).toHaveLength(2); + + const blockTokens = blocksToUse.map(block => getTotalTokens(block.tokenCounts)); + expect(Math.max(...blockTokens)).toBe(300); + }); + + it('handles zero token blocks', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T00:00:00Z', inputTokens: 0, outputTokens: 0 }, // total: 0 + { startTime: '2024-01-01T06:00:00Z', inputTokens: 100, outputTokens: 50 }, // total: 150 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + expect(blockTokens).toContain(0); + expect(blockTokens).toContain(150); + expect(Math.max(...blockTokens)).toBe(150); + }); + }); + + describe('Backwards Compatibility', () => { + it('maintains old behavior when defaults are used', () => { + // When tokenLimit defaults to 'max' method and tokenLimitSessions=null, + // behavior should be identical to the old implementation + const testTokens = [100, 200, 300, 150, 250]; + + // Old behavior: Math.max(...allTokens) + const oldBehavior = Math.max(...testTokens); + + // New behavior with defaults + const sortedTokens = [...testTokens]; // no session limiting (null) + const newBehavior = Math.max(...sortedTokens); // 'max' method (default) + + expect(newBehavior).toBe(oldBehavior); + expect(newBehavior).toBe(300); + }); + + it('parseTokenLimit preserves backward compatibility', () => { + // Old behavior: if tokenLimit is "max" or undefined, use calculated limit + expect(parseTokenLimit('max', 500).limit).toBe(500); + expect(parseTokenLimit(undefined, 500).limit).toBe(500); + expect(parseTokenLimit('', 500).limit).toBe(500); + + // New behavior: supports additional method keywords + expect(parseTokenLimit('avg', 500).limit).toBe(500); + expect(parseTokenLimit('median', 500).limit).toBe(500); + + // Old behavior: if explicit number, use that + expect(parseTokenLimit('1000', 500).limit).toBe(1000); + + // Old behavior: if calculated limit is 0 and tokenLimit is "max", return undefined + expect(parseTokenLimit('max', 0).limit).toBeUndefined(); + expect(parseTokenLimit('avg', 0).limit).toBeUndefined(); + expect(parseTokenLimit('median', 0).limit).toBeUndefined(); + }); + + it('parseTokenLimit provides consistent method extraction', () => { + // Default to 'max' for old behavior + expect(parseTokenLimit(undefined, 0).method).toBe('max'); + expect(parseTokenLimit(null, 0).method).toBe('max'); + 
expect(parseTokenLimit('', 0).method).toBe('max'); + expect(parseTokenLimit('max', 0).method).toBe('max'); + + // New method support + expect(parseTokenLimit('avg', 0).method).toBe('avg'); + expect(parseTokenLimit('median', 0).method).toBe('median'); + + // Numeric values default to 'max' method + expect(parseTokenLimit('50000', 0).method).toBe('max'); + }); + }); + + describe('Real Scenario Tests', () => { + beforeEach(() => { + vi.stubEnv('HOME', '/test-home'); + vi.stubEnv('USERPROFILE', '/test-home'); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + }); + + it('handles realistic token usage patterns', async () => { + // Realistic scenario: morning heavy usage, afternoon light usage, evening moderate + await using _fixture = await createMockData([ + { startTime: '2024-01-01T08:00:00Z', inputTokens: 5000, outputTokens: 2000 }, // morning: 7000 + { startTime: '2024-01-01T14:00:00Z', inputTokens: 500, outputTokens: 200 }, // afternoon: 700 + { startTime: '2024-01-01T20:00:00Z', inputTokens: 2000, outputTokens: 800 }, // evening: 2800 + { startTime: '2024-01-02T08:00:00Z', inputTokens: 4000, outputTokens: 1500 }, // next day: 5500 + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? 
false) && !block.isActive); + completedBlocks.sort((a, b) => b.startTime.getTime() - a.startTime.getTime()); + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + + // Expected order (newest first): 5500, 2800, 700, 7000 + expect(blockTokens[0]).toBe(5500); // most recent + expect(Math.max(...blockTokens)).toBe(7000); // max method + + // Test taking only 2 recent sessions + const recentTokens = blockTokens.slice(0, 2); + expect(Math.max(...recentTokens)).toBe(5500); + expect(Math.round(recentTokens.reduce((sum, tokens) => sum + tokens, 0) / recentTokens.length)).toBe(4150); // avg of 5500, 2800 + }); + + it('handles mixed Claude models correctly', async () => { + await using _fixture = await createMockData([ + { startTime: '2024-01-01T08:00:00Z', inputTokens: 1000, outputTokens: 500, model: 'claude-sonnet-4-20250514' }, + { startTime: '2024-01-01T14:00:00Z', inputTokens: 2000, outputTokens: 800, model: 'claude-opus-4-20250514' }, + { startTime: '2024-01-01T20:00:00Z', inputTokens: 1500, outputTokens: 600, model: 'claude-sonnet-4-20250514' }, + ]); + + const blocks = await loadSessionBlockData({ + sessionDurationHours: 5, + mode: 'display', + order: 'desc', + offline: true, + timezone: 'UTC', + locale: 'en-US', + }); + + const completedBlocks = blocks.filter(block => !(block.isGap ?? false) && !block.isActive); + expect(completedBlocks).toHaveLength(3); + + // Token calculation should work regardless of model + const blockTokens = completedBlocks.map(block => getTotalTokens(block.tokenCounts)); + expect(blockTokens).toContain(1500); // sonnet + expect(blockTokens).toContain(2800); // opus + expect(blockTokens).toContain(2100); // sonnet + }); + }); +}