From 44a5a7ce065508ded6c5f5ad20daceaa548c4fea Mon Sep 17 00:00:00 2001
From: Ian White
Date: Fri, 20 Feb 2026 18:31:16 -0600
Subject: [PATCH 1/3] fix(ai): address retroactive PR 1-3 review findings
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- test(ai-errors): remove error-causes API tests; keep only handleAIErrors behavioral routing (ericelliott/janhesters #407)
- test(constants): remove defaults/constraints value-only blocks; replace tautological expected: defaults.X with literals (ericelliott #407)
- fix(debug-logger): rename writeToFile→bufferEntry, process→logProcess export; add logFile type guard; circular ref safety in formatMessage; command() rest params; improved JSDoc (janhesters #408)
- test(debug-logger): onTestFinished for all teardown; add circular ref and logFile TypeError tests; flush no-op debug:false (janhesters #408)
- fix(limit-concurrency): guard non-positive limit with RangeError; onTestFinished for fake timer teardown; document fail-fast (janhesters #408)
- test(agent-parser): replace partial assertions with full expected values including ndjsonLength (janhesters #409)
- test(extraction-parser): replace 4x multi-assert blocks with single full-object assertions (janhesters #409)

Co-authored-by: Cursor
---
 source/agent-parser.test.js | 23 ++---
 source/ai-errors.test.js | 79 -----------------
 source/constants.test.js | 50 +----------
 source/debug-logger.js | 23 +++--
 source/debug-logger.test.js | 145 ++++++++++++++++++-------------
 source/execute-agent.js | 2 +-
 source/extraction-parser.test.js | 95 +++++++-------------
 source/limit-concurrency.js | 14 ++-
 source/limit-concurrency.test.js | 30 ++++++-
 9 files changed, 182 insertions(+), 279 deletions(-)

diff --git a/source/agent-parser.test.js b/source/agent-parser.test.js
index 2aa41da6..62779970 100644
--- a/source/agent-parser.test.js
+++ b/source/agent-parser.test.js
@@ -47,8 +47,8 @@ describe('parseStringResult()', () => {
 assert({
 given: 'JSON string starting with [',
 should: 'parse as JSON array',
- actual: result.length,
- expected: 2
+ actual: result,
+ expected: [{ id: 1 }, { id: 2 }]
 });
 });
@@ -82,8 +82,8 @@
 assert({
 given: 'markdown without json tag',
 should: 'extract and parse JSON',
- actual: result.passed,
- expected: true
+ actual: result,
+ expected: { passed: true }
 });
@@ -96,8 +96,8 @@
 assert({
 given: 'malformed JSON with markdown fallback',
 should: 'extract from markdown block',
- actual: result.passed,
- expected: true
+ actual: result,
+ expected: { passed: true }
 });
 assert({
@@ -156,13 +156,6 @@ describe('parseStringResult()', () => {
 const result = parseStringResult(input, logger);
- assert({
- given: 'JSON with surrounding whitespace',
- should: 'parse successfully',
- actual: result.passed,
- expected: true
- });
-
 assert({
 given: 'JSON with surrounding whitespace',
 should: 'return parsed object matching trimmed input',
@@ -276,8 +269,8 @@ describe('parseOpenCodeNDJSON()', () => {
 assert({
 given: 'NDJSON with no text events',
 should: 'include ndjsonLength in cause',
- actual: typeof error?.cause?.ndjsonLength === 'number',
- expected: true
+ actual: error?.cause?.ndjsonLength,
+ expected: ndjson.length
 });
 assert({
diff --git a/source/ai-errors.test.js b/source/ai-errors.test.js
index 3f999c9b..38b5a3ee 100644
--- a/source/ai-errors.test.js
+++ b/source/ai-errors.test.js
@@ -2,91 +2,12 @@ import { describe, test } from 'vitest'; import { assert } from 
'./vitest.js'; import { createError } from 'error-causes'; import { - aiErrors, handleAIErrors, ParseError, ValidationError, - SecurityError, - TimeoutError, - AgentProcessError, - AITestError, - OutputError, - ExtractionParseError, - ExtractionValidationError } from './ai-errors.js'; -const errorTable = [ - ['ParseError', 'PARSE_FAILURE', ParseError], - ['ValidationError', 'VALIDATION_FAILURE', ValidationError], - ['SecurityError', 'SECURITY_VIOLATION', SecurityError], - ['TimeoutError', 'AGENT_TIMEOUT', TimeoutError], - ['AgentProcessError', 'AGENT_PROCESS_FAILURE', AgentProcessError], - ['AITestError', 'AI_TEST_ERROR', AITestError], - ['OutputError', 'OUTPUT_ERROR', OutputError], - ['ExtractionParseError', 'EXTRACTION_PARSE_FAILURE', ExtractionParseError], - ['ExtractionValidationError','EXTRACTION_VALIDATION_FAILURE', ExtractionValidationError], -]; - describe('ai-errors module', () => { - describe('error descriptors', () => { - test.each(errorTable)('%s descriptor has correct name', (name) => { - assert({ - given: `${name} descriptor`, - should: 'have the correct error name', - actual: aiErrors[name].name, - expected: name - }); - }); - - test.each(errorTable)('%s descriptor has correct code', (name, code) => { - assert({ - given: `${name} descriptor`, - should: 'have the correct error code', - actual: aiErrors[name].code, - expected: code - }); - }); - - test.each(errorTable)('%s named export matches aiErrors entry', (name, _code, descriptor) => { - assert({ - given: `destructured ${name} export`, - should: 'equal aiErrors entry', - actual: descriptor, - expected: aiErrors[name] - }); - }); - }); - - describe('createError integration', () => { - test.each([ - ['ParseError', ParseError, 'PARSE_FAILURE'], - ['SecurityError', SecurityError, 'SECURITY_VIOLATION'], - ])('%s produces an Error with structured cause', (name, descriptor, code) => { - const err = createError(descriptor); - - assert({ - given: `a ${name} descriptor`, - should: 'produce an Error instance', - actual: err instanceof Error, - expected: true - }); - - assert({ - given: `a ${name} descriptor`, - should: `set cause.name to ${name}`, - actual: err.cause.name, - expected: name - }); - - assert({ - given: `a ${name} descriptor`, - should: `set cause.code to ${code}`, - actual: err.cause.code, - expected: code - }); - }); - }); - describe('handleAIErrors', () => { // handleAIErrors is exhaustive: every registered error type must have a handler. // Build a no-op map for all types, then override the one under test. 
diff --git a/source/constants.test.js b/source/constants.test.js index ce80e926..3526943e 100644 --- a/source/constants.test.js +++ b/source/constants.test.js @@ -1,7 +1,6 @@ import { describe, test } from 'vitest'; import { assert } from './vitest.js'; import { - defaults, constraints, runsSchema, thresholdSchema, @@ -13,49 +12,6 @@ import { } from './constants.js'; describe('constants module', () => { - describe('defaults', () => { - test('exports standard test configuration', () => { - assert({ - given: 'defaults export', - should: 'contain runs value', - actual: defaults.runs, - expected: 4 - }); - - assert({ - given: 'defaults export', - should: 'contain threshold value', - actual: defaults.threshold, - expected: 75 - }); - - assert({ - given: 'defaults export', - should: 'contain timeout in milliseconds', - actual: defaults.timeoutMs, - expected: 300_000 - }); - }); - }); - - describe('constraints', () => { - test('defines validation boundaries', () => { - assert({ - given: 'constraints export', - should: 'define threshold min/max', - actual: [constraints.thresholdMin, constraints.thresholdMax], - expected: [0, 100] - }); - - assert({ - given: 'constraints export', - should: 'list supported agents', - actual: constraints.supportedAgents, - expected: ['claude', 'opencode', 'cursor'] - }); - }); - }); - describe('runsSchema', () => { test('accepts positive integers', () => { const result = runsSchema.safeParse(5); @@ -398,21 +354,21 @@ describe('constants module', () => { given: 'minimal options with only filePath', should: 'apply default runs', actual: result.data?.runs, - expected: defaults.runs + expected: 4 }); assert({ given: 'minimal options with only filePath', should: 'apply default threshold', actual: result.data?.threshold, - expected: defaults.threshold + expected: 75 }); assert({ given: 'minimal options with only filePath', should: 'apply default agent', actual: result.data?.agent, - expected: defaults.agent + expected: 'claude' }); }); diff --git a/source/debug-logger.js b/source/debug-logger.js index 48cc0900..444ac357 100644 --- a/source/debug-logger.js +++ b/source/debug-logger.js @@ -2,18 +2,27 @@ import { appendFileSync } from 'fs'; /** * Create a debug logger that can write to console and/or log file. + * Callers must invoke flush() to write buffered entries to disk before process exit. * @param {Object} options * @param {boolean} [options.debug=false] - Enable debug logging - * @param {string} [options.logFile] - Optional log file path - * @returns {Object} Logger with log, command, process, result, and flush methods + * @param {string} [options.logFile] - Optional log file path for buffered writes + * @returns {{ log: Function, command: Function, logProcess: Function, result: Function, flush: Function }} */ export const createDebugLogger = ({ debug = false, logFile } = {}) => { + if (logFile !== undefined && typeof logFile !== 'string') { + throw new TypeError(`logFile must be a string, got ${typeof logFile}`); + } + const buffer = []; const formatMessage = (parts) => - parts.map(part => typeof part === 'object' ? 
JSON.stringify(part) : String(part)).join(' '); + parts.map(part => { + if (typeof part !== 'object') return String(part); + try { return JSON.stringify(part); } + catch { return '[Circular]'; } + }).join(' '); - const writeToFile = (message) => { + const bufferEntry = (message) => { if (!logFile) return; const timestamp = new Date().toISOString(); @@ -28,10 +37,10 @@ export const createDebugLogger = ({ debug = false, logFile } = {}) => { console.error(`[DEBUG] ${message}`); } - writeToFile(message); + bufferEntry(message); }; - const command = (cmd, args = []) => { + const command = (cmd, ...args) => { log(`Command: ${cmd} ${args.join(' ')}`); }; @@ -49,5 +58,5 @@ export const createDebugLogger = ({ debug = false, logFile } = {}) => { buffer.length = 0; }; - return { log, command, process: logProcess, result, flush }; + return { log, command, logProcess, result, flush }; }; diff --git a/source/debug-logger.test.js b/source/debug-logger.test.js index 25c4882e..c98d4613 100644 --- a/source/debug-logger.test.js +++ b/source/debug-logger.test.js @@ -1,5 +1,6 @@ -import { describe, test, beforeEach, afterEach, vi } from 'vitest'; +import { describe, test, onTestFinished, vi } from 'vitest'; import { assert } from './vitest.js'; +import { Try } from './riteway.js'; import { createDebugLogger } from './debug-logger.js'; import { rmSync, existsSync, readFileSync, mkdtempSync } from 'fs'; import { join } from 'path'; @@ -7,17 +8,10 @@ import { tmpdir } from 'os'; describe('debug-logger', () => { describe('createDebugLogger()', () => { - let consoleErrorSpy; - - beforeEach(() => { - consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - }); - - afterEach(() => { - consoleErrorSpy.mockRestore(); - }); - test('logs to console when debug is enabled', () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + onTestFinished(() => consoleErrorSpy.mockRestore()); + const logger = createDebugLogger({ debug: true }); logger.log('test message'); @@ -37,6 +31,9 @@ describe('debug-logger', () => { }); test('does not log to console when debug is disabled', () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + onTestFinished(() => consoleErrorSpy.mockRestore()); + const logger = createDebugLogger({ debug: false }); logger.log('test message'); @@ -51,58 +48,55 @@ describe('debug-logger', () => { test('writes to log file when logFile is specified', () => { const testDir = mkdtempSync(join(tmpdir(), 'riteway-')); const logFile = join(testDir, 'debug.log'); + onTestFinished(() => rmSync(testDir, { recursive: true, force: true })); + + const logger = createDebugLogger({ debug: true, logFile }); + + logger.log('test message 1'); + logger.log('test message 2'); + logger.flush(); - try { - const logger = createDebugLogger({ debug: true, logFile }); - - logger.log('test message 1'); - logger.log('test message 2'); - logger.flush(); - - assert({ - given: 'logFile specified', - should: 'create the log file', - actual: existsSync(logFile), - expected: true - }); - - const logContents = readFileSync(logFile, 'utf-8'); - - assert({ - given: 'multiple log messages', - should: 'write all messages to file', - actual: logContents.includes('test message 1') && logContents.includes('test message 2'), - expected: true - }); - } finally { - rmSync(testDir, { recursive: true, force: true }); - } + assert({ + given: 'logFile specified', + should: 'create the log file', + actual: existsSync(logFile), + expected: true + }); + + const 
logContents = readFileSync(logFile, 'utf-8'); + + assert({ + given: 'multiple log messages', + should: 'write all messages to file', + actual: logContents.includes('test message 1') && logContents.includes('test message 2'), + expected: true + }); }); test('includes timestamp in log file entries', () => { const testDir = mkdtempSync(join(tmpdir(), 'riteway-')); const logFile = join(testDir, 'debug.log'); + onTestFinished(() => rmSync(testDir, { recursive: true, force: true })); + + const logger = createDebugLogger({ debug: true, logFile }); - try { - const logger = createDebugLogger({ debug: true, logFile }); - - logger.log('test message'); - logger.flush(); - - const logContents = readFileSync(logFile, 'utf-8'); - - assert({ - given: 'a log message', - should: 'include ISO timestamp', - actual: /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/.test(logContents), - expected: true - }); - } finally { - rmSync(testDir, { recursive: true, force: true }); - } + logger.log('test message'); + logger.flush(); + + const logContents = readFileSync(logFile, 'utf-8'); + + assert({ + given: 'a log message', + should: 'include ISO timestamp', + actual: /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/.test(logContents), + expected: true + }); }); test('handles object logging with JSON serialization', () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + onTestFinished(() => consoleErrorSpy.mockRestore()); + const logger = createDebugLogger({ debug: true }); const testObj = { key: 'value', nested: { data: 123 } }; @@ -116,11 +110,32 @@ describe('debug-logger', () => { }); }); + test('handles circular reference objects without throwing', () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + onTestFinished(() => consoleErrorSpy.mockRestore()); + + const circular = {}; + circular.self = circular; + + const logger = createDebugLogger({ debug: true }); + logger.log('Ref:', circular); + + assert({ + given: 'an object with a circular reference', + should: 'log [Circular] instead of throwing', + actual: consoleErrorSpy.mock.calls[0][0], + expected: '[DEBUG] Ref: [Circular]' + }); + }); + test('provides structured logging methods', () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + onTestFinished(() => consoleErrorSpy.mockRestore()); + const logger = createDebugLogger({ debug: true }); - logger.command('node', ['arg1', 'arg2']); - logger.process({ pid: 123, exitCode: 0 }); + logger.command('node', 'arg1', 'arg2'); + logger.logProcess({ pid: 123, exitCode: 0 }); logger.result({ passed: true }); assert({ @@ -146,18 +161,21 @@ describe('debug-logger', () => { }); test('flush is a no-op when no logFile is configured', () => { - const logger = createDebugLogger({ debug: true }); + const logger = createDebugLogger({ debug: false }); logger.log('test message'); assert({ given: 'no logFile configured', should: 'not throw when flush is called', - actual: (() => { logger.flush(); return true; })(), - expected: true + actual: Try(logger.flush), + expected: undefined }); }); test('default debug value is false', () => { + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + onTestFinished(() => consoleErrorSpy.mockRestore()); + const logger = createDebugLogger(); logger.log('test'); @@ -168,5 +186,16 @@ describe('debug-logger', () => { expected: 0 }); }); + + test('throws TypeError for non-string logFile', () => { + const error = Try(createDebugLogger, { logFile: 123 }); + + assert({ + given: 
'non-string logFile value', + should: 'throw TypeError', + actual: error?.name, + expected: 'TypeError' + }); + }); }); }); diff --git a/source/execute-agent.js b/source/execute-agent.js index a22d9c11..ceac994f 100644 --- a/source/execute-agent.js +++ b/source/execute-agent.js @@ -34,7 +34,7 @@ const spawnProcess = async ({ agentConfig, prompt, logger }) => { const allArgs = [...args, prompt]; logger.log('\nExecuting agent command:'); - logger.command(command, args); + logger.command(command, ...args); logger.log(`Prompt length: ${prompt.length} characters`); try { diff --git a/source/extraction-parser.test.js b/source/extraction-parser.test.js index ba2dfb99..592ae789 100644 --- a/source/extraction-parser.test.js +++ b/source/extraction-parser.test.js @@ -18,37 +18,16 @@ describe('parseExtractionResult()', () => { assert({ given: 'valid extraction result', - should: 'preserve the userPrompt field', - actual: result.userPrompt, - expected: 'What is 2 + 2?' - }); - - assert({ - given: 'valid extraction result', - should: 'preserve the importPaths array', - actual: Array.isArray(result.importPaths), - expected: true - }); - - assert({ - given: 'valid extraction result', - should: 'preserve importPaths values', - actual: result.importPaths[0], - expected: 'test.mdc' - }); - - assert({ - given: 'valid extraction result', - should: 'preserve assertions array length', - actual: result.assertions.length, - expected: 2 - }); - - assert({ - given: 'valid extraction result', - should: 'preserve assertion requirement field', - actual: result.assertions[0].requirement, - expected: 'Given simple addition, should add correctly' + should: 'return the parsed extraction object', + actual: result, + expected: { + userPrompt: 'What is 2 + 2?', + importPaths: ['test.mdc'], + assertions: [ + { id: 1, requirement: 'Given simple addition, should add correctly' }, + { id: 2, requirement: 'Given format, should output JSON' } + ] + } }); }); @@ -59,23 +38,13 @@ describe('parseExtractionResult()', () => { assert({ given: 'JSON wrapped in markdown code fences', - should: 'parse importPaths as an array', - actual: Array.isArray(result.importPaths), - expected: true - }); - - assert({ - given: 'JSON wrapped in markdown code fences', - should: 'preserve the userPrompt field', - actual: result.userPrompt, - expected: 'test prompt' - }); - - assert({ - given: 'JSON wrapped in markdown code fences', - should: 'preserve assertions array', - actual: result.assertions[0].requirement, - expected: 'Given test, should pass' + should: 'extract and parse the JSON object', + actual: result, + expected: { + userPrompt: 'test prompt', + importPaths: [], + assertions: [{ id: 1, requirement: 'Given test, should pass' }] + } }); }); @@ -87,15 +56,12 @@ describe('parseExtractionResult()', () => { assert({ given: 'JSON with explanation text and markdown fences', should: 'extract and parse the JSON object', - actual: result.userPrompt, - expected: 'test prompt' - }); - - assert({ - given: 'JSON with explanation text and markdown fences', - should: 'return the parsed content', - actual: result.assertions[0].requirement, - expected: 'Given test, should pass' + actual: result, + expected: { + userPrompt: 'test prompt', + importPaths: [], + assertions: [{ id: 1, requirement: 'Given test, should pass' }] + } }); }); @@ -111,15 +77,12 @@ describe('parseExtractionResult()', () => { assert({ given: 'an already-parsed object instead of a JSON string', should: 'validate and return the object directly', - actual: result.userPrompt, - expected: 'test 
prompt' - }); - - assert({ - given: 'an already-parsed object', - should: 'preserve the assertions', - actual: result.assertions[0].requirement, - expected: 'Given a test, should pass' + actual: result, + expected: { + userPrompt: 'test prompt', + importPaths: [], + assertions: [{ id: 1, requirement: 'Given a test, should pass' }] + } }); }); diff --git a/source/limit-concurrency.js b/source/limit-concurrency.js index f5d19da2..3750fafb 100644 --- a/source/limit-concurrency.js +++ b/source/limit-concurrency.js @@ -1,11 +1,17 @@ /** - * Simple concurrency limiter to avoid resource exhaustion. - * Executes tasks with a maximum concurrency limit. + * Sliding-window async concurrency limiter. + * Executes tasks with a maximum concurrency limit, preserving result order. + * Uses fail-fast semantics: if any task rejects, the returned promise rejects + * immediately and remaining results are discarded. * @param {Array} tasks - Array of async task functions - * @param {number} limit - Maximum number of concurrent tasks - * @returns {Promise} Results from all tasks + * @param {number} limit - Maximum number of concurrent tasks (must be >= 1) + * @returns {Promise} Results from all tasks in input order */ export const limitConcurrency = async (tasks, limit) => { + if (!Number.isInteger(limit) || limit < 1) { + throw new RangeError(`limit must be a positive integer, got ${limit}`); + } + const results = []; const executing = []; diff --git a/source/limit-concurrency.test.js b/source/limit-concurrency.test.js index 3a33848c..a2d341c9 100644 --- a/source/limit-concurrency.test.js +++ b/source/limit-concurrency.test.js @@ -1,4 +1,4 @@ -import { describe, test, vi } from 'vitest'; +import { describe, test, vi, onTestFinished } from 'vitest'; import { assert } from './vitest.js'; import { Try } from './riteway.js'; import { limitConcurrency } from './limit-concurrency.js'; @@ -24,6 +24,7 @@ describe('limit-concurrency', () => { test('limits concurrent execution to the specified limit', async () => { vi.useFakeTimers(); + onTestFinished(() => vi.useRealTimers()); let activeConcurrent = 0; let maxConcurrent = 0; @@ -38,7 +39,6 @@ describe('limit-concurrency', () => { const run = limitConcurrency(tasks, 2); await vi.runAllTimersAsync(); await run; - vi.useRealTimers(); assert({ given: '6 tasks with a limit of 2', @@ -96,5 +96,31 @@ describe('limit-concurrency', () => { expected: ['a', 'b'], }); }); + + test('throws RangeError for zero limit', async () => { + const tasks = [() => Promise.resolve(1)]; + + const error = await Try(limitConcurrency, tasks, 0); + + assert({ + given: 'limit of 0', + should: 'throw RangeError', + actual: error?.name, + expected: 'RangeError', + }); + }); + + test('throws RangeError for negative limit', async () => { + const tasks = [() => Promise.resolve(1)]; + + const error = await Try(limitConcurrency, tasks, -1); + + assert({ + given: 'negative limit', + should: 'throw RangeError', + actual: error?.name, + expected: 'RangeError', + }); + }); }); }); From d38bae02b4dafbf551e14bb8ab94057b0979d1aa Mon Sep 17 00:00:00 2001 From: Ian White Date: Fri, 20 Feb 2026 18:52:14 -0600 Subject: [PATCH 2/3] test(agent-parser): use full expected values - Replace JSON.stringify comparisons with direct object assertions - Collapse 4 partial error.cause assertions into single full-object assert in parseOpenCodeNDJSON error test - Expand partial error.cause?.name assertion to full cause object in unwrapAgentResult error test Addresses Jan's PR #409 comment: deterministic functions should assert the 
complete expected value, not individual properties. Co-authored-by: Cursor --- source/agent-parser.test.js | 52 +++++++++++++++---------------------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/source/agent-parser.test.js b/source/agent-parser.test.js index 62779970..9a25ee64 100644 --- a/source/agent-parser.test.js +++ b/source/agent-parser.test.js @@ -26,8 +26,8 @@ describe('parseStringResult()', () => { assert({ given: 'JSON string starting with {', should: 'parse as JSON object', - actual: JSON.stringify(result), - expected: '{"passed":true,"output":"test"}' + actual: result, + expected: { passed: true, output: 'test' } }); assert({ @@ -61,8 +61,8 @@ describe('parseStringResult()', () => { assert({ given: 'markdown-wrapped JSON', should: 'extract and parse JSON', - actual: JSON.stringify(result), - expected: '{"passed":true,"output":"test"}' + actual: result, + expected: { passed: true, output: 'test' } }); assert({ @@ -254,30 +254,15 @@ describe('parseOpenCodeNDJSON()', () => { assert({ given: 'NDJSON with no text events', - should: 'have ParseError name in cause', - actual: error?.cause?.name, - expected: 'ParseError' - }); - - assert({ - given: 'NDJSON with no text events', - should: 'have NO_TEXT_EVENTS code in cause', - actual: error?.cause?.code, - expected: 'NO_TEXT_EVENTS' - }); - - assert({ - given: 'NDJSON with no text events', - should: 'include ndjsonLength in cause', - actual: error?.cause?.ndjsonLength, - expected: ndjson.length - }); - - assert({ - given: 'NDJSON with no text events', - should: 'include linesProcessed in cause', - actual: error?.cause?.linesProcessed, - expected: 2 + should: 'throw Error with full ParseError cause', + actual: error?.cause, + expected: { + name: 'ParseError', + code: 'NO_TEXT_EVENTS', + message: 'No text events found in OpenCode output', + ndjsonLength: ndjson.length, + linesProcessed: 2 + } }); }); @@ -362,9 +347,14 @@ describe('unwrapAgentResult()', () => { assert({ given: 'plain text that is not valid JSON', - should: 'throw Error with ParseError cause', - actual: error?.cause?.name, - expected: 'ParseError' + should: 'throw Error with full ParseError cause', + actual: error?.cause, + expected: { + name: 'ParseError', + code: 'PARSE_FAILURE', + message: 'Agent output is not valid JSON: plain text response', + outputPreview: 'plain text response' + } }); }); From fd72de78585924b5d253dbadcf5d3febe9b97aae Mon Sep 17 00:00:00 2001 From: Ian White Date: Fri, 20 Feb 2026 19:29:39 -0600 Subject: [PATCH 3/3] test(ai): replace partial assertions with full expected values Per Jan's review: deterministic functions should assert the complete expected value, not individual properties. 
- extraction-parser: collapse ExtractionParseError and ExtractionValidationError cause assertions to full objects; comment .name usage (SyntaxError sets it as own property) - tap-yaml: consolidate per-property result asserts to full objects; collapse error cause to single full-object assert; remove redundant typeof score check - execute-agent: collapse AgentProcessError, TimeoutError, ParseError cause assertions to full objects; comment 3-deep .cause chain - aggregation: collapse ValidationError and ParseError test.each cause assertions to full objects; comment .constructor.name (ZodError does not set .name as own property); remove standalone normalizeJudgment ParseError test made redundant by test.each Co-authored-by: Cursor --- source/aggregation.test.js | 352 +++++++++++++------------------ source/execute-agent.test.js | 68 +++--- source/extraction-parser.test.js | 90 ++++---- source/tap-yaml.test.js | 147 +++++-------- 4 files changed, 289 insertions(+), 368 deletions(-) diff --git a/source/aggregation.test.js b/source/aggregation.test.js index 616eb033..fa095e8d 100644 --- a/source/aggregation.test.js +++ b/source/aggregation.test.js @@ -8,21 +8,17 @@ import { describe('aggregatePerAssertionResults()', () => { test('aggregates per-assertion results when all assertions pass', () => { + const runResultsA = [ + { passed: true, output: 'ok' }, + { passed: true, output: 'ok' } + ]; + const runResultsB = [ + { passed: true, output: 'ok' }, + { passed: true, output: 'ok' } + ]; const perAssertionResults = [ - { - requirement: 'Given simple addition, should add correctly', - runResults: [ - { passed: true, output: 'ok' }, - { passed: true, output: 'ok' } - ] - }, - { - requirement: 'Given format, should output JSON', - runResults: [ - { passed: true, output: 'ok' }, - { passed: true, output: 'ok' } - ] - } + { requirement: 'Given simple addition, should add correctly', runResults: runResultsA }, + { requirement: 'Given format, should output JSON', runResults: runResultsB } ]; const result = aggregatePerAssertionResults({ @@ -32,57 +28,45 @@ describe('aggregatePerAssertionResults()', () => { }); assert({ - given: 'all assertions meeting threshold', - should: 'return passed: true', - actual: result.passed, - expected: true - }); - - assert({ - given: 'two assertions', - should: 'return assertions array of length 2', - actual: result.assertions.length, - expected: 2 - }); - - assert({ - given: 'first assertion with all passes', - should: 'mark the assertion as passed', - actual: result.assertions[0].passed, - expected: true - }); - - assert({ - given: 'first assertion with 2 passes', - should: 'report passCount 2', - actual: result.assertions[0].passCount, - expected: 2 - }); - - assert({ - given: 'first assertion requirement', - should: 'preserve the requirement', - actual: result.assertions[0].requirement, - expected: 'Given simple addition, should add correctly' + given: 'two assertions both meeting threshold', + should: 'return full aggregated result with all assertions passed', + actual: result, + expected: { + passed: true, + assertions: [ + { + requirement: 'Given simple addition, should add correctly', + passed: true, + passCount: 2, + totalRuns: 2, + averageScore: 0, + runResults: runResultsA + }, + { + requirement: 'Given format, should output JSON', + passed: true, + passCount: 2, + totalRuns: 2, + averageScore: 0, + runResults: runResultsB + } + ] + } }); }); test('fails when any assertion does not meet threshold', () => { + const runResultsA = [ + { passed: true, output: 'ok' }, + { 
passed: true, output: 'ok' } + ]; + const runResultsB = [ + { passed: false, output: 'fail' }, + { passed: false, output: 'fail' } + ]; const perAssertionResults = [ - { - requirement: 'Given addition, should add correctly', - runResults: [ - { passed: true, output: 'ok' }, - { passed: true, output: 'ok' } - ] - }, - { - requirement: 'Given format, should output JSON', - runResults: [ - { passed: false, output: 'fail' }, - { passed: false, output: 'fail' } - ] - } + { requirement: 'Given addition, should add correctly', runResults: runResultsA }, + { requirement: 'Given format, should output JSON', runResults: runResultsB } ]; const result = aggregatePerAssertionResults({ @@ -92,31 +76,30 @@ describe('aggregatePerAssertionResults()', () => { }); assert({ - given: 'one assertion failing threshold', - should: 'return passed: false', - actual: result.passed, - expected: false - }); - - assert({ - given: 'the passing assertion', - should: 'mark it as passed', - actual: result.assertions[0].passed, - expected: true - }); - - assert({ - given: 'the failing assertion', - should: 'mark it as failed', - actual: result.assertions[1].passed, - expected: false - }); - - assert({ - given: 'the failing assertion', - should: 'have passCount 0', - actual: result.assertions[1].passCount, - expected: 0 + given: 'one assertion meeting threshold and one failing', + should: 'return full result with overall passed: false and correct per-assertion states', + actual: result, + expected: { + passed: false, + assertions: [ + { + requirement: 'Given addition, should add correctly', + passed: true, + passCount: 2, + totalRuns: 2, + averageScore: 0, + runResults: runResultsA + }, + { + requirement: 'Given format, should output JSON', + passed: false, + passCount: 0, + totalRuns: 2, + averageScore: 0, + runResults: runResultsB + } + ] + } }); }); @@ -134,17 +117,20 @@ describe('aggregatePerAssertionResults()', () => { }); assert({ - given: 'per-assertion run results', - should: 'include run results in the assertion', - actual: result.assertions[0].runResults, - expected: runResults - }); - - assert({ - given: 'per-assertion run results', - should: 'include totalRuns per assertion', - actual: result.assertions[0].totalRuns, - expected: 2 + given: 'per-assertion run results with 1 of 2 passes at 50% threshold', + should: 'return full result preserving run results and totalRuns', + actual: result, + expected: { + passed: true, + assertions: [{ + requirement: 'test assertion', + passed: true, + passCount: 1, + totalRuns: 2, + averageScore: 0, + runResults + }] + } }); }); @@ -208,16 +194,9 @@ describe('aggregatePerAssertionResults()', () => { assert({ given: 'empty perAssertionResults array', - should: 'return passed: true (all zero assertions meet threshold)', - actual: result.passed, - expected: true - }); - - assert({ - given: 'empty perAssertionResults array', - should: 'return empty assertions array', - actual: result.assertions, - expected: [] + should: 'return passed: true with empty assertions (vacuous truth)', + actual: result, + expected: { passed: true, assertions: [] } }); }); @@ -241,31 +220,36 @@ describe('aggregatePerAssertionResults()', () => { runs }); - assert({ - given: `${_} with ${passCount} of ${runs} passes`, - should: expectedPass ? 
'pass the assertion' : 'fail the assertion', - actual: result.assertions[0].passed, - expected: expectedPass - }); + const averageScore = Math.round((passCount * 100 / runs) * 100) / 100; assert({ - given: `${_} with ${passCount} passes`, - should: `report passCount of ${totalPasses}`, - actual: result.assertions[0].passCount, - expected: totalPasses + given: `${_} with ${passCount} of ${runs} passes`, + should: expectedPass ? 'pass the assertion with correct counts and score' : 'fail the assertion with correct counts and score', + actual: result, + expected: { + passed: expectedPass, + assertions: [{ + requirement: 'test assertion', + passed: expectedPass, + passCount: totalPasses, + totalRuns: runs, + averageScore, + runResults + }] + } }); }); test.each([ - ['runs above maximum', { runs: 1001, threshold: 75 }, 'INVALID_AGGREGATION_PARAMS'], - ['zero runs', { runs: 0, threshold: 75 }, 'INVALID_AGGREGATION_PARAMS'], - ['negative runs', { runs: -1, threshold: 75 }, 'INVALID_AGGREGATION_PARAMS'], - ['non-integer runs', { runs: 1.5, threshold: 75 }, 'INVALID_AGGREGATION_PARAMS'], - ['NaN runs', { runs: NaN, threshold: 75 }, 'INVALID_AGGREGATION_PARAMS'], - ['threshold above maximum', { runs: 4, threshold: 150 }, 'INVALID_AGGREGATION_PARAMS'], - ['negative threshold', { runs: 4, threshold: -10 }, 'INVALID_AGGREGATION_PARAMS'], - ['NaN threshold', { runs: 4, threshold: NaN }, 'INVALID_AGGREGATION_PARAMS'], - ])('throws ValidationError for %s', (_, { runs, threshold }, expectedCode) => { + ['runs above maximum', { runs: 1001, threshold: 75 }, 'runs: runs must be at most 1000'], + ['zero runs', { runs: 0, threshold: 75 }, 'runs: runs must be at least 1'], + ['negative runs', { runs: -1, threshold: 75 }, 'runs: runs must be at least 1'], + ['non-integer runs', { runs: 1.5, threshold: 75 }, 'runs: runs must be an integer'], + ['NaN runs', { runs: NaN, threshold: 75 }, 'runs: Invalid input: expected number, received NaN'], + ['threshold above maximum', { runs: 4, threshold: 150 }, 'threshold: threshold must be at most 100'], + ['negative threshold', { runs: 4, threshold: -10 }, 'threshold: threshold must be at least 0'], + ['NaN threshold', { runs: 4, threshold: NaN }, 'threshold: Invalid input: expected number, received NaN'], + ])('throws ValidationError for %s', (_, { runs, threshold }, zodMessages) => { const perAssertionResults = [ { requirement: 'test', runResults: [{ passed: true }] } ]; @@ -274,16 +258,17 @@ describe('aggregatePerAssertionResults()', () => { assert({ given: _, - should: 'have ValidationError name in cause', - actual: error?.cause?.name, - expected: 'ValidationError' - }); - - assert({ - given: _, - should: 'have correct error code in cause', - actual: error?.cause?.code, - expected: expectedCode + should: 'throw ValidationError cause with all fields', + // ZodError doesn't set .name as an own property, so use .constructor.name + actual: { ...error?.cause, cause: error?.cause?.cause?.constructor?.name }, + expected: { + name: 'ValidationError', + code: 'INVALID_AGGREGATION_PARAMS', + message: `Invalid parameters for aggregatePerAssertionResults: ${zodMessages}`, + runs, + threshold, + cause: 'ZodError' + } }); }); }); @@ -297,32 +282,16 @@ describe('normalizeJudgment()', () => { const result = normalizeJudgment({ judgeResponse, requirement: 'test assertion', runIndex: 0, logger }); - assert({ - given: 'complete valid judgment with passed: true', - should: 'preserve passed as true', - actual: result.passed, - expected: true - }); - - assert({ - given: 'complete valid 
judgment', - should: 'preserve actual value', - actual: result.actual, - expected: 'Result from agent' - }); - assert({ given: 'complete valid judgment', - should: 'preserve expected value', - actual: result.expected, - expected: 'Expected output' - }); - - assert({ - given: 'complete valid judgment with score 85', - should: 'preserve score value', - actual: result.score, - expected: 85 + should: 'return the full normalized result unchanged', + actual: result, + expected: { + passed: true, + actual: 'Result from agent', + expected: 'Expected output', + score: 85 + } }); }); @@ -353,17 +322,15 @@ describe('normalizeJudgment()', () => { }); assert({ - given: 'judgment missing actual', - should: 'default actual to "No actual provided"', - actual: result.actual, - expected: 'No actual provided' - }); - - assert({ - given: 'judgment missing expected', - should: 'default expected to "No expected provided"', - actual: result.expected, - expected: 'No expected provided' + given: 'judgment missing actual and expected fields', + should: 'return full result with default placeholder strings', + actual: result, + expected: { + passed: true, + actual: 'No actual provided', + expected: 'No expected provided', + score: 100 + } }); assert({ @@ -422,42 +389,17 @@ describe('normalizeJudgment()', () => { assert({ given: _, - should: 'have ParseError name in cause', - actual: error?.cause?.name, - expected: 'ParseError' - }); - - assert({ - given: _, - should: 'have JUDGE_INVALID_RESPONSE code in cause', - actual: error?.cause?.code, - expected: 'JUDGE_INVALID_RESPONSE' + should: 'throw ParseError cause with all fields', + actual: error?.cause, + expected: { + name: 'ParseError', + code: 'JUDGE_INVALID_RESPONSE', + message: 'Judge returned non-object response', + requirement: 'test assertion', + runIndex: 1, + rawResponse: input + } }); }); - test('includes requirement and runIndex in ParseError cause', () => { - const logger = createMockLogger(); - const error = Try(normalizeJudgment, { judgeResponse: null, requirement: 'test assertion', runIndex: 1, logger }); - - assert({ - given: 'null input', - should: 'include requirement in cause', - actual: error?.cause?.requirement, - expected: 'test assertion' - }); - - assert({ - given: 'null input', - should: 'include runIndex in cause', - actual: error?.cause?.runIndex, - expected: 1 - }); - - assert({ - given: 'null input', - should: 'include rawResponse in cause', - actual: error?.cause?.rawResponse, - expected: null - }); - }); }); diff --git a/source/execute-agent.test.js b/source/execute-agent.test.js index 01b934a0..99e58176 100644 --- a/source/execute-agent.test.js +++ b/source/execute-agent.test.js @@ -169,26 +169,22 @@ describe('executeAgent()', () => { })); const err = await Try(executeAgent, { agentConfig, prompt: 'test prompt' }); + const args = '-p --output-format json --no-session-persistence'; assert({ given: 'non-zero exit code from agent process', - should: 'throw Error with AgentProcessError cause', - actual: err?.cause?.name, - expected: 'AgentProcessError' - }); - - assert({ - given: 'non-zero exit code', - should: 'have AGENT_PROCESS_FAILURE code', - actual: err?.cause?.code, - expected: 'AGENT_PROCESS_FAILURE' - }); - - assert({ - given: 'non-zero exit code', - should: 'include exit code in cause', - actual: err?.cause?.exitCode, - expected: 1 + should: 'throw with AgentProcessError cause including all fields', + actual: err?.cause, + expected: { + name: 'AgentProcessError', + code: 'AGENT_PROCESS_FAILURE', + message: `Agent process exited 
with code 1\nCommand: claude ${args}\nStderr: Permission denied\nStdout preview: `, + command: 'claude', + args, + exitCode: 1, + stderr: 'Permission denied', + stdoutPreview: '' + } }); }); @@ -203,19 +199,20 @@ describe('executeAgent()', () => { spawn.mockReturnValue(proc); const err = await Try(executeAgent, { agentConfig, prompt: 'test prompt', timeout: 1 }); + const args = '-p --output-format json --no-session-persistence'; assert({ given: 'agent process that exceeds timeout', - should: 'throw Error with TimeoutError cause', - actual: err?.cause?.name, - expected: 'TimeoutError' - }); - - assert({ - given: 'timeout exceeded', - should: 'have AGENT_TIMEOUT code', - actual: err?.cause?.code, - expected: 'AGENT_TIMEOUT' + should: 'throw with TimeoutError cause including all fields', + actual: err?.cause, + expected: { + name: 'TimeoutError', + code: 'AGENT_TIMEOUT', + message: `Agent process timed out after 1ms. Command: claude ${args}`, + command: 'claude', + args, + timeout: 1 + } }); }); @@ -223,12 +220,23 @@ describe('executeAgent()', () => { spawn.mockReturnValue(createMockProcess({ stdout: 'not valid json output' })); const err = await Try(executeAgent, { agentConfig, prompt: 'test prompt' }); + const args = '-p --output-format json --no-session-persistence'; assert({ given: 'stdout that is not valid JSON', - should: 'throw Error with ParseError cause', - actual: err?.cause?.name, - expected: 'ParseError' + should: 'throw with ParseError cause including all fields', + // 3-deep chain: processAgentOutput wraps unwrapAgentResult's ParseError, + // so err.cause.cause is a standard Error and err.cause.cause.cause is the inner plain cause + actual: { ...err?.cause, cause: err?.cause?.cause?.cause?.name }, + expected: { + name: 'ParseError', + code: 'AGENT_OUTPUT_PARSE_ERROR', + message: 'Failed to parse agent output as JSON: Agent output is not valid JSON: not valid json output', + command: 'claude', + args, + stdoutPreview: 'not valid json output', + cause: 'ParseError' + } }); }); diff --git a/source/extraction-parser.test.js b/source/extraction-parser.test.js index 592ae789..ebe7810c 100644 --- a/source/extraction-parser.test.js +++ b/source/extraction-parser.test.js @@ -91,41 +91,33 @@ describe('parseExtractionResult()', () => { assert({ given: 'non-JSON input', - should: 'throw ExtractionParseError cause', - actual: error?.cause?.name, - expected: 'ExtractionParseError' - }); - - assert({ - given: 'non-JSON input', - should: 'have EXTRACTION_PARSE_FAILURE code', - actual: error?.cause?.code, - expected: 'EXTRACTION_PARSE_FAILURE' - }); - - assert({ - given: 'non-JSON input', - should: 'preserve original JSON SyntaxError as cause', - actual: error?.cause?.cause?.name, - expected: 'SyntaxError' + should: 'throw ExtractionParseError cause with all fields including nested SyntaxError', + // SyntaxError sets .name as an own property, so .name suffices (unlike ZodError) + actual: { ...error?.cause, cause: error?.cause?.cause?.name }, + expected: { + name: 'ExtractionParseError', + code: 'EXTRACTION_PARSE_FAILURE', + message: 'Failed to parse extraction result', + rawInput: 'This is not JSON at all', + cause: 'SyntaxError' + } }); }); test('throws ExtractionValidationError when result has wrong structure', () => { - const error = Try(parseExtractionResult, JSON.stringify({ id: 1, description: 'test', prompt: 'test' })); + const rawOutput = JSON.stringify({ id: 1, description: 'test', prompt: 'test' }); + const error = Try(parseExtractionResult, rawOutput); assert({ given: 'extraction 
result with invalid structure', - should: 'throw ExtractionValidationError cause', - actual: error?.cause?.name, - expected: 'ExtractionValidationError' - }); - - assert({ - given: 'extraction result with invalid structure', - should: 'have EXTRACTION_VALIDATION_FAILURE code', - actual: error?.cause?.code, - expected: 'EXTRACTION_VALIDATION_FAILURE' + should: 'throw ExtractionValidationError cause with all fields', + actual: error?.cause, + expected: { + name: 'ExtractionValidationError', + code: 'EXTRACTION_VALIDATION_FAILURE', + message: 'Extraction result is missing required field: userPrompt', + rawOutput + } }); }); @@ -133,26 +125,35 @@ describe('parseExtractionResult()', () => { [ 'missing importPaths', { userPrompt: 'test', assertions: [] }, - 'importPaths' + 'importPaths', + 'Extraction result is missing required field: importPaths (must be an array)' ], [ 'missing userPrompt', { importPaths: [], assertions: [] }, - 'userPrompt' + 'userPrompt', + 'Extraction result is missing required field: userPrompt' ], [ 'missing assertions', { userPrompt: 'test', importPaths: [] }, - 'assertions' + 'assertions', + 'Extraction result is missing required field: assertions (must be an array)' ], - ])('throws when %s is missing', (_, input, missingField) => { - const error = Try(parseExtractionResult, JSON.stringify(input)); + ])('throws when %s is missing', (_, input, missingField, expectedCauseMessage) => { + const rawOutput = JSON.stringify(input); + const error = Try(parseExtractionResult, rawOutput); assert({ given: `extraction result missing ${missingField}`, - should: 'throw ExtractionValidationError', - actual: error?.cause?.name, - expected: 'ExtractionValidationError' + should: 'throw ExtractionValidationError cause with all fields', + actual: error?.cause, + expected: { + name: 'ExtractionValidationError', + code: 'EXTRACTION_VALIDATION_FAILURE', + message: expectedCauseMessage, + rawOutput + } }); assert({ @@ -164,19 +165,26 @@ describe('parseExtractionResult()', () => { }); test('throws when assertion is missing required field', () => { - const missingAssertionFields = JSON.stringify({ + const rawOutput = JSON.stringify({ userPrompt: 'test', importPaths: [], assertions: [{ id: 1 }] }); - const error = Try(parseExtractionResult, missingAssertionFields); + const error = Try(parseExtractionResult, rawOutput); assert({ given: 'assertion missing the requirement field', - should: 'throw ExtractionValidationError', - actual: error?.cause?.name, - expected: 'ExtractionValidationError' + should: 'throw ExtractionValidationError cause with all fields', + actual: error?.cause, + expected: { + name: 'ExtractionValidationError', + code: 'EXTRACTION_VALIDATION_FAILURE', + message: 'Assertion at index 0 is missing required field: requirement', + assertionIndex: 0, + missingField: 'requirement', + rawOutput + } }); assert({ diff --git a/source/tap-yaml.test.js b/source/tap-yaml.test.js index cc3606f0..9a3eea14 100644 --- a/source/tap-yaml.test.js +++ b/source/tap-yaml.test.js @@ -17,30 +17,14 @@ score: 100 assert({ given: 'TAP YAML with passed: true', - should: 'parse passed as boolean true', - actual: result.passed, - expected: true - }); - - assert({ - given: 'TAP YAML with passed: true', - should: 'parse actual field', - actual: result.actual, - expected: 'the result was correct' - }); - - assert({ - given: 'TAP YAML with passed: true', - should: 'parse expected field', - actual: result.expected, - expected: 'a correct result' - }); - - assert({ - given: 'TAP YAML with passed: true', - should: 
'parse score as number', - actual: result.score, - expected: 100 + should: 'parse all fields correctly', + actual: result, + expected: { + passed: true, + actual: 'the result was correct', + expected: 'a correct result', + score: 100 + } }); }); @@ -56,16 +40,14 @@ score: 20 assert({ given: 'TAP YAML with passed: false', - should: 'parse passed as boolean false', - actual: result.passed, - expected: false - }); - - assert({ - given: 'TAP YAML with passed: false', - should: 'parse score as number', - actual: result.score, - expected: 20 + should: 'parse all fields correctly', + actual: result, + expected: { + passed: false, + actual: 'incorrect output', + expected: 'correct output', + score: 20 + } }); }); @@ -89,16 +71,26 @@ score: 90 assert({ given: 'quoted string values', - should: 'strip quotes from actual', - actual: resultQuoted.actual, - expected: 'quoted value' + should: 'strip quotes from all fields', + actual: resultQuoted, + expected: { + passed: true, + actual: 'quoted value', + expected: 'another quoted', + score: 90 + } }); assert({ given: 'unquoted string values', - should: 'parse unquoted actual', - actual: resultUnquoted.actual, - expected: 'unquoted value' + should: 'parse all fields without alteration', + actual: resultUnquoted, + expected: { + passed: true, + actual: 'unquoted value', + expected: 'another unquoted', + score: 90 + } }); }); @@ -109,23 +101,14 @@ score: 90 assert({ given: 'input without --- markers', - should: 'throw error', - actual: error !== undefined, - expected: true - }); - - assert({ - given: 'input without --- markers', - should: 'have ParseError cause name', - actual: error?.cause?.name, - expected: 'ParseError' - }); - - assert({ - given: 'input without --- markers', - should: 'have JUDGE_INVALID_TAP_YAML code', - actual: error?.cause?.code, - expected: 'JUDGE_INVALID_TAP_YAML' + should: 'throw with ParseError cause with all fields', + actual: error?.cause, + expected: { + name: 'ParseError', + code: 'JUDGE_INVALID_TAP_YAML', + message: 'Judge output does not contain a valid TAP YAML block (--- delimited)', + rawOutput: invalidInput + } }); }); @@ -141,16 +124,14 @@ score: 85 assert({ given: 'score field with numeric value', - should: 'parse score as number type', - actual: typeof result.score === 'number', - expected: true - }); - - assert({ - given: 'score field with numeric value', - should: 'parse correct score value', - actual: result.score, - expected: 85 + should: 'parse all fields including score as a number', + actual: result, + expected: { + passed: true, + actual: 'result', + expected: 'something', + score: 85 + } }); }); @@ -163,31 +144,13 @@ score: 75 const result = parseTAPYAML(input); assert({ - given: 'YAML block missing actual and expected fields', - should: 'parse passed field correctly', - actual: result.passed, - expected: true - }); - - assert({ - given: 'YAML block missing actual and expected fields', - should: 'parse score field correctly', - actual: result.score, - expected: 75 - }); - - assert({ - given: 'YAML block missing actual field', - should: 'not include actual in result', - actual: result.actual, - expected: undefined - }); - - assert({ - given: 'YAML block missing expected field', - should: 'not include expected in result', - actual: result.expected, - expected: undefined + given: 'YAML block with only passed and score fields', + should: 'return only the present fields without actual or expected', + actual: result, + expected: { + passed: true, + score: 75 + } }); }); });