diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index f261c5e86..35a8cb79a 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -300,6 +300,11 @@ All tools should have tests added in [tools/tests](tools/tests), tests are imple npm run test ``` +Specify the test path to run tests for one of the tools: +```bash +npm run test -- tools/tests/tester/ +``` + #### Lints All code is linted using [ESLint](https://eslint.org/) in combination with [typescript-eslint](https://typescript-eslint.io/). Linting can be run via: diff --git a/tests/index_lifecycle.yaml b/tests/index_lifecycle.yaml index 4e5bd943d..7009d8968 100644 --- a/tests/index_lifecycle.yaml +++ b/tests/index_lifecycle.yaml @@ -1,7 +1,7 @@ $schema: ../json_schemas/test_story.schema.yaml skip: false -description: This story tests all endpoints relevant the lifecycle of an index, from creation to deletion. +description: This story tests all endpoints relevant to the lifecycle of an index, from creation to deletion. epilogues: - path: /books method: DELETE diff --git a/tools/src/tester/ResultsDisplayer.ts b/tools/src/tester/ResultsDisplayer.ts index 7a0d04f47..f97c8d079 100644 --- a/tools/src/tester/ResultsDisplayer.ts +++ b/tools/src/tester/ResultsDisplayer.ts @@ -2,6 +2,10 @@ import { type ChapterEvaluation, type Evaluation, Result, type StoryEvaluation } import { overall_result } from './helpers' import * as ansi from './Ansi' +export interface TestRunOptions { + dry_run?: boolean +} + export interface DisplayOptions { tab_width?: number verbose?: boolean @@ -9,13 +13,11 @@ export interface DisplayOptions { export default class ResultsDisplayer { evaluation: StoryEvaluation - skip_components: boolean tab_width: number verbose: boolean constructor (evaluation: StoryEvaluation, opts: DisplayOptions) { this.evaluation = evaluation - this.skip_components = [Result.PASSED, Result.SKIPPED].includes(evaluation.result) this.tab_width = opts.tab_width ?? 4 this.verbose = opts.verbose ?? 
false } @@ -31,22 +33,20 @@ export default class ResultsDisplayer { #display_story (): void { const result = this.evaluation.result const message = this.evaluation.full_path - const title = ansi.cyan(ansi.b(this.evaluation.display_path)) + const title = ansi.cyan(ansi.b(this.evaluation.description ?? this.evaluation.display_path)) this.#display_evaluation({ result, message }, title) } #display_chapters (evaluations: ChapterEvaluation[], title: string): void { - if (this.skip_components || evaluations.length === 0) return + if (evaluations.length === 0) return const result = overall_result(evaluations.map(e => e.overall)) + if (!this.verbose && (result === Result.SKIPPED || result === Result.PASSED)) return this.#display_evaluation({ result }, title, this.tab_width) - if (result === Result.PASSED) return for (const evaluation of evaluations) this.#display_chapter(evaluation) } #display_chapter (chapter: ChapterEvaluation): void { this.#display_evaluation(chapter.overall, ansi.i(chapter.title), this.tab_width * 2) - if (chapter.overall.result === Result.PASSED || chapter.overall.result === Result.SKIPPED) return - this.#display_parameters(chapter.request?.parameters ?? 
{}) this.#display_request_body(chapter.request?.requestBody) this.#display_status(chapter.response?.status) @@ -57,7 +57,6 @@ export default class ResultsDisplayer { if (Object.keys(parameters).length === 0) return const result = overall_result(Object.values(parameters)) this.#display_evaluation({ result }, 'PARAMETERS', this.tab_width * 3) - if (result === Result.PASSED) return for (const [name, evaluation] of Object.entries(parameters)) { this.#display_evaluation(evaluation, name, this.tab_width * 4) } diff --git a/tools/src/tester/StoryEvaluator.ts b/tools/src/tester/StoryEvaluator.ts index 7c049125c..bea5afd77 100644 --- a/tools/src/tester/StoryEvaluator.ts +++ b/tools/src/tester/StoryEvaluator.ts @@ -12,13 +12,15 @@ export interface StoryFile { } export default class StoryEvaluator { + dry_run: boolean story: Story display_path: string full_path: string has_errors: boolean = false chapter_reader: ChapterReader - constructor (story_file: StoryFile) { + constructor (story_file: StoryFile, dry_run?: boolean) { + this.dry_run = dry_run ?? 
false this.story = story_file.story this.display_path = story_file.display_path this.full_path = story_file.full_path @@ -56,10 +58,15 @@ export default class StoryEvaluator { const evaluations: ChapterEvaluation[] = [] for (const chapter of chapters) { - const evaluator = new ChapterEvaluator(chapter) - const evaluation = await evaluator.evaluate(has_errors) - has_errors = has_errors || evaluation.overall.result === Result.ERROR - evaluations.push(evaluation) + if (this.dry_run) { + const title = chapter.synopsis || `${chapter.method} ${chapter.path}` + evaluations.push({ title, overall: { result: Result.SKIPPED, message: 'Dry Run', error: undefined } }) + } else { + const evaluator = new ChapterEvaluator(chapter) + const evaluation = await evaluator.evaluate(has_errors) + has_errors = has_errors || evaluation.overall.result === Result.ERROR + evaluations.push(evaluation) + } } return evaluations @@ -69,12 +76,16 @@ export default class StoryEvaluator { const evaluations: ChapterEvaluation[] = [] for (const chapter of chapters) { const title = `${chapter.method} ${chapter.path}` - const response = await this.chapter_reader.read(chapter) - const status = chapter.status ?? [] - if (status.includes(response.status)) evaluations.push({ title, overall: { result: Result.PASSED } }) - else { - this.has_errors = true - evaluations.push({ title, overall: { result: Result.ERROR, message: response.message, error: response.error as Error } }) + if (this.dry_run) { + evaluations.push({ title, overall: { result: Result.SKIPPED, message: 'Dry Run', error: undefined } }) + } else { + const response = await this.chapter_reader.read(chapter) + const status = chapter.status ?? 
[] + if (status.includes(response.status)) evaluations.push({ title, overall: { result: Result.PASSED } }) + else { + this.has_errors = true + evaluations.push({ title, overall: { result: Result.ERROR, message: response.message, error: response.error as Error } }) + } } } return evaluations diff --git a/tools/src/tester/TestsRunner.ts b/tools/src/tester/TestsRunner.ts index a5ba0689f..80e20604d 100644 --- a/tools/src/tester/TestsRunner.ts +++ b/tools/src/tester/TestsRunner.ts @@ -7,11 +7,11 @@ import fs from 'fs' import { type Story } from './types/story.types' import { read_yaml } from '../../helpers' import { Result } from './types/eval.types' -import ResultsDisplayer, { type DisplayOptions } from './ResultsDisplayer' +import ResultsDisplayer, { type TestRunOptions, type DisplayOptions } from './ResultsDisplayer' import SharedResources from './SharedResources' import { resolve, basename } from 'path' -type TestsRunnerOptions = DisplayOptions & Record<string, any> +type TestsRunnerOptions = TestRunOptions & DisplayOptions & Record<string, any> export default class TestsRunner { path: string // Path to a story file or a directory containing story files @@ -31,7 +31,7 @@ export default class TestsRunner { let failed = false const story_files = this.#collect_story_files(this.path, '', '').sort((a, b) => a.display_path.localeCompare(b.display_path)) for (const story_file of story_files) { - const evaluator = new StoryEvaluator(story_file) + const evaluator = new StoryEvaluator(story_file, this.opts.dry_run) const evaluation = await evaluator.evaluate() const displayer = new ResultsDisplayer(evaluation, this.opts) displayer.display() diff --git a/tools/src/tester/start.ts b/tools/src/tester/start.ts index 7cbe9a2ab..4b2527bf0 100644 --- a/tools/src/tester/start.ts +++ b/tools/src/tester/start.ts @@ -10,14 +10,17 @@ const command = new Command() .addOption(new Option('--tests, --tests-path <path>', 'path to the root folder of the tests').default('./tests')) .addOption(new Option('--tab-width <size>', 'tab 
width for displayed results').default('4')) .addOption(new Option('--verbose', 'whether to print the full stack trace of errors')) + .addOption(new Option('--dry-run', 'dry run only, do not make HTTP requests')) .allowExcessArguments(false) .parse() const opts = command.opts() -const display_options = { +const options = { verbose: opts.verbose ?? false, - tab_width: Number.parseInt(opts.tabWidth) + tab_width: Number.parseInt(opts.tabWidth), + dry_run: opts.dryRun ?? false } + const spec = (new OpenApiMerger(opts.specPath, LogLevel.error)).merge() -const runner = new TestsRunner(spec, opts.testsPath, display_options) +const runner = new TestsRunner(spec, opts.testsPath, options) void runner.run().then(() => { _.noop() }) diff --git a/tools/tests/tester/fixtures/empty_with_all_the_parts.yaml b/tools/tests/tester/fixtures/empty_with_all_the_parts.yaml new file mode 100644 index 000000000..9ea08619d --- /dev/null +++ b/tools/tests/tester/fixtures/empty_with_all_the_parts.yaml @@ -0,0 +1,46 @@ +$schema: ../json_schemas/test_story.schema.yaml + +skip: false + +description: A story with all its parts. + +prologues: + - path: /things + method: DELETE + status: [200, 404] + +epilogues: + - path: /things/one + method: DELETE + status: [200, 404] + +chapters: + - synopsis: A PUT method. + path: /{index} + method: PUT + parameters: + index: one + + - synopsis: A HEAD method. + path: /{index} + method: HEAD + parameters: + index: one + + - synopsis: A GET method. + path: /{index} + method: GET + parameters: + index: one + + - synopsis: A POST method. + path: /{index}/_doc + method: POST + parameters: + index: one + + - synopsis: A DELETE method. 
+ path: /{index} + method: DELETE + parameters: + index: one diff --git a/tools/tests/tester/fixtures/empty_with_description.yaml b/tools/tests/tester/fixtures/empty_with_description.yaml new file mode 100644 index 000000000..2898df580 --- /dev/null +++ b/tools/tests/tester/fixtures/empty_with_description.yaml @@ -0,0 +1,5 @@ +$schema: ../json_schemas/test_story.schema.yaml + +description: A story with no beginning or end. + +chapters: [] \ No newline at end of file diff --git a/tools/tests/tester/start.test.ts b/tools/tests/tester/start.test.ts index 3d7a81ba0..aec9b67db 100644 --- a/tools/tests/tester/start.test.ts +++ b/tools/tests/tester/start.test.ts @@ -1,5 +1,6 @@ import { spawnSync } from 'child_process' import * as ansi from '../../src/tester/Ansi' +import * as path from 'path' const spec = (args: string[]): any => { const start = spawnSync('ts-node', ['tools/src/tester/start.ts'].concat(args), { @@ -19,12 +20,34 @@ test('--invalid', async () => { expect(spec(['--invalid']).stderr).toContain("error: unknown option '--invalid'") }) -test('--tests', async () => { - expect(spec(['--tests', 'tools/tests/tester/fixtures']).stdout).toContain( +test('displays story filename', async () => { + expect(spec(['--tests', 'tools/tests/tester/fixtures/empty.yaml']).stdout).toContain( `${ansi.green('PASSED ')} ${ansi.cyan(ansi.b('empty.yaml'))}` ) }) +test('displays story description', async () => { + expect(spec(['--tests', 'tools/tests/tester/fixtures/empty_with_description.yaml']).stdout).toContain( + `${ansi.green('PASSED ')} ${ansi.cyan(ansi.b('A story with no beginning or end.'))}` + ) +}) + test.todo('--tab-width') -test.todo('--verbose') + +test('--dry-run', async () => { + const test_yaml = 'tools/tests/tester/fixtures/empty_with_all_the_parts.yaml' + const s = spec(['--dry-run', '--tests', test_yaml]).stdout + const full_path = path.join(__dirname, '../../../' + test_yaml) + expect(s).toEqual(`${ansi.yellow('SKIPPED')} ${ansi.cyan(ansi.b('A story with all its 
parts.'))} ${ansi.gray('(' + full_path + ')')}\n\n\n`) +}) + +test('--dry-run --verbose', async () => { + const s = spec(['--dry-run', '--verbose', '--tests', 'tools/tests/tester/fixtures/empty_with_all_the_parts.yaml']).stdout + expect(s).toContain(`${ansi.yellow('SKIPPED')} ${ansi.cyan(ansi.b('A story with all its parts.'))}`) + expect(s).toContain(`${ansi.yellow('SKIPPED')} CHAPTERS`) + expect(s).toContain(`${ansi.yellow('SKIPPED')} EPILOGUES`) + expect(s).toContain(`${ansi.yellow('SKIPPED')} PROLOGUES`) + expect(s).toContain(`${ansi.yellow('SKIPPED')} ${ansi.i('A PUT method.')} ${ansi.gray('(Dry Run)')}`) +}) + test.todo('--spec')