diff --git a/.eslintrc.js b/.eslintrc.js index eb45c536..d3e20e93 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -10,7 +10,9 @@ module.exports = { 'digitalbazaar' ], ignorePatterns: [ + 'coverage/', 'dist/', + 'test-suites', 'tests/webidl/WebIDLParser.js', 'tests/webidl/idlharness.js', 'tests/webidl/testharness.js' diff --git a/CHANGELOG.md b/CHANGELOG.md index c039a057..91128491 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # jsonld ChangeLog -## 8.1.2 - 2023-03-xx +## 8.2.0 - 2023-03-xx ### Changed - Update for latest [rdf-canon][] changes: test suite location, README, links, @@ -8,6 +8,15 @@ - Skip test with 'U' escapes. Will enable when [rdf-canonize][] dependency is updated. - Test on Node.js 20.x. +- Align test and benchmark code with [rdf-canonize][]. + - **NOTE**: This changes various testing and benchmark runner features and + options. + - Update env var usage. + - Use more common code between Node.js and karma tests. + - Conditionally load test suites. + - Fix various minor bugs. + - Add multiple jobs benchmarking support. +- Update benchmark compare script. ### Fixed - Improve safe mode for `@graph` use cases. diff --git a/README.md b/README.md index 6d6db7c6..98299837 100644 --- a/README.md +++ b/README.md @@ -389,13 +389,13 @@ Node.js tests can be run with a simple command: npm test If you installed the test suites elsewhere, or wish to run other tests, use -the `JSONLD_TESTS` environment var: +the `TESTS` environment var: - JSONLD_TESTS="/tmp/org/test-suites /tmp/norm/tests" npm test + TESTS="/tmp/org/test-suites /tmp/norm/tests" npm test This feature can be used to run the older json-ld.org test suite: - JSONLD_TESTS=/tmp/json-ld.org/test-suite npm test + TESTS=/tmp/json-ld.org/test-suite npm test Browser testing can be done with Karma: @@ -419,7 +419,7 @@ Remote context tests are also available: # run the context server in the background or another terminal node tests/remote-context-server.js - JSONLD_TESTS=`pwd`/tests npm test + TESTS=`pwd`/tests npm test To generate EARL reports: @@ -432,7 +432,7 @@ To generate EARL reports: To generate an EARL report with the `json-ld-api` and `json-ld-framing` tests as used on the official [JSON-LD Processor Conformance][] page - JSONLD_TESTS="`pwd`/../json-ld-api/tests `pwd`/../json-ld-framing/tests" EARL="jsonld-js-earl.jsonld" npm test + TESTS="`pwd`/../json-ld-api/tests `pwd`/../json-ld-framing/tests" EARL="jsonld-js-earl.jsonld" npm test The EARL `.jsonld` output can be converted to `.ttl` using the [rdf][] tool: @@ -449,14 +449,14 @@ Benchmarks Benchmarks can be created from any manifest that the test system supports. Use a command line with a test suite and a benchmark flag: - JSONLD_TESTS=/tmp/benchmark-manifest.jsonld JSONLD_BENCHMARK=1 npm test + TESTS=/tmp/benchmark-manifest.jsonld BENCHMARK=1 npm test EARL reports with benchmark data can be generated with an optional environment details: - JSONLD_TESTS=`pwd`/../json-ld.org/benchmarks/b001-manifiest.jsonld JSONLD_BENCHMARK=1 EARL=earl-test.jsonld TEST_ENV=1 npm test + TESTS=`pwd`/../json-ld.org/benchmarks/b001-manifiest.jsonld BENCHMARK=1 EARL=earl-test.jsonld TEST_ENV=1 npm test -See `tests/test.js` for more `TEST_ENV` control and options. +See `tests/test.js` for more `TEST_ENV` and `BENCHMARK` control and options. These reports can be compared with the `benchmarks/compare/` tool and at the [JSON-LD Benchmarks][] site. 
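For example, a sketch of a full benchmark-and-compare round trip using the renamed variables (the compare script's positional EARL file arguments and the report file names here are illustrative assumptions; `--env present` is the new output format added in `benchmarks/compare/compare.js` below):

    TESTS=/tmp/benchmark-manifest.jsonld BENCHMARK=1 EARL=earl-main.jsonld TEST_ENV=1 npm test
    node benchmarks/compare/compare.js --env present earl-main.jsonld earl-branch.jsonld

A multi-job run such as `BENCHMARK=jobs=1+10 npm test` also reports time per job, but EARL output only supports a single job size.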
diff --git a/benchmarks/compare/compare.js b/benchmarks/compare/compare.js index 12803c9b..e537495e 100755 --- a/benchmarks/compare/compare.js +++ b/benchmarks/compare/compare.js @@ -27,7 +27,7 @@ yargs(hideBin(process.argv)) }) .option('env', { alias: 'e', - choices: ['none', 'all', 'combined'], + choices: ['none', 'all', 'present', 'combined'], default: 'none', description: 'Output environment format' }) @@ -50,6 +50,7 @@ async function compare({ fn: f, content: await fs.readFile(f, 'utf8') }))); + //console.log(contents); const results = contents .map(c => ({ fn: c.fn, @@ -57,11 +58,14 @@ async function compare({ // map of test id => assertion testMap: new Map() })) + .map(c => { + //console.log('C', c); + return c; + }) .map(c => ({ ...c, - // FIXME process properly - env: c.content['@included'][0], - label: c.content['@included'][0]['jldb:label'] + env: c.content['@included']?.[0] || {}, + label: c.content['@included']?.[0]?.['jldb:label'] })); //console.log(JSON.stringify(results, null, 2)); // order of tests found in each result set @@ -96,14 +100,17 @@ async function compare({ hz(r.testMap.get(t)))) .map(d => relative ? d.toFixed(2) + '%' : d.toFixed(2)) ]); - //console.log(compared); - //console.log(results); + //console.log('COMPARED', compared); + //console.log('RESULTS', results); const fnprefixlen = commonPathPrefix(file).length; + function label(res) { + return res.label || res.fn.slice(fnprefixlen); + } console.log('## Comparison'); console.log(markdownTable([ [ 'Test', - ...results.map(r => r.label || r.fn.slice(fnprefixlen)) + ...results.map(label) ], ...compared ], { @@ -130,15 +137,58 @@ async function compare({ ['Comment', 'jldb:comment'] ]; + // show all properties if(env === 'all') { console.log(); console.log('## Environment'); - console.log(markdownTable([ - envProps.map(p => p[0]), - ...results.map(r => envProps.map(p => r.env[p[1]] || '')) + //const data = results.map(r => envProps.map(p => { + // return (p[1] === 'jldb:label') ? label(r) : r.env[p[1]] || ''; + //})); + const data = results.map(r => [ + label(r), + ...envProps.slice(1).map(p => r.env[p[1]] || '') + ]); + if(data.length > 0) { + console.log(markdownTable([ + envProps.map(p => p[0]), + ...data + ])); + } else { + console.log('*not specified*'); + } + } + + // show present properties + if(env === 'present') { + console.log(); + console.log('## Environment'); + // get all data + const data = results.map(r => [ + label(r), + ...envProps.slice(1).map(p => r.env[p[1]] || '') + ]); + // count present truthy fields per col + const propCounts = envProps.slice(1) + .map(p => results.reduce((c, r) => r.env[p[1]] ? ++c : c, 0)); + const presentProps = [ + envProps[0], + ...envProps.slice(1).filter((v, i) => propCounts[i] > 0) + ]; + const presentData = data.map(d => ([ + d[0], + ...d.slice(1).filter((v, i) => propCounts[i] > 0) ])); + if(data.length > 0) { + console.log(markdownTable([ + presentProps.map(p => p[0]), + ...presentData + ])); + } else { + console.log('*not specified*'); + } } + // show combined grouping of properties if(env === 'combined') { console.log(); console.log('## Environment'); @@ -149,11 +199,16 @@ async function compare({ ); return [key, values.size ?
[...values].join(', ') : []]; } - console.log(markdownTable([ - ['Key', 'Values'], - ...envProps - .map(p => envline(p[0], p[1])) - .filter(p => p[1].length) - ])); + const data = envProps + .map(p => envline(p[0], p[1])) + .filter(p => p[1].length); + if(data.length > 0) { + console.log(markdownTable([ + ['Key', 'Values'], + ...data + ])); + } else { + console.log('*not specified*'); + } } } diff --git a/karma.conf.js b/karma.conf.js index bd8d5c4f..81a6ce96 100644 --- a/karma.conf.js +++ b/karma.conf.js @@ -1,17 +1,12 @@ /** - * Karam configuration for jsonld.js. + * Karma configuration for jsonld.js. * - * Set dirs, manifests, or js to run: - * JSONLD_TESTS="f1 f2 ..." - * Output an EARL report: - * EARL=filename - * Bail with tests fail: - * BAIL=true + * See ./test/test.js for env options. * * @author Dave Longley * @author David I. Lehn * - * Copyright (c) 2011-2017 Digital Bazaar, Inc. All rights reserved. + * Copyright (c) 2011-2023 Digital Bazaar, Inc. All rights reserved. */ const os = require('os'); const webpack = require('webpack'); @@ -67,11 +62,10 @@ module.exports = function(config) { plugins: [ new webpack.DefinePlugin({ 'process.env.BAIL': JSON.stringify(process.env.BAIL), + 'process.env.BENCHMARK': JSON.stringify(process.env.BENCHMARK), 'process.env.EARL': JSON.stringify(process.env.EARL), + 'process.env.TESTS': JSON.stringify(process.env.TESTS), 'process.env.TEST_ENV': JSON.stringify(process.env.TEST_ENV), - 'process.env.JSONLD_BENCHMARK': - JSON.stringify(process.env.JSONLD_BENCHMARK), - 'process.env.JSONLD_TESTS': JSON.stringify(process.env.JSONLD_TESTS), 'process.env.TEST_ROOT_DIR': JSON.stringify(__dirname), 'process.env.VERBOSE_SKIP': JSON.stringify(process.env.VERBOSE_SKIP), // for 'auto' test env @@ -149,10 +143,10 @@ module.exports = function(config) { [ 'envify', { BAIL: process.env.BAIL, + BENCHMARK: process.env.BENCHMARK, EARL: process.env.EARL, + TESTS: process.env.TESTS, TEST_ENV: process.env.TEST_ENV, - JSONLD_BENCHMARK: process.env.JSONLD_BENCHMARK, - JSONLD_TESTS: process.env.JSONLD_TESTS, TEST_ROOT_DIR: __dirname, VERBOSE_SKIP: process.env.VERBOSE_SKIP, // for 'auto' test env diff --git a/package.json b/package.json index a2d0cea9..7a0bd8ee 100644 --- a/package.json +++ b/package.json @@ -69,6 +69,7 @@ "karma-sourcemap-loader": "^0.3.7", "karma-tap-reporter": "0.0.6", "karma-webpack": "^4.0.2", + "klona": "^2.0.5", "mocha": "^8.3.2", "mocha-lcov-reporter": "^1.3.0", "nyc": "^15.1.0", @@ -99,7 +100,7 @@ "fetch-json-ld-org-test-suite": "if [ ! -e test-suites/json-ld.org ]; then git clone --depth 1 https://github.com/json-ld/json-ld.org.git test-suites/json-ld.org; fi", "fetch-rdf-canon-test-suite": "if [ ! -e test-suites/rdf-canon ]; then git clone --depth 1 https://github.com/w3c/rdf-canon.git test-suites/rdf-canon; fi", "test": "npm run test-node", - "test-node": "cross-env NODE_ENV=test mocha --delay -t 30000 -A -R ${REPORTER:-spec} tests/test.js", + "test-node": "cross-env NODE_ENV=test mocha --delay -t 30000 -A -R ${REPORTER:-spec} tests/test-node.js", "test-karma": "cross-env NODE_ENV=test karma start", "coverage": "cross-env NODE_ENV=test nyc --reporter=lcov --reporter=text-summary npm test", "coverage-ci": "cross-env NODE_ENV=test nyc --reporter=lcovonly npm run test", diff --git a/tests/earl-report.js b/tests/earl-report.js index e5545acd..79b8c940 100644 --- a/tests/earl-report.js +++ b/tests/earl-report.js @@ -6,116 +6,6 @@ * Copyright (c) 2011-2022 Digital Bazaar, Inc. All rights reserved. */ -/** - * Create an EARL Reporter. 
- * - * @param options {Object} reporter options - * id: {String} report id - * env: {Object} environment description - */ -function EarlReport(options) { - let today = new Date(); - today = today.getFullYear() + '-' + - (today.getMonth() < 9 ? - '0' + (today.getMonth() + 1) : today.getMonth() + 1) + '-' + - (today.getDate() < 10 ? '0' + today.getDate() : today.getDate()); - // one date for tests with no subsecond resolution - this.now = new Date(); - this.now.setMilliseconds(0); - this.id = options.id; - this.env = options.env; - // test environment - this._environment = null; - /* eslint-disable quote-props */ - this._report = { - '@context': { - 'doap': 'http://usefulinc.com/ns/doap#', - 'foaf': 'http://xmlns.com/foaf/0.1/', - 'dc': 'http://purl.org/dc/terms/', - 'earl': 'http://www.w3.org/ns/earl#', - 'xsd': 'http://www.w3.org/2001/XMLSchema#', - 'jsonld': 'http://www.w3.org/ns/json-ld#', - 'doap:homepage': {'@type': '@id'}, - 'doap:license': {'@type': '@id'}, - 'dc:creator': {'@type': '@id'}, - 'foaf:homepage': {'@type': '@id'}, - 'subjectOf': {'@reverse': 'earl:subject'}, - 'earl:assertedBy': {'@type': '@id'}, - 'earl:mode': {'@type': '@id'}, - 'earl:test': {'@type': '@id'}, - 'earl:outcome': {'@type': '@id'}, - 'dc:date': {'@type': 'xsd:date'}, - 'doap:created': {'@type': 'xsd:date'} - }, - '@id': 'https://github.com/digitalbazaar/jsonld.js', - '@type': [ - 'doap:Project', - 'earl:TestSubject', - 'earl:Software' - ], - 'doap:name': 'jsonld.js', - 'dc:title': 'jsonld.js', - 'doap:homepage': 'https://github.com/digitalbazaar/jsonld.js', - 'doap:license': - 'https://github.com/digitalbazaar/jsonld.js/blob/master/LICENSE', - 'doap:description': 'A JSON-LD processor for JavaScript', - 'doap:programming-language': 'JavaScript', - 'dc:creator': 'https://digitalbazaar.com/', - 'doap:developer': { - '@id': 'https://digitalbazaar.com/', - '@type': [ - 'foaf:Organization', - 'earl:Assertor' - ], - 'foaf:name': 'Digital Bazaar, Inc.', - 'foaf:homepage': 'https://digitalbazaar.com/' - }, - 'doap:release': { - 'doap:revision': '', - 'doap:created': today - }, - 'subjectOf': [] - }; - /* eslint-enable quote-props */ - if(this.env && this.env.version) { - this._report['doap:release']['doap:revision'] = this.env.version; - } -} - -EarlReport.prototype.addAssertion = function(test, pass, options) { - options = options || {}; - const assertion = { - '@type': 'earl:Assertion', - 'earl:assertedBy': this._report['doap:developer']['@id'], - 'earl:mode': 'earl:automatic', - 'earl:test': test['@id'], - 'earl:result': { - '@type': 'earl:TestResult', - 'dc:date': this.now.toISOString(), - 'earl:outcome': pass ? 
'earl:passed' : 'earl:failed' - } - }; - if(options.benchmarkResult) { - const result = { - ...options.benchmarkResult - }; - if(this._environment) { - result['jldb:environment'] = this._environment['@id']; - } - assertion['jldb:result'] = result; - } - this._report.subjectOf.push(assertion); - return this; -}; - -EarlReport.prototype.report = function() { - return this._report; -}; - -EarlReport.prototype.reportJson = function() { - return JSON.stringify(this._report, null, 2); -}; - /* eslint-disable quote-props */ const _benchmarkContext = { 'jldb': 'http://json-ld.org/benchmarks/vocab#', @@ -162,39 +52,152 @@ const _benchmarkContext = { }; /* eslint-enable quote-props */ -// setup @context and environment to handle benchmark data -EarlReport.prototype.setupForBenchmarks = function(options) { - // add context if needed - if(!Array.isArray(this._report['@context'])) { - this._report['@context'] = [this._report['@context']]; - } - if(!this._report['@context'].some(c => c === _benchmarkContext)) { - this._report['@context'].push(_benchmarkContext); +/** + * EARL Reporter + */ +class EarlReport { + /** + * Create an EARL Reporter. + * + * @param options {Object} reporter options + * env: {Object} environment description + */ + constructor(options) { + let today = new Date(); + today = today.getFullYear() + '-' + + (today.getMonth() < 9 ? + '0' + (today.getMonth() + 1) : today.getMonth() + 1) + '-' + + (today.getDate() < 10 ? '0' + today.getDate() : today.getDate()); + // one date for tests with no subsecond resolution + this.now = new Date(); + this.now.setMilliseconds(0); + this.env = options.env; + // test environment + this._environment = null; + /* eslint-disable quote-props */ + this._report = { + '@context': { + 'doap': 'http://usefulinc.com/ns/doap#', + 'foaf': 'http://xmlns.com/foaf/0.1/', + 'dc': 'http://purl.org/dc/terms/', + 'earl': 'http://www.w3.org/ns/earl#', + 'xsd': 'http://www.w3.org/2001/XMLSchema#', + 'jsonld': 'http://www.w3.org/ns/json-ld#', + 'doap:homepage': {'@type': '@id'}, + 'doap:license': {'@type': '@id'}, + 'dc:creator': {'@type': '@id'}, + 'foaf:homepage': {'@type': '@id'}, + 'subjectOf': {'@reverse': 'earl:subject'}, + 'earl:assertedBy': {'@type': '@id'}, + 'earl:mode': {'@type': '@id'}, + 'earl:test': {'@type': '@id'}, + 'earl:outcome': {'@type': '@id'}, + 'dc:date': {'@type': 'xsd:date'}, + 'doap:created': {'@type': 'xsd:date'} + }, + '@id': 'https://github.com/digitalbazaar/jsonld.js', + '@type': [ + 'doap:Project', + 'earl:TestSubject', + 'earl:Software' + ], + 'doap:name': 'jsonld.js', + 'dc:title': 'jsonld.js', + 'doap:homepage': 'https://github.com/digitalbazaar/jsonld.js', + 'doap:license': + 'https://github.com/digitalbazaar/jsonld.js/blob/master/LICENSE', + 'doap:description': 'A JSON-LD processor for JavaScript', + 'doap:programming-language': 'JavaScript', + 'dc:creator': 'https://digitalbazaar.com/', + 'doap:developer': { + '@id': 'https://digitalbazaar.com/', + '@type': [ + 'foaf:Organization', + 'earl:Assertor' + ], + 'foaf:name': 'Digital Bazaar, Inc.', + 'foaf:homepage': 'https://digitalbazaar.com/' + }, + 'doap:release': { + 'doap:revision': '', + 'doap:created': today + }, + 'subjectOf': [] + }; + /* eslint-enable quote-props */ + if(this.env && this.env.version) { + this._report['doap:release']['doap:revision'] = this.env.version; + } } - if(options.testEnv) { - // add report environment - const fields = [ - ['label', 'jldb:label'], - ['arch', 'jldb:arch'], - ['cpu', 'jldb:cpu'], - ['cpuCount', 'jldb:cpuCount'], - ['platform', 
'jldb:platform'], - ['runtime', 'jldb:runtime'], - ['runtimeVersion', 'jldb:runtimeVersion'], - ['comment', 'jldb:comment'] - ]; - const _env = { - '@id': '_:environment:0' + + addAssertion(test, pass, options) { + options = options || {}; + const assertion = { + '@type': 'earl:Assertion', + 'earl:assertedBy': this._report['doap:developer']['@id'], + 'earl:mode': 'earl:automatic', + 'earl:test': test['@id'], + 'earl:result': { + '@type': 'earl:TestResult', + 'dc:date': this.now.toISOString(), + 'earl:outcome': pass ? 'earl:passed' : 'earl:failed' + } }; - for(const [field, property] of fields) { - if(options.testEnv[field]) { - _env[property] = options.testEnv[field]; + if(options.benchmarkResult) { + const result = { + ...options.benchmarkResult + }; + if(this._environment) { + result['jldb:environment'] = this._environment['@id']; } + assertion['jldb:result'] = result; } - this._environment = _env; - this._report['@included'] = this._report['@included'] || []; - this._report['@included'].push(_env); + this._report.subjectOf.push(assertion); + return this; } -}; + + report() { + return this._report; + } + + reportJson() { + return JSON.stringify(this._report, null, 2); + } + + // setup @context and environment to handle benchmark data + setupForBenchmarks(options) { + // add context if needed + if(!Array.isArray(this._report['@context'])) { + this._report['@context'] = [this._report['@context']]; + } + if(!this._report['@context'].some(c => c === _benchmarkContext)) { + this._report['@context'].push(_benchmarkContext); + } + if(options.testEnv) { + // add report environment + const fields = [ + ['label', 'jldb:label'], + ['arch', 'jldb:arch'], + ['cpu', 'jldb:cpu'], + ['cpuCount', 'jldb:cpuCount'], + ['platform', 'jldb:platform'], + ['runtime', 'jldb:runtime'], + ['runtimeVersion', 'jldb:runtimeVersion'], + ['comment', 'jldb:comment'] + ]; + const _env = { + '@id': '_:environment:0' + }; + for(const [field, property] of fields) { + if(options.testEnv[field]) { + _env[property] = options.testEnv[field]; + } + } + this._environment = _env; + this._report['@included'] = this._report['@included'] || []; + this._report['@included'].push(_env); + } + } +} module.exports = EarlReport; diff --git a/tests/test-common.js b/tests/test-common.js deleted file mode 100644 index 616098fe..00000000 --- a/tests/test-common.js +++ /dev/null @@ -1,1094 +0,0 @@ -/** - * Copyright (c) 2011-2019 Digital Bazaar, Inc. All rights reserved. 
- */ -/* eslint-disable indent */ -const EarlReport = require('./earl-report'); -const join = require('join-path-js'); -const rdfCanonize = require('rdf-canonize'); -const {prependBase} = require('../lib/url'); - -module.exports = function(options) { - -'use strict'; - -const assert = options.assert; -const benchmark = options.benchmark; -const jsonld = options.jsonld; - -const manifest = options.manifest || { - '@context': 'https://json-ld.org/test-suite/context.jsonld', - '@id': '', - '@type': 'mf:Manifest', - description: 'Top level jsonld.js manifest', - name: 'jsonld.js', - sequence: options.entries || [], - filename: '/' -}; - -const TEST_TYPES = { - 'jld:CompactTest': { - skip: { - // skip tests where behavior changed for a 1.1 processor - // see JSON-LD 1.0 Errata - specVersion: ['json-ld-1.0'], - // FIXME - // NOTE: idRegex format: - //MMM-manifest#tNNN$/, - idRegex: [ - /compact-manifest#t0112$/, - /compact-manifest#t0113$/, - // html - /html-manifest#tc001$/, - /html-manifest#tc002$/, - /html-manifest#tc003$/, - /html-manifest#tc004$/, - ] - }, - fn: 'compact', - params: [ - readTestUrl('input'), - readTestJson('context'), - createTestOptions() - ], - compare: compareExpectedJson - }, - 'jld:ExpandTest': { - skip: { - // skip tests where behavior changed for a 1.1 processor - // see JSON-LD 1.0 Errata - specVersion: ['json-ld-1.0'], - // FIXME - // NOTE: idRegex format: - //MMM-manifest#tNNN$/, - idRegex: [ - // spec issues - // Unclear how to handle {"@id": null} edge case - // See https://github.com/w3c/json-ld-api/issues/480 - // non-normative test, also see toRdf-manifest#te122 - ///expand-manifest#t0122$/, - - // misc - /expand-manifest#tc037$/, - /expand-manifest#tc038$/, - /expand-manifest#ter54$/, - - // html - /html-manifest#te001$/, - /html-manifest#te002$/, - /html-manifest#te003$/, - /html-manifest#te004$/, - /html-manifest#te005$/, - /html-manifest#te006$/, - /html-manifest#te007$/, - /html-manifest#te010$/, - /html-manifest#te011$/, - /html-manifest#te012$/, - /html-manifest#te013$/, - /html-manifest#te014$/, - /html-manifest#te015$/, - /html-manifest#te016$/, - /html-manifest#te017$/, - /html-manifest#te018$/, - /html-manifest#te019$/, - /html-manifest#te020$/, - /html-manifest#te021$/, - /html-manifest#te022$/, - /html-manifest#tex01$/, - // HTML extraction - /expand-manifest#thc01$/, - /expand-manifest#thc02$/, - /expand-manifest#thc03$/, - /expand-manifest#thc04$/, - /expand-manifest#thc05$/, - // remote - /remote-doc-manifest#t0013$/, // HTML - ] - }, - fn: 'expand', - params: [ - readTestUrl('input'), - createTestOptions() - ], - compare: compareExpectedJson - }, - 'jld:FlattenTest': { - skip: { - // skip tests where behavior changed for a 1.1 processor - // see JSON-LD 1.0 Errata - specVersion: ['json-ld-1.0'], - // FIXME - // NOTE: idRegex format: - //MMM-manifest#tNNN$/, - idRegex: [ - // html - /html-manifest#tf001$/, - /html-manifest#tf002$/, - /html-manifest#tf003$/, - /html-manifest#tf004$/, - ] - }, - fn: 'flatten', - params: [ - readTestUrl('input'), - readTestJson('context'), - createTestOptions() - ], - compare: compareExpectedJson - }, - 'jld:FrameTest': { - skip: { - // skip tests where behavior changed for a 1.1 processor - // see JSON-LD 1.0 Errata - specVersion: ['json-ld-1.0'], - // FIXME - // NOTE: idRegex format: - //MMM-manifest#tNNN$/, - idRegex: [ - /frame-manifest#t0069$/, - ] - }, - fn: 'frame', - params: [ - readTestUrl('input'), - readTestJson('frame'), - createTestOptions() - ], - compare: compareExpectedJson - }, - 
'jld:FromRDFTest': { - skip: { - // skip tests where behavior changed for a 1.1 processor - // see JSON-LD 1.0 Errata - specVersion: ['json-ld-1.0'], - // FIXME - // NOTE: idRegex format: - //MMM-manifest#tNNN$/, - idRegex: [ - // direction (compound-literal) - /fromRdf-manifest#tdi11$/, - /fromRdf-manifest#tdi12$/, - ] - }, - fn: 'fromRDF', - params: [ - readTestNQuads('input'), - createTestOptions({format: 'application/n-quads'}) - ], - compare: compareExpectedJson - }, - 'jld:NormalizeTest': { - fn: 'normalize', - params: [ - readTestUrl('input'), - createTestOptions({format: 'application/n-quads'}) - ], - compare: compareExpectedNQuads - }, - 'jld:ToRDFTest': { - skip: { - // skip tests where behavior changed for a 1.1 processor - // see JSON-LD 1.0 Errata - specVersion: ['json-ld-1.0'], - // FIXME - // NOTE: idRegex format: - //MMM-manifest#tNNN$/, - idRegex: [ - // spec issues - // Unclear how to handle {"@id": null} edge case - // See https://github.com/w3c/json-ld-api/issues/480 - // normative test, also see expand-manifest#t0122 - ///toRdf-manifest#te122$/, - - // misc - /toRdf-manifest#tc037$/, - /toRdf-manifest#tc038$/, - /toRdf-manifest#ter54$/, - /toRdf-manifest#tli12$/, - /toRdf-manifest#tli14$/, - - // well formed - /toRdf-manifest#twf05$/, - - // html - /html-manifest#tr001$/, - /html-manifest#tr002$/, - /html-manifest#tr003$/, - /html-manifest#tr004$/, - /html-manifest#tr005$/, - /html-manifest#tr006$/, - /html-manifest#tr007$/, - /html-manifest#tr010$/, - /html-manifest#tr011$/, - /html-manifest#tr012$/, - /html-manifest#tr013$/, - /html-manifest#tr014$/, - /html-manifest#tr015$/, - /html-manifest#tr016$/, - /html-manifest#tr017$/, - /html-manifest#tr018$/, - /html-manifest#tr019$/, - /html-manifest#tr020$/, - /html-manifest#tr021$/, - /html-manifest#tr022$/, - // Invalid Statement - /toRdf-manifest#te075$/, - /toRdf-manifest#te111$/, - /toRdf-manifest#te112$/, - // direction (compound-literal) - /toRdf-manifest#tdi11$/, - /toRdf-manifest#tdi12$/, - ] - }, - fn: 'toRDF', - params: [ - readTestUrl('input'), - createTestOptions({format: 'application/n-quads'}) - ], - compare: compareCanonizedExpectedNQuads - }, - 'rdfc:Urgna2012EvalTest': { - fn: 'normalize', - params: [ - readTestNQuads('action'), - createTestOptions({ - algorithm: 'URGNA2012', - inputFormat: 'application/n-quads', - format: 'application/n-quads' - }) - ], - compare: compareExpectedNQuads - }, - 'rdfc:Urdna2015EvalTest': { - skip: { - // NOTE: idRegex format: - //manifest-urdna2015#testNNN$/, - idRegex: [ - // Unsupported U escape - /manifest-urdna2015#test060/ - ] - }, - fn: 'normalize', - params: [ - readTestNQuads('action'), - createTestOptions({ - algorithm: 'URDNA2015', - inputFormat: 'application/n-quads', - format: 'application/n-quads' - }) - ], - compare: compareExpectedNQuads - } -}; - -const SKIP_TESTS = []; - -// create earl report -if(options.earl && options.earl.filename) { - options.earl.report = new EarlReport({ - id: options.earl.id, - env: options.testEnv - }); - if(options.benchmarkOptions) { - options.earl.report.setupForBenchmarks({testEnv: options.testEnv}); - } -} - -return new Promise(resolve => { - -// async generated tests -// _tests => [{suite}, ...] -// suite => { -// title: ..., -// tests: [test, ...], -// suites: [suite, ...] 
-// } -const _tests = []; - -return addManifest(manifest, _tests) - .then(() => { - return _testsToMocha(_tests); - }).then(result => { - if(options.earl.report) { - describe('Writing EARL report to: ' + options.earl.filename, function() { - // print out EARL even if .only was used - const _it = result.hadOnly ? it.only : it; - _it('should print the earl report', function() { - return options.writeFile( - options.earl.filename, options.earl.report.reportJson()); - }); - }); - } - }).then(() => resolve()); - -// build mocha tests from local test structure -function _testsToMocha(tests) { - let hadOnly = false; - tests.forEach(suite => { - if(suite.skip) { - describe.skip(suite.title); - return; - } - describe(suite.title, () => { - suite.tests.forEach(test => { - if(test.only) { - hadOnly = true; - it.only(test.title, test.f); - return; - } - it(test.title, test.f); - }); - const {hadOnly: _hadOnly} = _testsToMocha(suite.suites); - hadOnly = hadOnly || _hadOnly; - }); - suite.imports.forEach(f => { - options.import(f); - }); - }); - return { - hadOnly - }; -} - -}); - -/** - * Adds the tests for all entries in the given manifest. - * - * @param manifest {Object} the manifest. - * @param parent {Object} the parent test structure - * @return {Promise} - */ -function addManifest(manifest, parent) { - return new Promise((resolve, reject) => { - // create test structure - const suite = { - title: manifest.name || manifest.label, - tests: [], - suites: [], - imports: [] - }; - parent.push(suite); - - // get entries and sequence (alias for entries) - const entries = [].concat( - getJsonLdValues(manifest, 'entries'), - getJsonLdValues(manifest, 'sequence') - ); - - const includes = getJsonLdValues(manifest, 'include'); - // add includes to sequence as jsonld files - for(let i = 0; i < includes.length; ++i) { - entries.push(includes[i] + '.jsonld'); - } - - // resolve all entry promises and process - Promise.all(entries).then(entries => { - let p = Promise.resolve(); - entries.forEach(entry => { - if(typeof entry === 'string' && entry.endsWith('js')) { - // process later as a plain JavaScript file - suite.imports.push(entry); - return; - } else if(typeof entry === 'function') { - // process as a function that returns a promise - p = p.then(() => { - return entry(options); - }).then(childSuite => { - if(suite) { - suite.suites.push(childSuite); - } - }); - return; - } - p = p.then(() => { - return readManifestEntry(manifest, entry); - }).then(entry => { - if(isJsonLdType(entry, '__SKIP__')) { - // special local skip logic - suite.tests.push(entry); - } else if(isJsonLdType(entry, 'mf:Manifest')) { - // entry is another manifest - return addManifest(entry, suite.suites); - } else { - // assume entry is a test - return addTest(manifest, entry, suite.tests); - } - }); - }); - return p; - }).then(() => { - resolve(); - }).catch(err => { - console.error(err); - reject(err); - }); - }); -} - -/** - * Adds a test. - * - * @param manifest {Object} the manifest. - * @param parent {Object} the test. - * @param tests {Array} the list of tests to add to. 
- * @return {Promise} - */ -function addTest(manifest, test, tests) { - // expand @id and input base - const test_id = test['@id'] || test.id; - //var number = test_id.substr(2); - test['@id'] = - manifest.baseIri + - basename(manifest.filename).replace('.jsonld', '') + - test_id; - test.base = manifest.baseIri + test.input; - test.manifest = manifest; - const description = test_id + ' ' + (test.purpose || test.name); - - const _test = { - title: description, - f: makeFn() - }; - // only based on test manifest - // skip handled via skip() - if('only' in test) { - _test.only = test.only; - } - tests.push(_test); - - function makeFn() { - return async function() { - const self = this; - self.timeout(5000); - const testInfo = TEST_TYPES[getJsonLdTestType(test)]; - - // skip based on test manifest - if('skip' in test && test.skip) { - if(options.verboseSkip) { - console.log('Skipping test due to manifest:', - {id: test['@id'], name: test.name}); - } - self.skip(); - } - - // skip based on unknown test type - const testTypes = Object.keys(TEST_TYPES); - if(!isJsonLdType(test, testTypes)) { - if(options.verboseSkip) { - const type = [].concat( - getJsonLdValues(test, '@type'), - getJsonLdValues(test, 'type') - ); - console.log('Skipping test due to unknown type:', - {id: test['@id'], name: test.name, type}); - } - self.skip(); - } - - // skip based on test type - if(isJsonLdType(test, SKIP_TESTS)) { - if(options.verboseSkip) { - const type = [].concat( - getJsonLdValues(test, '@type'), - getJsonLdValues(test, 'type') - ); - console.log('Skipping test due to test type:', - {id: test['@id'], name: test.name, type}); - } - self.skip(); - } - - // skip based on type info - if(testInfo.skip && testInfo.skip.type) { - if(options.verboseSkip) { - console.log('Skipping test due to type info:', - {id: test['@id'], name: test.name}); - } - self.skip(); - } - - // skip based on id regex - if(testInfo.skip && testInfo.skip.idRegex) { - testInfo.skip.idRegex.forEach(function(re) { - if(re.test(test['@id'])) { - if(options.verboseSkip) { - console.log('Skipping test due to id:', - {id: test['@id']}); - } - self.skip(); - } - }); - } - - // skip based on description regex - if(testInfo.skip && testInfo.skip.descriptionRegex) { - testInfo.skip.descriptionRegex.forEach(function(re) { - if(re.test(description)) { - if(options.verboseSkip) { - console.log('Skipping test due to description:', - {id: test['@id'], name: test.name, description}); - } - self.skip(); - } - }); - } - - // Make expandContext absolute to the manifest - if(test.hasOwnProperty('option') && test.option.expandContext) { - test.option.expandContext = - prependBase(test.manifest.baseIri, test.option.expandContext); - } - - const testOptions = getJsonLdValues(test, 'option'); - // allow special handling in case of normative test failures - let normativeTest = true; - - testOptions.forEach(function(opt) { - const processingModes = getJsonLdValues(opt, 'processingMode'); - processingModes.forEach(function(pm) { - let skipModes = []; - if(testInfo.skip && testInfo.skip.processingMode) { - skipModes = testInfo.skip.processingMode; - } - if(skipModes.indexOf(pm) !== -1) { - if(options.verboseSkip) { - console.log('Skipping test due to processingMode:', - {id: test['@id'], name: test.name, processingMode: pm}); - } - self.skip(); - } - }); - }); - - testOptions.forEach(function(opt) { - const specVersions = getJsonLdValues(opt, 'specVersion'); - specVersions.forEach(function(sv) { - let skipVersions = []; - if(testInfo.skip && 
testInfo.skip.specVersion) { - skipVersions = testInfo.skip.specVersion; - } - if(skipVersions.indexOf(sv) !== -1) { - if(options.verboseSkip) { - console.log('Skipping test due to specVersion:', - {id: test['@id'], name: test.name, specVersion: sv}); - } - self.skip(); - } - }); - }); - - testOptions.forEach(function(opt) { - const normative = getJsonLdValues(opt, 'normative'); - normative.forEach(function(n) { - normativeTest = normativeTest && n; - }); - }); - - const fn = testInfo.fn; - const params = testInfo.params.map(param => param(test)); - // resolve test data - const values = await Promise.all(params); - let err; - let result; - // run and capture errors and results - try { - result = await jsonld[fn].apply(null, values); - } catch(e) { - err = e; - } - - try { - if(isJsonLdType(test, 'jld:NegativeEvaluationTest')) { - await compareExpectedError(test, err); - } else if(isJsonLdType(test, 'jld:PositiveEvaluationTest') || - isJsonLdType(test, 'rdfc:Urgna2012EvalTest') || - isJsonLdType(test, 'rdfc:Urdna2015EvalTest')) { - if(err) { - throw err; - } - await testInfo.compare(test, result); - } else if(isJsonLdType(test, 'jld:PositiveSyntaxTest')) { - // no checks - } else { - throw Error('Unknown test type: ' + test.type); - } - - let benchmarkResult = null; - if(options.benchmarkOptions) { - const result = await runBenchmark({ - test, - fn, - params: testInfo.params.map(param => param(test, { - // pre-load params to avoid doc loader and parser timing - load: true - })), - mochaTest: self - }); - benchmarkResult = { - '@type': 'jldb:BenchmarkResult', - 'jldb:hz': result.target.hz, - 'jldb:rme': result.target.stats.rme - }; - } - - if(options.earl.report) { - options.earl.report.addAssertion(test, true, { - benchmarkResult - }); - } - } catch(err) { - // FIXME: improve handling of non-normative errors - // FIXME: for now, explicitly disabling tests. 
- //if(!normativeTest) { - // // failure ok - // if(options.verboseSkip) { - // console.log('Skipping non-normative test due to failure:', - // {id: test['@id'], name: test.name}); - // } - // self.skip(); - //} - if(options.bailOnError) { - if(err.name !== 'AssertionError') { - console.error('\nError: ', JSON.stringify(err, null, 2)); - } - options.exit(); - } - if(options.earl.report) { - options.earl.report.addAssertion(test, false); - } - console.error('Error: ', JSON.stringify(err, null, 2)); - throw err; - } - }; - } -} - -async function runBenchmark({test, fn, params, mochaTest}) { - const values = await Promise.all(params); - - return new Promise((resolve, reject) => { - const suite = new benchmark.Suite(); - suite.add({ - name: test.name, - defer: true, - fn: deferred => { - jsonld[fn].apply(null, values).then(() => { - deferred.resolve(); - }); - } - }); - suite - .on('start', e => { - // set timeout to a bit more than max benchmark time - mochaTest.timeout((e.target.maxTime + 2) * 1000); - }) - .on('cycle', e => { - console.log(String(e.target)); - }) - .on('error', err => { - reject(new Error(err)); - }) - .on('complete', e => { - resolve(e); - }) - .run({async: true}); - }); -} - -function getJsonLdTestType(test) { - const types = Object.keys(TEST_TYPES); - for(let i = 0; i < types.length; ++i) { - if(isJsonLdType(test, types[i])) { - return types[i]; - } - } - return null; -} - -function readManifestEntry(manifest, entry) { - let p = Promise.resolve(); - let _entry = entry; - if(typeof entry === 'string') { - let _filename; - p = p.then(() => { - if(entry.endsWith('json') || entry.endsWith('jsonld')) { - // load as file - return entry; - } - // load as dir with manifest.jsonld - return joinPath(entry, 'manifest.jsonld'); - }).then(entry => { - const dir = dirname(manifest.filename); - return joinPath(dir, entry); - }).then(filename => { - _filename = filename; - return readJson(filename); - }).then(entry => { - _entry = entry; - _entry.filename = _filename; - return _entry; - }).catch(err => { - if(err.code === 'ENOENT') { - //console.log('File does not exist, skipping: ' + _filename); - // return a "skip" entry - _entry = { - type: '__SKIP__', - title: 'Not found, skipping: ' + _filename, - filename: _filename, - skip: true - }; - return; - } - throw err; - }); - } - return p.then(() => { - _entry.dirname = dirname(_entry.filename || manifest.filename); - return _entry; - }); -} - -function readTestUrl(property) { - return async function(test, options) { - if(!test[property]) { - return null; - } - if(options && options.load) { - // always load - const filename = await joinPath(test.dirname, test[property]); - return readJson(filename); - } - return test.manifest.baseIri + test[property]; - }; -} - -function readTestJson(property) { - return async function(test) { - if(!test[property]) { - return null; - } - const filename = await joinPath(test.dirname, test[property]); - return readJson(filename); - }; -} - -function readTestNQuads(property) { - return async function(test) { - if(!test[property]) { - return null; - } - const filename = await joinPath(test.dirname, test[property]); - return readFile(filename); - }; -} - -function createTestOptions(opts) { - return function(test) { - const options = { - documentLoader: createDocumentLoader(test) - }; - const httpOptions = ['contentType', 'httpLink', 'httpStatus', 'redirectTo']; - const testOptions = test.option || {}; - for(const key in testOptions) { - if(httpOptions.indexOf(key) === -1) { - options[key] = testOptions[key]; 
- } - } - if(opts) { - // extend options - for(const key in opts) { - options[key] = opts[key]; - } - } - return options; - }; -} - -// find the expected output property or throw error -function _getExpectProperty(test) { - if('expectErrorCode' in test) { - return 'expectErrorCode'; - } else if('expect' in test) { - return 'expect'; - } else if('result' in test) { - return 'result'; - } else { - throw Error('No expected output property found'); - } -} - -async function compareExpectedJson(test, result) { - let expect; - try { - expect = await readTestJson(_getExpectProperty(test))(test); - assert.deepStrictEqual(result, expect); - } catch(err) { - if(options.bailOnError) { - console.log('\nTEST FAILED\n'); - console.log('EXPECTED: ' + JSON.stringify(expect, null, 2)); - console.log('ACTUAL: ' + JSON.stringify(result, null, 2)); - } - throw err; - } -} - -async function compareExpectedNQuads(test, result) { - let expect; - try { - expect = await readTestNQuads(_getExpectProperty(test))(test); - assert.strictEqual(result, expect); - } catch(ex) { - if(options.bailOnError) { - console.log('\nTEST FAILED\n'); - console.log('EXPECTED:\n' + expect); - console.log('ACTUAL:\n' + result); - } - throw ex; - } -} - -async function compareCanonizedExpectedNQuads(test, result) { - let expect; - try { - expect = await readTestNQuads(_getExpectProperty(test))(test); - const opts = {algorithm: 'URDNA2015'}; - const expectDataset = rdfCanonize.NQuads.parse(expect); - const expectCmp = await rdfCanonize.canonize(expectDataset, opts); - const resultDataset = rdfCanonize.NQuads.parse(result); - const resultCmp = await rdfCanonize.canonize(resultDataset, opts); - assert.strictEqual(resultCmp, expectCmp); - } catch(err) { - if(options.bailOnError) { - console.log('\nTEST FAILED\n'); - console.log('EXPECTED:\n' + expect); - console.log('ACTUAL:\n' + result); - } - throw err; - } -} - -async function compareExpectedError(test, err) { - let expect; - let result; - try { - expect = test[_getExpectProperty(test)]; - result = getJsonLdErrorCode(err); - assert.ok(err, 'no error present'); - assert.strictEqual(result, expect); - } catch(_err) { - if(options.bailOnError) { - console.log('\nTEST FAILED\n'); - console.log('EXPECTED: ' + expect); - console.log('ACTUAL: ' + result); - } - // log the unexpected error to help with debugging - console.log('Unexpected error:', err); - throw _err; - } -} - -function isJsonLdType(node, type) { - const nodeType = [].concat( - getJsonLdValues(node, '@type'), - getJsonLdValues(node, 'type') - ); - type = Array.isArray(type) ? 
type : [type]; - for(let i = 0; i < type.length; ++i) { - if(nodeType.indexOf(type[i]) !== -1) { - return true; - } - } - return false; -} - -function getJsonLdValues(node, property) { - let rval = []; - if(property in node) { - rval = node[property]; - if(!Array.isArray(rval)) { - rval = [rval]; - } - } - return rval; -} - -function getJsonLdErrorCode(err) { - if(!err) { - return null; - } - if(err.details) { - if(err.details.code) { - return err.details.code; - } - if(err.details.cause) { - return getJsonLdErrorCode(err.details.cause); - } - } - return err.name; -} - -async function readJson(filename) { - const data = await readFile(filename); - return JSON.parse(data); -} - -async function readFile(filename) { - return options.readFile(filename); -} - -async function joinPath() { - return join.apply(null, Array.prototype.slice.call(arguments)); -} - -function dirname(filename) { - if(options.nodejs) { - return options.nodejs.path.dirname(filename); - } - const idx = filename.lastIndexOf('/'); - if(idx === -1) { - return filename; - } - return filename.substr(0, idx); -} - -function basename(filename) { - if(options.nodejs) { - return options.nodejs.path.basename(filename); - } - const idx = filename.lastIndexOf('/'); - if(idx === -1) { - return filename; - } - return filename.substr(idx + 1); -} - -// check test.option.loader.rewrite map for url, -// if no test rewrite, check manifest, -// else no rewrite -function rewrite(test, url) { - if(test.option && - test.option.loader && - test.option.loader.rewrite && - url in test.option.loader.rewrite) { - return test.option.loader.rewrite[url]; - } - const manifest = test.manifest; - if(manifest.option && - manifest.option.loader && - manifest.option.loader.rewrite && - url in manifest.option.loader.rewrite) { - return manifest.option.loader.rewrite[url]; - } - return url; -} - -/** - * Creates a test remote document loader. - * - * @param test the test to use the document loader for. - * - * @return the document loader. - */ -function createDocumentLoader(test) { - const localBases = [ - 'http://json-ld.org/test-suite', - 'https://json-ld.org/test-suite', - 'https://json-ld.org/benchmarks', - 'https://w3c.github.io/json-ld-api/tests', - 'https://w3c.github.io/json-ld-framing/tests' - ]; - - const localLoader = function(url) { - // always load remote-doc tests remotely in node - // NOTE: disabled due to github pages issues. 
- //if(options.nodejs && test.manifest.name === 'Remote document') { - // return jsonld.documentLoader(url); - //} - - // handle loader rewrite options for test or manifest - url = rewrite(test, url); - - // FIXME: this check only works for main test suite and will not work if: - // - running other tests and main test suite not installed - // - use other absolute URIs but want to load local files - const isTestSuite = localBases.some(function(base) { - return url.startsWith(base); - }); - // TODO: improve this check - const isRelative = url.indexOf(':') === -1; - if(isTestSuite || isRelative) { - // attempt to load official test-suite files or relative URLs locally - return loadLocally(url); - } - - // load remotely - return jsonld.documentLoader(url); - }; - - return localLoader; - - function loadLocally(url) { - const doc = {contextUrl: null, documentUrl: url, document: null}; - const options = test.option; - if(options && url === test.base) { - if('redirectTo' in options && parseInt(options.httpStatus, 10) >= 300) { - doc.documentUrl = test.manifest.baseIri + options.redirectTo; - } else if('httpLink' in options) { - let contentType = options.contentType || null; - if(!contentType && url.indexOf('.jsonld', url.length - 7) !== -1) { - contentType = 'application/ld+json'; - } - if(!contentType && url.indexOf('.json', url.length - 5) !== -1) { - contentType = 'application/json'; - } - let linkHeader = options.httpLink; - if(Array.isArray(linkHeader)) { - linkHeader = linkHeader.join(','); - } - const linkHeaders = jsonld.parseLinkHeader(linkHeader); - const linkedContext = - linkHeaders['http://www.w3.org/ns/json-ld#context']; - if(linkedContext && contentType !== 'application/ld+json') { - if(Array.isArray(linkedContext)) { - throw {name: 'multiple context link headers'}; - } - doc.contextUrl = linkedContext.target; - } - - // If not JSON-LD, alternate may point there - if(linkHeaders.alternate && - linkHeaders.alternate.type == 'application/ld+json' && - !(contentType || '').match(/^application\/(\w*\+)?json$/)) { - doc.documentUrl = prependBase(url, linkHeaders.alternate.target); - } - } - } - - let p = Promise.resolve(); - if(doc.documentUrl.indexOf(':') === -1) { - p = p.then(() => { - return joinPath(test.manifest.dirname, doc.documentUrl); - }).then(filename => { - doc.documentUrl = 'file://' + filename; - return filename; - }); - } else { - p = p.then(() => { - return joinPath( - test.manifest.dirname, - doc.documentUrl.substr(test.manifest.baseIri.length)); - }).then(fn => { - return fn; - }); - } - - return p.then(readJson).then(json => { - doc.document = json; - return doc; - }).catch(() => { - throw {name: 'loading document failed', url}; - }); - } -} - -}; diff --git a/tests/test-karma.js b/tests/test-karma.js index 33910732..19ba045b 100644 --- a/tests/test-karma.js +++ b/tests/test-karma.js @@ -1,82 +1,66 @@ /** * Karma test runner for jsonld.js. * - * Use environment vars to control, set via karma.conf.js/webpack: - * - * Set dirs, manifests, or js to run: - * JSONLD_TESTS="r1 r2 ..." - * Output an EARL report: - * EARL=filename - * Test environment details for EARL report: - * This is useful for benchmark comparison. - * By default no details are added for privacy reasons. - * Automatic details can be added for all fields with '1', 'true', or 'auto': - * TEST_ENV=1 - * To include only certain fields, set them, or use 'auto': - * TEST_ENV=cpu='Intel i7-4790K @ 4.00GHz',runtime='Node.js',... 
- * TEST_ENV=cpu=auto # only cpu - * TEST_ENV=cpu,runtime # only cpu and runtime - * TEST_ENV=auto,comment='special test' # all auto with override - * Available fields: - * - arch - ex: 'x64' - * - cpu - ex: 'Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz' - * - cpuCount - ex: 8 - * - platform - ex: 'linux' - * - runtime - ex: 'Node.js' - * - runtimeVersion - ex: 'v14.19.0' - * - comment: any text - * - version: jsonld.js version - * Bail with tests fail: - * BAIL=true - * Verbose skip reasons: - * VERBOSE_SKIP=true - * Benchmark mode: - * Basic: - * JSONLD_BENCHMARK=1 - * With options: - * JSONLD_BENCHMARK=key1=value1,key2=value2,... + * See ./test.js for environment vars options. * * @author Dave Longley * @author David I. Lehn * - * Copyright (c) 2011-2022 Digital Bazaar, Inc. All rights reserved. + * Copyright (c) 2011-2023 Digital Bazaar, Inc. All rights reserved. */ /* global serverRequire */ // FIXME: hack to ensure delay is set first mocha.setup({delay: true, ui: 'bdd'}); const assert = require('chai').assert; -const common = require('./test-common'); -const jsonld = require('..'); +const benchmark = require('benchmark'); +const common = require('./test.js'); const server = require('karma-server-side'); const webidl = require('./test-webidl'); const join = require('join-path-js'); // special benchmark setup const _ = require('lodash'); -//const _process = require('process'); -const benchmark = require('benchmark'); -//const Benchmark = benchmark.runInContext({_, _process}); const Benchmark = benchmark.runInContext({_}); window.Benchmark = Benchmark; const entries = []; -if(process.env.JSONLD_TESTS) { - entries.push(...process.env.JSONLD_TESTS.split(' ')); +if(process.env.TESTS) { + entries.push(...process.env.TESTS.split(' ')); } else { const _top = process.env.TEST_ROOT_DIR; // TODO: support just adding certain entries in EARL mode? 
// json-ld-api main test suite - // FIXME: add path detection - entries.push(join(_top, 'test-suites/json-ld-api/tests')); - entries.push(join(_top, '../json-ld-api/tests')); + entries.push((async () => { + const testPath = join(_top, 'test-suites/json-ld-api/tests'); + const siblingPath = join(_top, '../json-ld-api/tests'); + return server.run(testPath, siblingPath, function(testPath, siblingPath) { + const fs = serverRequire('fs-extra'); + // use local tests if setup + if(fs.existsSync(testPath)) { + return testPath; + } + // default to sibling dir + return siblingPath; + }); + })()); // json-ld-framing main test suite - // FIXME: add path detection - entries.push(join(_top, 'test-suites/json-ld-framing/tests')); - entries.push(join(_top, '../json-ld-framing/tests')); + entries.push((async () => { + const testPath = join(_top, 'test-suites/json-ld-framing/tests'); + const siblingPath = join(_top, '../json-ld-framing/tests'); + return server.run(testPath, siblingPath, function(testPath, siblingPath) { + const fs = serverRequire('fs-extra'); + // use local tests if setup + if(fs.existsSync(testPath)) { + return testPath; + } + // default to sibling dir + return siblingPath; + }); + })()); /* // TODO: use json-ld-framing once tests are moved @@ -89,92 +73,53 @@ if(process.env.JSONLD_TESTS) { */ // W3C RDF Dataset Canonicalization "rdf-canon" test suite - // FIXME: add path detection - entries.push(join(_top, 'test-suites/rdf-canon/tests')); - entries.push(join(_top, '../rdf-canon/tests')); + entries.push((async () => { + const testPath = join(_top, 'test-suites/rdf-canon/tests'); + const siblingPath = join(_top, '../rdf-canon/tests'); + return server.run(testPath, siblingPath, function(testPath, siblingPath) { + const fs = serverRequire('fs-extra'); + // use local tests if setup + if(fs.existsSync(testPath)) { + return testPath; + } + // default to sibling dir + return siblingPath; + }); + })()); // other tests entries.push(join(_top, 'tests/misc.js')); entries.push(join(_top, 'tests/graph-container.js')); - entries.push(join(_top, 'tests/new-embed-api')); // WebIDL tests entries.push(webidl); } -// test environment -let testEnv = null; -if(process.env.TEST_ENV) { - let _test_env = process.env.TEST_ENV; - if(!(['0', 'false'].includes(_test_env))) { - testEnv = {}; - if(['1', 'true', 'auto'].includes(_test_env)) { - _test_env = 'auto'; - } - _test_env.split(',').forEach(pair => { - if(pair === 'auto') { - testEnv.arch = 'auto'; - testEnv.cpu = 'auto'; - testEnv.cpuCount = 'auto'; - testEnv.platform = 'auto'; - testEnv.runtime = 'auto'; - testEnv.runtimeVersion = 'auto'; - testEnv.comment = 'auto'; - testEnv.version = 'auto'; - } else { - const kv = pair.split('='); - if(kv.length === 1) { - testEnv[kv[0]] = 'auto'; - } else { - testEnv[kv[0]] = kv.slice(1).join('='); - } - } - }); - if(testEnv.arch === 'auto') { - testEnv.arch = process.env._TEST_ENV_ARCH; - } - if(testEnv.cpu === 'auto') { - testEnv.cpu = process.env._TEST_ENV_CPU; - } - if(testEnv.cpuCount === 'auto') { - testEnv.cpuCount = process.env._TEST_ENV_CPU_COUNT; - } - if(testEnv.platform === 'auto') { - testEnv.platform = process.env._TEST_ENV_PLATFORM; - } - if(testEnv.runtime === 'auto') { - testEnv.runtime = 'browser'; - } - if(testEnv.runtimeVersion === 'auto') { - testEnv.runtimeVersion = '(unknown)'; - } - if(testEnv.comment === 'auto') { - testEnv.comment = ''; - } - if(testEnv.version === 'auto') { - testEnv.version = require('../package.json').version; - } - } -} +// test environment defaults +const testEnvDefaults = { 
+ label: '', + arch: process.env._TEST_ENV_ARCH, + cpu: process.env._TEST_ENV_CPU, + cpuCount: process.env._TEST_ENV_CPU_COUNT, + platform: process.env._TEST_ENV_PLATFORM, + runtime: 'browser', + runtimeVersion: '(unknown)', + comment: '', + version: require('../package.json').version +}; -let benchmarkOptions = null; -if(process.env.JSONLD_BENCHMARK) { - if(!(['0', 'false'].includes(process.env.JSONLD_BENCHMARK))) { - benchmarkOptions = {}; - if(!(['1', 'true'].includes(process.env.JSONLD_BENCHMARK))) { - process.env.JSONLD_BENCHMARK.split(',').forEach(pair => { - const kv = pair.split('='); - benchmarkOptions[kv[0]] = kv[1]; - }); - } - } -} +const env = { + BAIL: process.env.BAIL, + BENCHMARK: process.env.BENCHMARK, + TEST_ENV: process.env.TEST_ENV, + VERBOSE_SKIP: process.env.VERBOSE_SKIP +}; const options = { + env, nodejs: false, assert, benchmark, - jsonld, /* eslint-disable-next-line no-unused-vars */ exit: code => { console.error('exit not implemented'); @@ -183,11 +128,8 @@ const options = { earl: { filename: process.env.EARL }, - verboseSkip: process.env.VERBOSE_SKIP === 'true', - bailOnError: process.env.BAIL === 'true', entries, - testEnv, - benchmarkOptions, + testEnvDefaults, readFile: filename => { return server.run(filename, function(filename) { const fs = serverRequire('fs-extra'); @@ -204,7 +146,7 @@ const options = { }, /* eslint-disable-next-line no-unused-vars */ import: f => { - console.error('import not implemented'); + console.error('import not implemented for "' + f + '"'); } }; diff --git a/tests/test-node.js b/tests/test-node.js new file mode 100644 index 00000000..09ca54d3 --- /dev/null +++ b/tests/test-node.js @@ -0,0 +1,123 @@ +/** + * Node.js test runner for jsonld.js. + * + * See ./test.js for environment vars options. + * + * @author Dave Longley + * @author David I. Lehn + * + * Copyright (c) 2011-2023 Digital Bazaar, Inc. All rights reserved. 
+ */ +const assert = require('chai').assert; +const benchmark = require('benchmark'); +const common = require('./test.js'); +const fs = require('fs-extra'); +const os = require('os'); +const path = require('path'); + +const entries = []; + +if(process.env.TESTS) { + entries.push(...process.env.TESTS.split(' ')); +} else { + const _top = path.resolve(__dirname, '..'); + + // json-ld-api main test suite + const apiPath = path.resolve(_top, 'test-suites/json-ld-api/tests'); + if(fs.existsSync(apiPath)) { + entries.push(apiPath); + } else { + // default to sibling dir + entries.push(path.resolve(_top, '../json-ld-api/tests')); + } + + // json-ld-framing main test suite + const framingPath = path.resolve(_top, 'test-suites/json-ld-framing/tests'); + if(fs.existsSync(framingPath)) { + entries.push(framingPath); + } else { + // default to sibling dir + entries.push(path.resolve(_top, '../json-ld-framing/tests')); + } + + /* + // TODO: use json-ld-framing once tests are moved + // json-ld.org framing test suite + const framingPath = path.resolve( + _top, 'test-suites/json-ld.org/test-suite/tests/frame-manifest.jsonld'); + if(fs.existsSync(framingPath)) { + entries.push(framingPath); + } else { + // default to sibling dir + entries.push(path.resolve( + _top, '../json-ld.org/test-suite/tests/frame-manifest.jsonld')); + } + */ + + // W3C RDF Dataset Canonicalization "rdf-canon" test suite + const rdfCanonPath = path.resolve(_top, 'test-suites/rdf-canon/tests'); + if(fs.existsSync(rdfCanonPath)) { + entries.push(rdfCanonPath); + } else { + // default to sibling dir + entries.push(path.resolve(_top, '../rdf-canon/tests')); + } + + // other tests + entries.push(path.resolve(_top, 'tests/misc.js')); + entries.push(path.resolve(_top, 'tests/graph-container.js')); + entries.push(path.resolve(_top, 'tests/node-document-loader-tests.js')); +} + +// test environment defaults +const testEnvDefaults = { + label: '', + arch: process.arch, + cpu: os.cpus()[0].model, + cpuCount: os.cpus().length, + platform: process.platform, + runtime: 'Node.js', + runtimeVersion: process.version, + comment: '', + version: require('../package.json').version +}; + +const env = { + BAIL: process.env.BAIL, + BENCHMARK: process.env.BENCHMARK, + TEST_ENV: process.env.TEST_ENV, + VERBOSE_SKIP: process.env.VERBOSE_SKIP +}; + +const options = { + env, + nodejs: { + path + }, + assert, + benchmark, + exit: code => process.exit(code), + earl: { + filename: process.env.EARL + }, + entries, + testEnvDefaults, + readFile: filename => { + return fs.readFile(filename, 'utf8'); + }, + writeFile: (filename, data) => { + return fs.outputFile(filename, data); + }, + import: f => require(f) +}; + +// wait for setup of all tests then run mocha +common(options).then(() => { + run(); +}).catch(err => { + console.error(err); +}); + +process.on('unhandledRejection', (reason, p) => { + console.error('Unhandled Rejection at:', p, 'reason:', reason); +}); diff --git a/tests/test.js b/tests/test.js index 074161d2..4f2449a1 100644 --- a/tests/test.js +++ b/tests/test.js @@ -1,10 +1,13 @@ /** - * Node.js test runner for jsonld.js. + * Test and benchmark runner for jsonld.js. * * Use environment vars to control: * + * General: + * Boolean env options enabled with case insensitive values: + * 'true', 't', 'yes', 'y', 'on', '1', similarly for false * Set dirs, manifests, or js to run: - * JSONLD_TESTS="r1 r2 ..." + * TESTS="r1 r2 ..."
* Output an EARL report: * EARL=filename * Test environment details for EARL report: @@ -28,102 +31,376 @@ * - comment: any text * - version: jsonld.js version * Bail with tests fail: - * BAIL=true + * BAIL= (default: false) * Verbose skip reasons: - * VERBOSE_SKIP=true + * VERBOSE_SKIP= (default: false) * Benchmark mode: * Basic: - * JSONLD_BENCHMARK=1 + * BENCHMARK=1 * With options: - * JSONLD_BENCHMARK=key1=value1,key2=value2,... + * BENCHMARK=key1=value1,key2=value2,... + * Benchmark options: + * jobs=N1[+N2[...]] (default: 1) + * Run each test with jobs size of N1, N2, ... + * Recommend 1+10 to get simple and parallel data. + * Note the N>1 tests use custom reporter to show time per job. + * fast1= (default: false) + * Run single job faster by omitting Promise.all wrapper. * * @author Dave Longley * @author David I. Lehn * - * Copyright (c) 2011-2022 Digital Bazaar, Inc. All rights reserved. + * Copyright (c) 2011-2023 Digital Bazaar, Inc. All rights reserved. */ -const assert = require('chai').assert; -const benchmark = require('benchmark'); -const common = require('./test-common'); -const fs = require('fs-extra'); +/* eslint-disable indent */ +const EarlReport = require('./earl-report'); +const join = require('join-path-js'); const jsonld = require('..'); -const os = require('os'); -const path = require('path'); +const {klona} = require('klona'); +const {prependBase} = require('../lib/url'); +const rdfCanonize = require('rdf-canonize'); -const entries = []; +// helper functions, inspired by 'boolean' package +function isTrue(value) { + return value && [ + 'true', 't', 'yes', 'y', 'on', '1' + ].includes(value.trim().toLowerCase()); +} -if(process.env.JSONLD_TESTS) { - entries.push(...process.env.JSONLD_TESTS.split(' ')); -} else { - const _top = path.resolve(__dirname, '..'); +function isFalse(value) { + return !value || [ + 'false', 'f', 'no', 'n', 'off', '0' + ].includes(value.trim().toLowerCase()); +} - // json-ld-api main test suite - const apiPath = path.resolve(_top, 'test-suites/json-ld-api/tests'); - if(fs.existsSync(apiPath)) { - entries.push(apiPath); - } else { - // default to sibling dir - entries.push(path.resolve(_top, '../json-ld-api/tests')); - } +module.exports = async function(options) { - // json-ld-framing main test suite - const framingPath = path.resolve(_top, 'test-suites/json-ld-framing/tests'); - if(fs.existsSync(framingPath)) { - entries.push(framingPath); - } else { - // default to sibling dir - entries.push(path.resolve(_top, '../json-ld-framing/tests')); - } +'use strict'; - /* - // TODO: use json-ld-framing once tests are moved - // json-ld.org framing test suite - const framingPath = path.resolve( - _top, 'test-suites/json-ld.org/test-suite/tests/frame-manifest.jsonld'); - if(fs.existsSync(framingPath)) { - entries.push(framingPath); - } else { - // default to sibling dir - entries.push(path.resolve( - _top, '../json-ld.org/test-suite/tests/frame-manifest.jsonld')); - } - */ +const assert = options.assert; +const benchmark = options.benchmark; - // W3C RDF Dataset Canonicalization "rdf-canon" test suite - const rdfCanonPath = path.resolve(_top, 'test-suites/rdf-canon/tests'); - if(fs.existsSync(rdfCanonPath)) { - entries.push(rdfCanonPath); - } else { - // default up to sibling dir - entries.push(path.resolve(_top, '../rdf-canon/tests')); +const bailOnError = isTrue(options.env.BAIL || 'false'); +const verboseSkip = isTrue(options.env.VERBOSE_SKIP || 'false'); + +const benchmarkOptions = { + enabled: false, + jobs: [1], + fast1: false +}; + 
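+// Parse the BENCHMARK env option into benchmarkOptions (see the header docs).
+// Examples:
+//   BENCHMARK=1                    -> enabled with defaults (jobs=[1])
+//   BENCHMARK=jobs=1+10,fast1=true -> enabled, jobs=[1, 10], fast1=true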
+if(options.env.BENCHMARK) { + if(!isFalse(options.env.BENCHMARK)) { + benchmarkOptions.enabled = true; + if(!isTrue(options.env.BENCHMARK)) { + options.env.BENCHMARK.split(',').forEach(pair => { + const kv = pair.split('='); + switch(kv[0]) { + case 'jobs': + benchmarkOptions.jobs = kv[1].split('+').map(n => parseInt(n, 10)); + break; + case 'fast1': + benchmarkOptions.fast1 = isTrue(kv[1]); + break; + default: + throw new Error(`Unknown benchmark option: "${pair}"`); + } + }); + } } +} - // other tests - entries.push(path.resolve(_top, 'tests/misc.js')); - entries.push(path.resolve(_top, 'tests/graph-container.js')); - entries.push(path.resolve(_top, 'tests/node-document-loader-tests.js')); +// Only support one job size for EARL output to simplify reporting and avoid +// multi-variable issues. Can compare multiple runs with different job sizes. +if(options.earl.filename && benchmarkOptions.jobs.length > 1) { + throw new Error('Only one job size allowed when outputting EARL.'); } -// test environment +const manifest = options.manifest || { + '@context': 'https://json-ld.org/test-suite/context.jsonld', + '@id': '', + '@type': 'mf:Manifest', + description: 'Top level jsonld.js manifest', + name: 'jsonld.js', + // allow for async generated entries + // used for karma tests to allow async server exist check + sequence: (await Promise.all(options.entries || [])).flat().filter(e => e), + filename: '/' +}; + +const TEST_TYPES = { + 'jld:CompactTest': { + skip: { + // skip tests where behavior changed for a 1.1 processor + // see JSON-LD 1.0 Errata + specVersion: ['json-ld-1.0'], + // FIXME + // NOTE: idRegex format: + //MMM-manifest#tNNN$/, + idRegex: [ + /compact-manifest#t0112$/, + /compact-manifest#t0113$/, + // html + /html-manifest#tc001$/, + /html-manifest#tc002$/, + /html-manifest#tc003$/, + /html-manifest#tc004$/, + ] + }, + fn: 'compact', + params: [ + readTestUrl('input'), + readTestJson('context'), + createTestOptions() + ], + compare: compareExpectedJson + }, + 'jld:ExpandTest': { + skip: { + // skip tests where behavior changed for a 1.1 processor + // see JSON-LD 1.0 Errata + specVersion: ['json-ld-1.0'], + // FIXME + // NOTE: idRegex format: + //MMM-manifest#tNNN$/, + idRegex: [ + // spec issues + // Unclear how to handle {"@id": null} edge case + // See https://github.com/w3c/json-ld-api/issues/480 + // non-normative test, also see toRdf-manifest#te122 + ///expand-manifest#t0122$/, + + // misc + /expand-manifest#tc037$/, + /expand-manifest#tc038$/, + /expand-manifest#ter54$/, + + // html + /html-manifest#te001$/, + /html-manifest#te002$/, + /html-manifest#te003$/, + /html-manifest#te004$/, + /html-manifest#te005$/, + /html-manifest#te006$/, + /html-manifest#te007$/, + /html-manifest#te010$/, + /html-manifest#te011$/, + /html-manifest#te012$/, + /html-manifest#te013$/, + /html-manifest#te014$/, + /html-manifest#te015$/, + /html-manifest#te016$/, + /html-manifest#te017$/, + /html-manifest#te018$/, + /html-manifest#te019$/, + /html-manifest#te020$/, + /html-manifest#te021$/, + /html-manifest#te022$/, + /html-manifest#tex01$/, + // HTML extraction + /expand-manifest#thc01$/, + /expand-manifest#thc02$/, + /expand-manifest#thc03$/, + /expand-manifest#thc04$/, + /expand-manifest#thc05$/, + // remote + /remote-doc-manifest#t0013$/, // HTML + ] + }, + fn: 'expand', + params: [ + readTestUrl('input'), + createTestOptions() + ], + compare: compareExpectedJson + }, + 'jld:FlattenTest': { + skip: { + // skip tests where behavior changed for a 1.1 processor + // see JSON-LD 1.0 Errata + 
specVersion: ['json-ld-1.0'], + // FIXME + // NOTE: idRegex format: + //MMM-manifest#tNNN$/, + idRegex: [ + // html + /html-manifest#tf001$/, + /html-manifest#tf002$/, + /html-manifest#tf003$/, + /html-manifest#tf004$/, + ] + }, + fn: 'flatten', + params: [ + readTestUrl('input'), + readTestJson('context'), + createTestOptions() + ], + compare: compareExpectedJson + }, + 'jld:FrameTest': { + skip: { + // skip tests where behavior changed for a 1.1 processor + // see JSON-LD 1.0 Errata + specVersion: ['json-ld-1.0'], + // FIXME + // NOTE: idRegex format: + //MMM-manifest#tNNN$/, + idRegex: [ + /frame-manifest#t0069$/, + ] + }, + fn: 'frame', + params: [ + readTestUrl('input'), + readTestJson('frame'), + createTestOptions() + ], + compare: compareExpectedJson + }, + 'jld:FromRDFTest': { + skip: { + // skip tests where behavior changed for a 1.1 processor + // see JSON-LD 1.0 Errata + specVersion: ['json-ld-1.0'], + // FIXME + // NOTE: idRegex format: + //MMM-manifest#tNNN$/, + idRegex: [ + // direction (compound-literal) + /fromRdf-manifest#tdi11$/, + /fromRdf-manifest#tdi12$/, + ] + }, + fn: 'fromRDF', + params: [ + readTestNQuads('input'), + createTestOptions({format: 'application/n-quads'}) + ], + compare: compareExpectedJson + }, + 'jld:NormalizeTest': { + fn: 'normalize', + params: [ + readTestUrl('input'), + createTestOptions({format: 'application/n-quads'}) + ], + compare: compareExpectedNQuads + }, + 'jld:ToRDFTest': { + skip: { + // skip tests where behavior changed for a 1.1 processor + // see JSON-LD 1.0 Errata + specVersion: ['json-ld-1.0'], + // FIXME + // NOTE: idRegex format: + //MMM-manifest#tNNN$/, + idRegex: [ + // spec issues + // Unclear how to handle {"@id": null} edge case + // See https://github.com/w3c/json-ld-api/issues/480 + // normative test, also see expand-manifest#t0122 + ///toRdf-manifest#te122$/, + + // misc + /toRdf-manifest#tc037$/, + /toRdf-manifest#tc038$/, + /toRdf-manifest#ter54$/, + /toRdf-manifest#tli12$/, + /toRdf-manifest#tli14$/, + + // well formed + /toRdf-manifest#twf05$/, + + // html + /html-manifest#tr001$/, + /html-manifest#tr002$/, + /html-manifest#tr003$/, + /html-manifest#tr004$/, + /html-manifest#tr005$/, + /html-manifest#tr006$/, + /html-manifest#tr007$/, + /html-manifest#tr010$/, + /html-manifest#tr011$/, + /html-manifest#tr012$/, + /html-manifest#tr013$/, + /html-manifest#tr014$/, + /html-manifest#tr015$/, + /html-manifest#tr016$/, + /html-manifest#tr017$/, + /html-manifest#tr018$/, + /html-manifest#tr019$/, + /html-manifest#tr020$/, + /html-manifest#tr021$/, + /html-manifest#tr022$/, + // Invalid Statement + /toRdf-manifest#te075$/, + /toRdf-manifest#te111$/, + /toRdf-manifest#te112$/, + // direction (compound-literal) + /toRdf-manifest#tdi11$/, + /toRdf-manifest#tdi12$/, + ] + }, + fn: 'toRDF', + params: [ + readTestUrl('input'), + createTestOptions({format: 'application/n-quads'}) + ], + compare: compareCanonizedExpectedNQuads + }, + 'rdfc:Urgna2012EvalTest': { + fn: 'normalize', + params: [ + readTestNQuads('action'), + createTestOptions({ + algorithm: 'URGNA2012', + inputFormat: 'application/n-quads', + format: 'application/n-quads' + }) + ], + compare: compareExpectedNQuads + }, + 'rdfc:Urdna2015EvalTest': { + skip: { + // NOTE: idRegex format: + //manifest-urdna2015#testNNN$/, + idRegex: [ + // Unsupported U escape + /manifest-urdna2015#test060/ + ] + }, + fn: 'canonize', + params: [ + readTestNQuads('action'), + createTestOptions({ + algorithm: 'URDNA2015', + inputFormat: 'application/n-quads', + format: 'application/n-quads' + 
}) + ], + compare: compareExpectedNQuads + } +}; + +const SKIP_TESTS = []; + +// build test env from defaults +const testEnvFields = [ + 'label', 'arch', 'cpu', 'cpuCount', 'platform', 'runtime', 'runtimeVersion', + 'comment', 'version' +]; let testEnv = null; -if(process.env.TEST_ENV) { - let _test_env = process.env.TEST_ENV; - if(!(['0', 'false'].includes(_test_env))) { +if(options.env.TEST_ENV) { + let _test_env = options.env.TEST_ENV; + if(!isFalse(_test_env)) { testEnv = {}; - if(['1', 'true', 'auto'].includes(_test_env)) { + if(isTrue(_test_env)) { _test_env = 'auto'; } _test_env.split(',').forEach(pair => { if(pair === 'auto') { - testEnv.name = 'auto'; - testEnv.arch = 'auto'; - testEnv.cpu = 'auto'; - testEnv.cpuCount = 'auto'; - testEnv.platform = 'auto'; - testEnv.runtime = 'auto'; - testEnv.runtimeVersion = 'auto'; - testEnv.comment = 'auto'; - testEnv.version = 'auto'; + testEnvFields.forEach(f => testEnv[f] = 'auto'); } else { const kv = pair.split('='); if(kv.length === 1) { @@ -133,81 +410,884 @@ if(process.env.TEST_ENV) { } } }); - if(testEnv.label === 'auto') { - testEnv.label = ''; + testEnvFields.forEach(f => { + if(testEnv[f] === 'auto') { + testEnv[f] = options.testEnvDefaults[f]; + } + }); + } +} + +// create earl report +if(options.earl && options.earl.filename) { + options.earl.report = new EarlReport({ + env: testEnv + }); + if(benchmarkOptions.enabled) { + options.earl.report.setupForBenchmarks({testEnv}); + } +} + +return new Promise(resolve => { + +// async generated tests +// _tests => [{suite}, ...] +// suite => { +// title: ..., +// tests: [test, ...], +// suites: [suite, ...] +// } +const _tests = []; + +return addManifest(manifest, _tests) + .then(() => { + return _testsToMocha(_tests); + }).then(result => { + if(options.earl.report) { + describe('Writing EARL report to: ' + options.earl.filename, function() { + // print out EARL even if .only was used + const _it = result.hadOnly ? it.only : it; + _it('should print the earl report', function() { + return options.writeFile( + options.earl.filename, options.earl.report.reportJson()); + }); + }); + } + }).then(() => resolve()); + +// build mocha tests from local test structure +function _testsToMocha(tests) { + let hadOnly = false; + tests.forEach(suite => { + if(suite.skip) { + describe.skip(suite.title); + return; + } + describe(suite.title, () => { + suite.tests.forEach(test => { + if(test.only) { + hadOnly = true; + it.only(test.title, test.f); + return; + } + it(test.title, test.f); + }); + const {hadOnly: _hadOnly} = _testsToMocha(suite.suites); + hadOnly = hadOnly || _hadOnly; + }); + suite.imports.forEach(f => { + options.import(f); + }); + }); + return { + hadOnly + }; +} + +}); + +/** + * Adds the tests for all entries in the given manifest. + * + * @param manifest {Object} the manifest. 
+ * @param parent {Object} the parent test structure + * @return {Promise} + */ +function addManifest(manifest, parent) { + return new Promise((resolve, reject) => { + // create test structure + const suite = { + title: manifest.name || manifest.label, + tests: [], + suites: [], + imports: [] + }; + parent.push(suite); + + // get entries and sequence (alias for entries) + const entries = [].concat( + getJsonLdValues(manifest, 'entries'), + getJsonLdValues(manifest, 'sequence') + ); + + const includes = getJsonLdValues(manifest, 'include'); + // add includes to sequence as jsonld files + for(let i = 0; i < includes.length; ++i) { + entries.push(includes[i] + '.jsonld'); + } + + // resolve all entry promises and process + Promise.all(entries).then(entries => { + let p = Promise.resolve(); + entries.forEach(entry => { + if(typeof entry === 'string' && entry.endsWith('js')) { + // process later as a plain JavaScript file + suite.imports.push(entry); + return; + } else if(typeof entry === 'function') { + // process as a function that returns a promise + p = p.then(() => { + return entry(options); + }).then(childSuite => { + if(suite) { + suite.suites.push(childSuite); + } + }); + return; + } + p = p.then(() => { + return readManifestEntry(manifest, entry); + }).then(entry => { + if(isJsonLdType(entry, '__SKIP__')) { + // special local skip logic + suite.tests.push(entry); + } else if(isJsonLdType(entry, 'mf:Manifest')) { + // entry is another manifest + return addManifest(entry, suite.suites); + } else { + // assume entry is a test + return addTest(manifest, entry, suite.tests); + } + }); + }); + return p; + }).then(() => { + resolve(); + }).catch(err => { + console.error(err); + reject(err); + }); + }); +} + +/** + * Adds a test. + * + * @param manifest {Object} the manifest. + * @param test {Object} the test. + * @param tests {Array} the list of tests to add to. + * @return {Promise} + */ +async function addTest(manifest, test, tests) { + // expand @id and input base + const test_id = test['@id'] || test.id; + //var number = test_id.substr(2); + test['@id'] = + (manifest.baseIri || '') + + basename(manifest.filename).replace('.jsonld', '') + + test_id; + test.base = manifest.baseIri + test.input; + test.manifest = manifest; + const description = test_id + ' ' + (test.purpose || test.name); + + /* + // build test options for omit checks + const testInfo = TEST_TYPES[getJsonLdTestType(test)]; + const params = testInfo.params.map(param => param(test)); + const testOptions = params[1]; + */ + + // number of parallel jobs for benchmarks + const jobTests = benchmarkOptions.enabled ? benchmarkOptions.jobs : [1]; + const fast1 = benchmarkOptions.enabled ? 
benchmarkOptions.fast1 : true; + + jobTests.forEach(jobs => { + const _test = { + title: description + ` (jobs=${jobs})`, + f: makeFn({ + test, + run: ({/*test, */testInfo, params}) => { + // skip Promise.all + if(jobs === 1 && fast1) { + return jsonld[testInfo.fn](...params); + } + const all = []; + for(let j = 0; j < jobs; j++) { + all.push(jsonld[testInfo.fn](...params)); + } + return Promise.all(all); + }, + jobs, + isBenchmark: benchmarkOptions.enabled + }) + }; + // 'only' based on test manifest + // 'skip' handled via skip() + if('only' in test) { + _test.only = test.only; } - if(testEnv.arch === 'auto') { - testEnv.arch = process.arch; + tests.push(_test); + }); +} + +function makeFn({ + test, + adjustParams = p => p, + run, + jobs, + isBenchmark = false, + unsupportedInBrowser = false +}) { + return async function() { + const self = this; + self.timeout(10000); + const testInfo = TEST_TYPES[getJsonLdTestType(test)]; + + // skip if unsupported in browser + if(unsupportedInBrowser) { + if(verboseSkip) { + console.log('Skipping test due no browser support:', + {id: test['@id'], name: test.name}); + } + self.skip(); } - if(testEnv.cpu === 'auto') { - testEnv.cpu = os.cpus()[0].model; + + // skip based on test manifest + if('skip' in test && test.skip) { + if(verboseSkip) { + console.log('Skipping test due to manifest:', + {id: test['@id'], name: test.name}); + } + self.skip(); } - if(testEnv.cpuCount === 'auto') { - testEnv.cpuCount = os.cpus().length; + + // skip based on unknown test type + const testTypes = Object.keys(TEST_TYPES); + if(!isJsonLdType(test, testTypes)) { + if(verboseSkip) { + const type = [].concat( + getJsonLdValues(test, '@type'), + getJsonLdValues(test, 'type') + ); + console.log('Skipping test due to unknown type:', + {id: test['@id'], name: test.name, type}); + } + self.skip(); } - if(testEnv.platform === 'auto') { - testEnv.platform = process.platform; + + // skip based on test type + if(isJsonLdType(test, SKIP_TESTS)) { + if(verboseSkip) { + const type = [].concat( + getJsonLdValues(test, '@type'), + getJsonLdValues(test, 'type') + ); + console.log('Skipping test due to test type:', + {id: test['@id'], name: test.name, type}); + } + self.skip(); } - if(testEnv.runtime === 'auto') { - testEnv.runtime = 'Node.js'; + + // skip based on type info + if(testInfo.skip && testInfo.skip.type) { + if(verboseSkip) { + console.log('Skipping test due to type info:', + {id: test['@id'], name: test.name}); + } + self.skip(); } - if(testEnv.runtimeVersion === 'auto') { - testEnv.runtimeVersion = process.version; + + // skip based on id regex + if(testInfo.skip && testInfo.skip.idRegex) { + testInfo.skip.idRegex.forEach(function(re) { + if(re.test(test['@id'])) { + if(verboseSkip) { + console.log('Skipping test due to id:', + {id: test['@id']}); + } + self.skip(); + } + }); + } + + // skip based on description regex + /* + if(testInfo.skip && testInfo.skip.descriptionRegex) { + testInfo.skip.descriptionRegex.forEach(function(re) { + if(re.test(description)) { + if(verboseSkip) { + console.log('Skipping test due to description:', { + id: test['@id'], + name: test.name, + description + }); + } + self.skip(); + } + }); } - if(testEnv.comment === 'auto') { - testEnv.comment = ''; + */ + + // Make expandContext absolute to the manifest + if(test.hasOwnProperty('option') && test.option.expandContext) { + test.option.expandContext = + prependBase(test.manifest.baseIri, test.option.expandContext); } - if(testEnv.version === 'auto') { - testEnv.version = 
require('../package.json').version; + + const testOptions = getJsonLdValues(test, 'option'); + // allow special handling in case of normative test failures + let normativeTest = true; + + testOptions.forEach(function(opt) { + const processingModes = getJsonLdValues(opt, 'processingMode'); + processingModes.forEach(function(pm) { + let skipModes = []; + if(testInfo.skip && testInfo.skip.processingMode) { + skipModes = testInfo.skip.processingMode; + } + if(skipModes.indexOf(pm) !== -1) { + if(options.verboseSkip) { + console.log('Skipping test due to processingMode:', + {id: test['@id'], name: test.name, processingMode: pm}); + } + self.skip(); + } + }); + }); + + testOptions.forEach(function(opt) { + const specVersions = getJsonLdValues(opt, 'specVersion'); + specVersions.forEach(function(sv) { + let skipVersions = []; + if(testInfo.skip && testInfo.skip.specVersion) { + skipVersions = testInfo.skip.specVersion; + } + if(skipVersions.indexOf(sv) !== -1) { + if(options.verboseSkip) { + console.log('Skipping test due to specVersion:', + {id: test['@id'], name: test.name, specVersion: sv}); + } + self.skip(); + } + }); + }); + + testOptions.forEach(function(opt) { + const normative = getJsonLdValues(opt, 'normative'); + normative.forEach(function(n) { + normativeTest = normativeTest && n; + }); + }); + + const params = adjustParams(testInfo.params.map(param => param(test))); + // resolve test data + const values = await Promise.all(params); + // copy used to check inputs do not change + const valuesOrig = klona(values); + let err; + let result; + // run and capture errors and results + try { + result = await run({test, testInfo, params: values}); + // check input not changed + assert.deepStrictEqual(valuesOrig, values); + } catch(e) { + err = e; + } + + try { + if(isJsonLdType(test, 'jld:NegativeEvaluationTest')) { + if(!isBenchmark) { + await compareExpectedError(test, err); + } + } else if(isJsonLdType(test, 'jld:PositiveEvaluationTest') || + isJsonLdType(test, 'rdfc:Urgna2012EvalTest') || + isJsonLdType(test, 'rdfc:Urdna2015EvalTest')) { + if(err) { + throw err; + } + if(!isBenchmark) { + await testInfo.compare(test, result); + } + } else if(isJsonLdType(test, 'jld:PositiveSyntaxTest')) { + // no checks + } else { + throw Error('Unknown test type: ' + test.type); + } + + let benchmarkResult = null; + if(benchmarkOptions.enabled) { + const bparams = adjustParams(testInfo.params.map(param => param(test, { + // pre-load params to avoid doc loader and parser timing + load: true + }))); + // resolve test data + const bvalues = await Promise.all(bparams); + + const result = await runBenchmark({ + test, + testInfo, + jobs, + run, + params: bvalues, + mochaTest: self + }); + benchmarkResult = { + // FIXME use generic prefix + '@type': 'jldb:BenchmarkResult', + // normalize to jobs/sec from overall ops/sec + 'jldb:hz': result.target.hz * jobs, + 'jldb:rme': result.target.stats.rme + }; + } + + if(options.earl.report) { + options.earl.report.addAssertion(test, true, { + benchmarkResult + }); + } + } catch(err) { + // FIXME: improve handling of non-normative errors + // FIXME: for now, explicitly disabling tests. 
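+      // On failure: optionally bail early (BAIL), record a failed EARL
+      // assertion, log the error, and rethrow so mocha reports the failure.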
+ //if(!normativeTest) { + // // failure ok + // if(verboseSkip) { + // console.log('Skipping non-normative test due to failure:', + // {id: test['@id'], name: test.name}); + // } + // self.skip(); + //} + if(bailOnError) { + if(err.name !== 'AssertionError') { + console.error('\nError: ', JSON.stringify(err, null, 2)); + } + options.exit(); + } + if(options.earl.report) { + options.earl.report.addAssertion(test, false); + } + console.error('Error: ', JSON.stringify(err, null, 2)); + throw err; + } + }; +} + +async function runBenchmark({test, testInfo, jobs, params, run, mochaTest}) { + return new Promise((resolve, reject) => { + const suite = new benchmark.Suite(); + suite.add({ + name: test.name, + defer: true, + fn: deferred => { + run({test, testInfo, params}).then(() => { + deferred.resolve(); + }); + } + }); + suite + .on('start', e => { + // set timeout to a bit more than max benchmark time + mochaTest.timeout((e.target.maxTime + 10) * 1000 * jobs); + }) + .on('cycle', e => { + const jobsHz = e.target.hz * jobs; + const jobsPerSec = jobsHz.toFixed(jobsHz < 100 ? 2 : 0); + const msg = `${String(e.target)} (${jobsPerSec} jobs/sec)`; + console.log(msg); + }) + .on('error', err => { + reject(new Error(err)); + }) + .on('complete', e => { + resolve(e); + }) + .run({async: true}); + }); +} + +function getJsonLdTestType(test) { + const types = Object.keys(TEST_TYPES); + for(let i = 0; i < types.length; ++i) { + if(isJsonLdType(test, types[i])) { + return types[i]; } } + return null; } -let benchmarkOptions = null; -if(process.env.JSONLD_BENCHMARK) { - if(!(['0', 'false'].includes(process.env.JSONLD_BENCHMARK))) { - benchmarkOptions = {}; - if(!(['1', 'true'].includes(process.env.JSONLD_BENCHMARK))) { - process.env.JSONLD_BENCHMARK.split(',').forEach(pair => { - const kv = pair.split('='); - benchmarkOptions[kv[0]] = kv[1]; - }); +function readManifestEntry(manifest, entry) { + let p = Promise.resolve(); + let _entry = entry; + if(typeof entry === 'string') { + let _filename; + p = p.then(() => { + if(entry.endsWith('json') || entry.endsWith('jsonld')) { + // load as file + return entry; + } + // load as dir with manifest.jsonld + return joinPath(entry, 'manifest.jsonld'); + }).then(entry => { + const dir = dirname(manifest.filename); + return joinPath(dir, entry); + }).then(filename => { + _filename = filename; + return readJson(filename); + }).then(entry => { + _entry = entry; + _entry.filename = _filename; + return _entry; + }).catch(err => { + if(err.code === 'ENOENT') { + //console.log('File does not exist, skipping: ' + _filename); + // return a "skip" entry + _entry = { + type: '__SKIP__', + title: 'Not found, skipping: ' + _filename, + filename: _filename, + skip: true + }; + return; + } + throw err; + }); + } + return p.then(() => { + _entry.dirname = dirname(_entry.filename || manifest.filename); + return _entry; + }); +} + +function readTestUrl(property) { + return async function(test, options) { + if(!test[property]) { + return null; + } + if(options && options.load) { + // always load + const filename = await joinPath(test.dirname, test[property]); + return readJson(filename); } + return test.manifest.baseIri + test[property]; + }; +} + +function readTestJson(property) { + return async function(test) { + if(!test[property]) { + return null; + } + const filename = await joinPath(test.dirname, test[property]); + return readJson(filename); + }; +} + +function readTestNQuads(property) { + return async function(test) { + if(!test[property]) { + return null; + } + const filename = 
await joinPath(test.dirname, test[property]); + return readFile(filename); + }; +} + +function createTestOptions(opts) { + return function(test) { + const options = { + documentLoader: createDocumentLoader(test) + }; + const httpOptions = ['contentType', 'httpLink', 'httpStatus', 'redirectTo']; + const testOptions = test.option || {}; + Object.assign(options, testOptions); + for(const key in testOptions) { + if(httpOptions.indexOf(key) === -1) { + options[key] = testOptions[key]; + } + } + if(opts) { + // extend options + Object.assign(options, opts); + } + return options; + }; +} + +// find the expected output property or throw error +function _getExpectProperty(test) { + if('expectErrorCode' in test) { + return 'expectErrorCode'; + } else if('expect' in test) { + return 'expect'; + } else if('result' in test) { + return 'result'; + } else { + throw Error('No expected output property found'); } } -const options = { - nodejs: { - path - }, - assert, - benchmark, - jsonld, - exit: code => process.exit(code), - earl: { - filename: process.env.EARL - }, - verboseSkip: process.env.VERBOSE_SKIP === 'true', - bailOnError: process.env.BAIL === 'true', - entries, - testEnv, - benchmarkOptions, - readFile: filename => { - return fs.readFile(filename, 'utf8'); - }, - writeFile: (filename, data) => { - return fs.outputFile(filename, data); - }, - import: f => require(f) -}; +async function compareExpectedJson(test, result) { + let expect; + try { + expect = await readTestJson(_getExpectProperty(test))(test); + assert.deepStrictEqual(result, expect); + } catch(err) { + if(options.bailOnError) { + console.log('\nTEST FAILED\n'); + console.log('EXPECTED: ' + JSON.stringify(expect, null, 2)); + console.log('ACTUAL: ' + JSON.stringify(result, null, 2)); + } + throw err; + } +} -// wait for setup of all tests then run mocha -common(options).then(() => { - run(); -}).catch(err => { - console.error(err); -}); +async function compareExpectedNQuads(test, result) { + let expect; + try { + expect = await readTestNQuads(_getExpectProperty(test))(test); + assert.strictEqual(result, expect); + } catch(ex) { + if(bailOnError) { + console.log('\nTEST FAILED\n'); + console.log('EXPECTED:\n' + expect); + console.log('ACTUAL:\n' + result); + } + throw ex; + } +} -process.on('unhandledRejection', (reason, p) => { - console.error('Unhandled Rejection at:', p, 'reason:', reason); -}); +async function compareCanonizedExpectedNQuads(test, result) { + let expect; + try { + expect = await readTestNQuads(_getExpectProperty(test))(test); + const opts = {algorithm: 'URDNA2015'}; + const expectDataset = rdfCanonize.NQuads.parse(expect); + const expectCmp = await rdfCanonize.canonize(expectDataset, opts); + const resultDataset = rdfCanonize.NQuads.parse(result); + const resultCmp = await rdfCanonize.canonize(resultDataset, opts); + assert.strictEqual(resultCmp, expectCmp); + } catch(err) { + if(options.bailOnError) { + console.log('\nTEST FAILED\n'); + console.log('EXPECTED:\n' + expect); + console.log('ACTUAL:\n' + result); + } + throw err; + } +} + +async function compareExpectedError(test, err) { + let expect; + let result; + try { + expect = test[_getExpectProperty(test)]; + result = getJsonLdErrorCode(err); + assert.ok(err, 'no error present'); + assert.strictEqual(result, expect); + } catch(_err) { + if(options.bailOnError) { + console.log('\nTEST FAILED\n'); + console.log('EXPECTED: ' + expect); + console.log('ACTUAL: ' + result); + } + // log the unexpected error to help with debugging + console.log('Unexpected error:', 
err); + throw _err; + } +} + +function isJsonLdType(node, type) { + const nodeType = [].concat( + getJsonLdValues(node, '@type'), + getJsonLdValues(node, 'type') + ); + type = Array.isArray(type) ? type : [type]; + for(let i = 0; i < type.length; ++i) { + if(nodeType.indexOf(type[i]) !== -1) { + return true; + } + } + return false; +} + +function getJsonLdValues(node, property) { + let rval = []; + if(property in node) { + rval = node[property]; + if(!Array.isArray(rval)) { + rval = [rval]; + } + } + return rval; +} + +function getJsonLdErrorCode(err) { + if(!err) { + return null; + } + if(err.details) { + if(err.details.code) { + return err.details.code; + } + if(err.details.cause) { + return getJsonLdErrorCode(err.details.cause); + } + } + return err.name; +} + +async function readJson(filename) { + const data = await readFile(filename); + return JSON.parse(data); +} + +async function readFile(filename) { + return options.readFile(filename); +} + +async function joinPath() { + return join.apply(null, Array.prototype.slice.call(arguments)); +} + +function dirname(filename) { + if(options.nodejs) { + return options.nodejs.path.dirname(filename); + } + const idx = filename.lastIndexOf('/'); + if(idx === -1) { + return filename; + } + return filename.substr(0, idx); +} + +function basename(filename) { + if(options.nodejs) { + return options.nodejs.path.basename(filename); + } + const idx = filename.lastIndexOf('/'); + if(idx === -1) { + return filename; + } + return filename.substr(idx + 1); +} + +// check test.option.loader.rewrite map for url, +// if no test rewrite, check manifest, +// else no rewrite +function rewrite(test, url) { + if(test.option && + test.option.loader && + test.option.loader.rewrite && + url in test.option.loader.rewrite) { + return test.option.loader.rewrite[url]; + } + const manifest = test.manifest; + if(manifest.option && + manifest.option.loader && + manifest.option.loader.rewrite && + url in manifest.option.loader.rewrite) { + return manifest.option.loader.rewrite[url]; + } + return url; +} + +/** + * Creates a test remote document loader. + * + * @param test the test to use the document loader for. + * + * @return the document loader. + */ +function createDocumentLoader(test) { + const localBases = [ + 'http://json-ld.org/test-suite', + 'https://json-ld.org/test-suite', + 'https://json-ld.org/benchmarks', + 'https://w3c.github.io/json-ld-api/tests', + 'https://w3c.github.io/json-ld-framing/tests' + ]; + + const localLoader = function(url) { + // always load remote-doc tests remotely in node + // NOTE: disabled due to github pages issues. 
+ //if(options.nodejs && test.manifest.name === 'Remote document') { + // return jsonld.documentLoader(url); + //} + + // handle loader rewrite options for test or manifest + url = rewrite(test, url); + + // FIXME: this check only works for main test suite and will not work if: + // - running other tests and main test suite not installed + // - use other absolute URIs but want to load local files + const isTestSuite = localBases.some(function(base) { + return url.startsWith(base); + }); + // TODO: improve this check + const isRelative = url.indexOf(':') === -1; + if(isTestSuite || isRelative) { + // attempt to load official test-suite files or relative URLs locally + return loadLocally(url); + } + + // load remotely + return jsonld.documentLoader(url); + }; + + return localLoader; + + function loadLocally(url) { + const doc = {contextUrl: null, documentUrl: url, document: null}; + const options = test.option; + if(options && url === test.base) { + if('redirectTo' in options && parseInt(options.httpStatus, 10) >= 300) { + doc.documentUrl = test.manifest.baseIri + options.redirectTo; + } else if('httpLink' in options) { + let contentType = options.contentType || null; + if(!contentType && url.indexOf('.jsonld', url.length - 7) !== -1) { + contentType = 'application/ld+json'; + } + if(!contentType && url.indexOf('.json', url.length - 5) !== -1) { + contentType = 'application/json'; + } + let linkHeader = options.httpLink; + if(Array.isArray(linkHeader)) { + linkHeader = linkHeader.join(','); + } + const linkHeaders = jsonld.parseLinkHeader(linkHeader); + const linkedContext = + linkHeaders['http://www.w3.org/ns/json-ld#context']; + if(linkedContext && contentType !== 'application/ld+json') { + if(Array.isArray(linkedContext)) { + throw {name: 'multiple context link headers'}; + } + doc.contextUrl = linkedContext.target; + } + + // If not JSON-LD, alternate may point there + if(linkHeaders.alternate && + linkHeaders.alternate.type == 'application/ld+json' && + !(contentType || '').match(/^application\/(\w*\+)?json$/)) { + doc.documentUrl = prependBase(url, linkHeaders.alternate.target); + } + } + } + + let p = Promise.resolve(); + if(doc.documentUrl.indexOf(':') === -1) { + p = p.then(() => { + return joinPath(test.manifest.dirname, doc.documentUrl); + }).then(filename => { + doc.documentUrl = 'file://' + filename; + return filename; + }); + } else { + p = p.then(() => { + return joinPath( + test.manifest.dirname, + doc.documentUrl.substr(test.manifest.baseIri.length)); + }).then(fn => { + return fn; + }); + } + + return p.then(readJson).then(json => { + doc.document = json; + return doc; + }).catch(() => { + throw {name: 'loading document failed', url}; + }); + } +} + +};
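The runner above requires `EarlReport` from `./earl-report.js`, which this diff does not touch. As a reading aid, the stub below sketches only the interface that the new `tests/test.js` code calls -- the constructor options, `setupForBenchmarks()`, `addAssertion()`, and `reportJson()`. The names and internals are inferred from those call sites, not taken from the real implementation.

// Interface sketch only -- inferred from the call sites in tests/test.js.
// The actual implementation lives in tests/earl-report.js (not in this diff).
class EarlReportSketch {
  // created as: new EarlReport({env: testEnv})
  constructor({env} = {}) {
    this.env = env;
    this.assertions = [];
  }
  // called once when benchmark mode is enabled
  setupForBenchmarks({testEnv}) {
    this.benchmarkEnv = testEnv;
  }
  // called per test: addAssertion(test, passed, {benchmarkResult})
  addAssertion(test, passed, {benchmarkResult = null} = {}) {
    this.assertions.push({id: test['@id'], passed, benchmarkResult});
  }
  // returned value is written to the EARL output file
  reportJson() {
    return JSON.stringify({assertions: this.assertions}, null, 2);
  }
}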