From 4595f642d5c63d7245200a85e6448d2a053d2005 Mon Sep 17 00:00:00 2001 From: sven-oly Date: Tue, 9 Sep 2025 20:19:30 +0000 Subject: [PATCH 1/4] Update logging with info on the source file running --- genData100.sh | 2 + generateDataAndRun.sh | 2 + schema/check_generated_data.py | 20 +++--- schema/check_schemas.py | 16 ++--- schema/check_test_output.py | 10 ++- testdriver/testdriver.py | 31 ++++----- testdriver/testplan.py | 80 ++++++++++++------------ testgen/generators/base.py | 2 +- testgen/generators/datetime_fmt.py | 43 ++----------- testgen/generators/localeDisplayNames.py | 4 +- testgen/generators/message_fmt2.py | 24 +++---- testgen/generators/number_fmt.py | 18 +++--- testgen/generators/plurals.py | 4 +- testgen/testdata_gen.py | 5 -- 14 files changed, 108 insertions(+), 153 deletions(-) diff --git a/genData100.sh b/genData100.sh index e7f6ceda..0a131357 100755 --- a/genData100.sh +++ b/genData100.sh @@ -14,6 +14,8 @@ logrotate -s logrotate.state logrotate.conf export NVM_DIR=$HOME/.nvm; source $NVM_DIR/nvm.sh; +# To clear warnings +npm install --package-lock-only # # Setup diff --git a/generateDataAndRun.sh b/generateDataAndRun.sh index 672b3195..87c54e3f 100755 --- a/generateDataAndRun.sh +++ b/generateDataAndRun.sh @@ -22,6 +22,8 @@ fi # Install NVM if it is not install in CI export NVM_DIR=$HOME/.nvm +# To clear lock +npm install --package-lock-only [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion diff --git a/schema/check_generated_data.py b/schema/check_generated_data.py index df433de8..906a1fd2 100644 --- a/schema/check_generated_data.py +++ b/schema/check_generated_data.py @@ -27,9 +27,9 @@ def main(args): logging.debug('TEST DATA PATH = %s', test_data_path) - logger = logging.Logger("Checking Test Data vs. Schemas LOGGER") + logger = logging.Logger("schema/check_generated_data.py: Checking Test Data vs. Schemas LOGGER") logger.setLevel(logging.INFO) - logger.info('+++ Test Generated test data vs. schemas files') + logger.info('schema/check_generated_data.py: +++ Test Generated test data vs. schemas') # TODO: get ICU versions icu_versions = [] @@ -39,8 +39,8 @@ def main(args): for dir_name in icu_dirs: icu_versions.append(os.path.basename(dir_name)) - logging.debug('ICU directories = %s', icu_versions) - logging.debug('test types = %s', ALL_TEST_TYPES) + logging.debug('schema/check_generated_data.py: ICU directories = %s', icu_versions) + logging.debug('schema/check_generated_data.py: test types = %s', ALL_TEST_TYPES) validator = schema_validator.ConformanceSchemaValidator() @@ -52,7 +52,7 @@ def main(args): validator.debug = 1 all_results = validator.validate_test_data_with_schema() - logging.info(' %d results for generated test data', len(all_results)) + logging.info('schema/check_generated_data.py: %d results for generated test data', len(all_results)) schema_errors = [] failed_validations = [] @@ -78,7 +78,7 @@ def main(args): try: summary_data = json.dumps(summary_json) except BaseException as error: - logging.error('json.dumps Summary data problem: %s at %s', error, error) + logging.error('schema/check_generated_data.py: json.dumps Summary data problem: %s at %s', error, error) sys.exit(1) output_filename = os.path.join(test_data_path, 'test_data_validation_summary.json') @@ -88,17 +88,17 @@ def main(args): file_out.close() except BaseException as error: schema_errors.append(output_filename) - logging.fatal('Error: %s. 
Cannot save validation summary in file %s', error, output_filename) + logging.fatal('schema/check_generated_data.py: %s. Cannot save validation summary in file %s', error, output_filename) sys.exit(1) if schema_errors: - logging.critical('Test data file files: %d fail out of %d:', + logging.critical('schema/check_generated_data.py: Test data file files: %d fail out of %d:', len(schema_errors), schema_count) for failure in schema_errors: - logging.critical(' %s', failure) + logging.critical('schema/check_generated_data.py: %s', failure) sys.exit(1) else: - logging.info("All %d generated test data files match with schema", schema_count) + logging.info("schema/check_generated_data.py: All %d generated test data files match with schema", schema_count) diff --git a/schema/check_schemas.py b/schema/check_schemas.py index c16865cc..374a2100 100644 --- a/schema/check_schemas.py +++ b/schema/check_schemas.py @@ -45,7 +45,7 @@ def save_schema_validation_summary(self, validation_status): try: summary_data = json.dumps(summary_json) except BaseException as err: - logging.error('%s: Cannot create JSON summary: %s', err, summary_json) + logging.error('schema/check_schemas: %s: Cannot create JSON summary: %s', err, summary_json) return None output_filename = os.path.join(self.schema_base, 'schema_validation_summary.json') @@ -54,7 +54,7 @@ def save_schema_validation_summary(self, validation_status): file_out.write(summary_data) file_out.close() except BaseException as error: - logging.warning('Error: %s. Cannot save validation summary in file %s', error, output_filename) + logging.warning('schema/check_schemas: Error: %s. Cannot save validation summary in file %s', error, output_filename) return None return output_filename @@ -62,7 +62,7 @@ def save_schema_validation_summary(self, validation_status): def parallel_validate_schema(validator, file_names): num_processors = multiprocessing.cpu_count() - logging.info('Schema validation: %s processors for %s schema validations', num_processors, len(file_names)) + logging.info('schema/check_schemas: Schema validation: %s processors for %s schema validations', num_processors, len(file_names)) processor_pool = multiprocessing.Pool(num_processors) # How to get all the results @@ -77,7 +77,7 @@ def parallel_validate_schema(validator, file_names): def main(args): logger = logging.Logger("TEST SCHEMAS LOGGER") logger.setLevel(logging.INFO) - logger.info('+++ Test JSON Schema files') + logger.info('schema/check_schemas: JSON Schema files') validator = schema_validator.ConformanceSchemaValidator() # Todo: use setters to initialize validator @@ -116,20 +116,20 @@ def main(args): }) if not result: schema_errors.append([schema_file, result, err, file_path]) - logging.error('Bad Schema at %s', schema_file) + logging.error('schema/check_schemas: Bad Schema at %s', schema_file) schema_count += 1 output_filename = val_schema.save_schema_validation_summary(validation_status) if schema_errors: - logging.error('SCHEMA: %d fail out of %d:', + logging.error('schema/check_schemas: SCHEMA: %d fail out of %d:', len(schema_errors), schema_count) for failure in schema_errors: - logging.error(' %s', failure) + logging.error('schema/check_schemas: %s', failure) # We need to clobber the process sys.exit(1) else: - logging.info("All %d schema are valid in file %s", schema_count, output_filename) + logging.info("schema/check_schemas: All %d schema are valid in file %s", schema_count, output_filename) exit(0) diff --git a/schema/check_test_output.py b/schema/check_test_output.py index 
5d45d676..93e17a94 100644 --- a/schema/check_test_output.py +++ b/schema/check_test_output.py @@ -24,11 +24,9 @@ def main(args): else: test_output_path = args[1] - logging.debug('TEST OUTPUT PATH = %s', test_output_path) - logger = logging.Logger("Checking Test Data vs. Schemas LOGGER") logger.setLevel(logging.INFO) - logger.info('+++ Test Generated test data vs. schemas files') + logger.info('+++ schema/check_test_output') # TODO: get ICU versions executor_set = set() @@ -74,7 +72,7 @@ def main(args): validator.debug = 1 all_results, test_validation_plans = validator.validate_test_output_with_schema() - logging.info(' %d results for test output', len(all_results)) + logging.info('schema/check_test_output: %d results for test output', len(all_results)) # Check if any files in the expected list were not validated. test_paths = set() @@ -83,7 +81,7 @@ def main(args): for json_file in json_files: if json_file not in test_paths: - logging.fatal('JSON file %s was not verified against a schema', json_file) + logging.fatal('schema/check_test_output: JSON file %s was not verified against a schema', json_file) # Bail out right away! sys.exit(1) @@ -128,7 +126,7 @@ def main(args): # Don't continue after this problem. sys.exit(1) - logging.info("All %d test output files match with schema", schema_count) + logging.info("schema/check_test_output: All %d test output files match with schema", schema_count) return diff --git a/testdriver/testdriver.py b/testdriver/testdriver.py index 981a7717..157a6bbd 100644 --- a/testdriver/testdriver.py +++ b/testdriver/testdriver.py @@ -46,12 +46,12 @@ def set_args(self, arg_options): for test_type in arg_options.test_type: if test_type not in ddt_data.testDatasets: - logging.warning('**** WARNING: test_type %s not in testDatasets', test_type) + logging.warning('testdriver.py **** WARNING: test_type %s not in testDatasets', test_type) else: # Create a test plan based on data and options test_data_info = ddt_data.testDatasets[test_type] if self.debug: - logging.debug('$$$$$ test_type = %s test_data_info = %s', + logging.debug('testdriver.py $$$$$ test_type = %s test_data_info = %s', test_type, test_data_info.testDataFilename) for executor in arg_options.exec: @@ -59,7 +59,7 @@ def set_args(self, arg_options): # Run a non-specified executor. Compatibility of versions # between test data and the executor should be done the text executor # program itself. - logging.error('No executable command configured for executor platform: %s', executor) + logging.error('testdriver.py: No executable command configured for executor platform: %s', executor) exec_command = {'path': executor} else: # Set details for execution from ExecutorInfo @@ -76,7 +76,7 @@ def set_args(self, arg_options): test_data = ddt_data.testDatasets[test_type] new_plan.set_test_data(test_data) except KeyError as err: - logging.warning('!!! %s: No test data filename for %s', err, test_type) + logging.warning('testdriver.py !!! 
%s: No test data filename for %s', err, test_type) if not new_plan.ignore: self.test_plans.append(new_plan) @@ -91,30 +91,30 @@ def parse_args(self, args): # Get all the arguments argparse = ddtargs.DdtArgs(args) - logging.debug('TestDriver OPTIONS: %s', argparse.getOptions()) + logging.debug('testdriver.py TestDriver OPTIONS: %s', argparse.getOptions()) # Now use the argparse.options to set the values in the driver self.set_args(argparse.getOptions()) return - def run_plans(self): + def run_plans(self, logger): # For each of the plans, run with the appropriate type of parallelism - # Debugging output + logger.info('testdriver.py: running %s plans serially', len(self.test_plans)) for plan in self.test_plans: plan.run_plan() def run_one(self, plan): - logging.debug("Parallel of %s %s %s" % (plan.test_lang, plan.test_type, plan.icu_version)) + logging.debug("testdriver.py Parallel of %s %s %s" % (plan.test_lang, plan.test_type, plan.icu_version)) plan.run_plan() - def run_plans_parallel(self): + def run_plans_parallel(self, logger): # Testing 15-Jan-2024 if not self.test_plans or len(self.test_plans) == 0: return num_processors = mp.cpu_count() plan_info = '%s, %s' % (self.test_plans[0].test_type, self.test_plans[0].exec_command) - logging.info('TestDriver: %s processors for %s plans. %s' % + logger.info('testdriver.py TestDriver: %s processors for %s plans. %s' % (num_processors, len(self.test_plans), plan_info)) processor_pool = mp.Pool(num_processors) @@ -133,16 +133,9 @@ def main(args): logger.setLevel(logging.INFO) if driver.run_serial: - driver.run_plans() + driver.run_plans(logger) else: - driver.run_plans_parallel() - - # if len(args)> 2: - # Set limit on number to run - # numberToRun = int(args[2]) - # driver.runLimit = numberToRun - - # driver.initExecutor() + driver.run_plans_parallel(logger) if __name__ == "__main__": diff --git a/testdriver/testplan.py b/testdriver/testplan.py index 29909dca..c5ad3239 100644 --- a/testdriver/testplan.py +++ b/testdriver/testplan.py @@ -64,7 +64,7 @@ def set_options(self, options): try: self.icu_version = options.icu_version except KeyError: - logging.warning('NO ICU VERSION SET') + logging.warning('testdriver/testplan.py: NO ICU VERSION SET') if options.ignore and not options.ignore == "null": self.ignore = True @@ -98,7 +98,7 @@ def run_plan(self): # Test data versions are given as "icu" + primary number, e.g., "73" # TODO: Consider sorting with possible dotted versions, e.g., 73.1.3 newest_version = sorted(icu_test_dirs, reverse=True)[0] - logging.warning('** Replacing proposed icu version of %s with version %s', + logging.warning('testdriver/testplan.py: ** Replacing proposed icu version of %s with version %s', self.icu_version, newest_version) self.icu_version = newest_version @@ -130,9 +130,9 @@ def run_plan(self): if self.options.run_limit: self.run_limit = int(self.options.run_limit) - logging.debug('!!! RUN LIMIT SET: %d', self.run_limit) + logging.debug('testdriver/testplan.py: !!! 
RUN LIMIT SET: %d', self.run_limit) - logging.debug('Running plan %s on data %s', + logging.debug('testdriver/testplan.py: Running plan %s on data %s', self.exec_command, self.inputFilePath) if self.options.exec_mode == 'one_test': @@ -153,14 +153,14 @@ def request_executor_info(self): self.jsonOutput["platform error"] = self.run_error_message return None else: - logging.debug('EXECUTOR INFO = %s', result) + logging.debug('testdriver/testplan.py: EXECUTOR INFO = %s', result) try: self.jsonOutput["platform"] = json.loads(result) except json.JSONDecodeError as error: - logging.error("Encountered error in parsing executor result string as JSON: %s", error) - logging.error("DETAILS: testplan info = %s, %s, %s", self.exec_command, self.icuVersion, self.test_type) - logging.error("Result string received from executor: [%s]", result) + logging.error("testdriver/testplan.py: Encountered error in parsing executor result string as JSON: %s", error) + logging.error("testdriver/testplan.py: DETAILS: testplan info = %s, %s, %s", self.exec_command, self.icuVersion, self.test_type) + logging.error("testdriver/testplan.py: Result string received from executor: [%s]", result) return None try: @@ -182,8 +182,8 @@ def request_executor_info(self): # self.platformVersion, self.testData.testDataFilename) except (KeyError, IndexError) as error: - logging.error("Encountered error processing executor JSON values: %s", error) - logging.error("DETAILS: testplan info = %s, %s, %s", self.exec_command, self.icuVersion, self.test_type) + logging.error("testdriver/testplan.py: Encountered error processing executor JSON values: %s", error) + logging.error("testdriver/testplan.py: DETAILS: testplan info = %s, %s, %s", self.exec_command, self.icuVersion, self.test_type) return None return True @@ -198,7 +198,7 @@ def request_executor_termination(self, terminate_args=None): if not result: self.jsonOutput["platform error"] = self.run_error_message else: - logging.debug('TERMINATION INFO = %s', result) + logging.debug('testdriver/testplan.py: TERMINATION INFO = %s', result) self.jsonOutput["platform"] = json.loads(result) def generate_header(self): @@ -236,7 +236,7 @@ def complete_output_file(self, error_info): self.resultsFile.close() def run_one_test_mode(self): - logging.debug(' Running OneTestMode %s on data %s', + logging.debug('testdriver/testplan.py: Running OneTestMode %s on data %s', self.exec_command, self.inputFilePath) # Set up calls for version data --> results @@ -250,13 +250,13 @@ def run_one_test_mode(self): # The test data was not found. Skip this test. 
return None - logging.debug('@@@ %d tests found', len(tests)) + logging.debug('testdriver/testplan.py: @@@ %d tests found', len(tests)) # Initialize JSON output headers --> results self.exec_list = self.exec_command.split() # TODO: get other things about the exec - logging.debug('EXEC info: exec_command %s, exec_list >%s<', + logging.debug('testdriver/testplan.py: EXEC info: exec_command %s, exec_list >%s<', self.exec_command, self.exec_list) @@ -274,14 +274,14 @@ def run_one_test_mode(self): if not os.path.isdir(result_dir): os.makedirs(result_dir, exist_ok=True) except BaseException as error: - logging.error('testplay.py: %s for %s / %s', error, result_dir, self.outputFilePath) + logging.error('testdriver/testplan.py: %s for %s / %s', error, result_dir, self.outputFilePath) # Create results file try: - logging.debug('++++++ Results file path = %s', self.outputFilePath) + logging.debug('testdriver/testplan.py: ++++++ Results file path = %s', self.outputFilePath) self.resultsFile = open(self.outputFilePath, encoding='utf-8', mode='w') except BaseException as error: - logging.error('*** Cannot open results file at %s. Err = %s', + logging.error('testdriver/testplan.py: *** Cannot open results file at %s. Err = %s', self.outputFilePath, error) self.resultsFile = open(self.outputFilePath, encoding='utf-8', mode='w') @@ -345,7 +345,7 @@ def run_all_single_tests(self, tests_per_execution=1): if self.progress_interval and test_num % self.progress_interval == 0: formatted_num = '{:,}'.format(test_num) - logging.debug('Testing %s / %s. %s of %s', + logging.debug('testdriver/testplan.py: Testing %s / %s. %s of %s', self.exec_list[0], self.testScenario, formatted_num, formatted_count) # Accumulate tests_per_execution items into a single outline @@ -359,8 +359,8 @@ def run_all_single_tests(self, tests_per_execution=1): all_test_results.extend(result) else: num_errors += 1 - logging.error("!!!!!! platform error: %s", self.run_error_message) - logging.error(' %s %s %s %s', self.test_type, self.test_lang, self.icu_version, self.platformVersion) + logging.error("testdriver/testplan.py: !!!!!! platform error: %s", self.run_error_message) + logging.error('testdriver/testplan.py: %s %s %s %s', self.test_type, self.test_lang, self.icu_version, self.platformVersion) # Reset the batch lines_in_batch = 0 @@ -368,7 +368,7 @@ def run_all_single_tests(self, tests_per_execution=1): test_num += 1 if self.run_limit and test_num > self.run_limit: - logging.debug('** Stopped after %d tests', (test_num - 1)) + logging.debug('testdriver/testplan.py: ** Stopped after %d tests', (test_num - 1)) break # PROCESS THE LAST BATCH, if any @@ -386,13 +386,13 @@ def process_batch_of_tests(self, tests_to_send): return [] if self.debug > 2: - logging.debug('PROCESSING %d tests', len(tests_to_send)) + logging.debug('testdriver/testplan.py: PROCESSING %d tests', len(tests_to_send)) # Ask process to exit when finished. out_and_exit = '\n'.join(tests_to_send) + '\n#EXIT\n' if self.debug > 2: - logging.info('+++ Test LINE TO EXECUTOR = %s', out_and_exit) + logging.info('testdriver/testplan.py: +++ Test LINE TO EXECUTOR = %s', out_and_exit) result = self.send_one_line(out_and_exit) @@ -400,18 +400,18 @@ def process_batch_of_tests(self, tests_to_send): # don't sent more of that type. if not result: num_errors += 1 - logging.warning('!!!!!! process_batch_of_tests: "platform error": "%s"\n', + logging.warning('testdriver/testplan.py: !!!!!! 
process_batch_of_tests: "platform error": "%s"\n', self.run_error_message) return None if self.debug > 2: - logging.info('+++ Line from EXECUTOR = %s', result) + logging.info('testdriver/testplan.py: +++ Line from EXECUTOR = %s', result) index = 0 batch_out = [] for item in result.split('\n'): if self.debug > 1: - logging.info(' RESULT %d = (%d) >%s<', index, len(item), item) + logging.info('testdriver/testplan.py: RESULT %d = (%d) >%s<', index, len(item), item) if not item or len(item) <= 0: # Check for special results returned from the executor, # indicated by '#' in the first column of the line returned. @@ -419,26 +419,26 @@ def process_batch_of_tests(self, tests_to_send): # TODO: Document these, perhaps in the project's JSON schema. continue if item[0] == "#": - logging.debug('#### DEBUG OUTPUT = %s', item) + logging.debug('testdriver/testplan.py: #### DEBUG OUTPUT = %s', item) # Process some types of errors if item[1:3] == "!!" and self.debug > 1: - logging.warning(" !!!!!!!!!!!!!!!!! ERROR: %s", item) + logging.warning("testdriver/testplan.py: !!!!!!!!!!!!!!!!! ERROR: %s", item) # Extract the message and check if we continue or not. json_start = item.index('{') json_text = item[json_start:] - logging.debug('JSON TEXT = %s', json_text) + logging.debug('testdriver/testplan.py: JSON TEXT = %s', json_text) json_out = json.loads(json_text) if 'error_retry' in json_out and json_out['error_retry']: should_retry = json_out['error_retry'] - logging.warning('!!! SHOULD RETRY = %s', should_retry) + logging.warning('testdriver/testplan.py: !!! SHOULD RETRY = %s', should_retry) elif not(item is None) and item != "": try: json_out = json.loads(item) batch_out.append(json_out) except BaseException as error: if self.debug > 1: - logging.warning(' && Item %s. Error in= %s. Received (%d): >%s<', + logging.warning('testdriver/testplan.py: && Item %s. Error in= %s. Received (%d): >%s<', index, error, len(item), item) index += 1 @@ -446,9 +446,9 @@ def process_batch_of_tests(self, tests_to_send): def run_multitest_mode(self): # TODO Implement this - logging.info('!!! Running MultiTestMode %s on data %s', + logging.info('testdriver/testplan.py: !!! Running MultiTestMode %s on data %s', self.exec_command, self.inputFilePath) - logging.warning(' ** UNIMPLEMENTED **') + logging.warning('testdriver/testplan.py: ** UNIMPLEMENTED **') # Open the input file and get tests # Open results file @@ -492,7 +492,7 @@ def open_json_test_data(self): # Send a single line of data or command to Stdout, capturing the output def send_one_line(self, input_line): - self.run_error_message = None + self.runer_ror_message = None try: result = subprocess.run(self.exec_command, input=input_line, # Usually a JSON string. @@ -503,13 +503,13 @@ def send_one_line(self, input_line): if not result.returncode: return result.stdout else: - logging.debug('$$$$$$$$$$$$$$$$ ---> return code: %s', result.returncode) - logging.debug(' ----> INPUT LINE= >%s<', input_line) - logging.debug(' ----> STDOUT= >%s<', result.stdout) + logging.debug('testdriver/testplan.py: $$$$$$$$$$$$$$$$ ---> return code: %s', result.returncode) + logging.debug('testdriver/testplan.py: ----> INPUT LINE= >%s<', input_line) + logging.debug('testdriver/testplan.py: ----> STDOUT= >%s<', result.stdout) self.run_error_message = '!!!! ERROR IN EXECUTION: %s. STDERR = %s' % ( result.returncode, result.stderr) - logging.error(' !!!!!! exec_list = %s\n input_line = %s' % (self.exec_list, input_line)) - logging.error(' !!!!!! 
%s' % self.run_error_message) + logging.error('testdriver/testplan.py: !!!!!! exec_list = %s\n input_line = %s' % (self.exec_list, input_line)) + logging.error('testdriver/testplan.py: !!!!!! %s' % self.run_error_message) # Handle problems with decoding errors and other unknowns. error_result = {'label': 'UNKNOWN', @@ -518,7 +518,7 @@ def send_one_line(self, input_line): } return json.dumps(error_result) except BaseException as err: - logging.error('Err = %s', err) + logging.error('testdriver/testplan.py: Err = %s', err) input = json.loads(input_line.replace('#EXIT', '').strip()) error_result = {'label': input['label'], 'input_data': input, diff --git a/testgen/generators/base.py b/testgen/generators/base.py index 66b99b1c..03fe558a 100644 --- a/testgen/generators/base.py +++ b/testgen/generators/base.py @@ -137,7 +137,7 @@ def readFile(self, filename, version="", filetype="txt"): with codecs.open(path, "r", encoding="utf-8") as testdata: return json.load(testdata) if filetype == "json" else testdata.read() except BaseException as err: - logging.warning("** readFile: %s", err) + logging.warning("testgen/generators: %s", err) return None def computeMaxDigitsForCount(self, count): diff --git a/testgen/generators/datetime_fmt.py b/testgen/generators/datetime_fmt.py index f39a9e46..99eb0dc7 100644 --- a/testgen/generators/datetime_fmt.py +++ b/testgen/generators/datetime_fmt.py @@ -29,7 +29,7 @@ def generate_datetime_data_from_cldr(self, dt_json_path, run_limit=-1): test_obj = { 'Test scenario': 'datetime_fmt', 'test_type': 'datetime_fmt', - 'description': 'date/time format test data generated by Node', + 'description': 'date/time formatCLDR test data', 'icuVersion': self.icu_version, 'cldrVersion': '??' } @@ -38,7 +38,7 @@ def generate_datetime_data_from_cldr(self, dt_json_path, run_limit=-1): verify_cases = [] verify_obj = { 'test_type': 'datetime_fmt', - 'description': 'date/time format test data generated by Node', + 'description': 'date/time format CLDR test data', 'icuVersion': self.icu_version, 'cldrVersion': '??' } @@ -139,45 +139,10 @@ def generate_datetime_data_from_cldr(self, dt_json_path, run_limit=-1): return None def process_test_data(self): - # Use NOde JS to create the .json files - icu_nvm_versions = { - 'icu77': '24.0.0', - 'icu76': '23.11.0', - 'icu75': '22.9.0', - 'icu74': '21.6.0', - 'icu73': '20.1.0', - 'icu72': '18.14.2', - 'icu71': '18.7.0', - } - - # Update to check for datetime.json which has been generated from CLDR data. + # Check for datetime.json which has been generated from CLDR data. dt_json_path = os.path.join('.', self.icu_version, 'datetime.json') if os.path.exists(dt_json_path): result = self.generate_datetime_data_from_cldr(dt_json_path, self.run_limit) return result - - # OK, there's no CLDR-based JSON data available. - run_list = [ - ['source ~/.nvm/nvm.sh; nvm install 21.6.0; nvm use 21.6.0 --silent'], - ['node generators/datetime_gen.js'], - ['mv datetime_fmt*.json icu74'] - ] - - if self.icu_version not in icu_nvm_versions: - logging.warning('Generating datetime data not configured for icu version %s', self.icu_version) + else: return False - - # Set up Node version and call the generator - # Add temporal to the package. 
- nvm_version = icu_nvm_versions[self.icu_version] - generate_command = 'source ~/.nvm/nvm.sh; nvm install %s; nvm use %s --silent; npm ci; node generators/datetime_gen.js %s %s' % ( - nvm_version, nvm_version, '-run_limit', self.run_limit) - - logging.debug('Running this command: %s', generate_command) - result = subprocess.run(generate_command, shell=True) - - # Move results to the right directory - mv_command = 'mv datetime_fmt*.json %s' % self.icu_version - result = subprocess.run(mv_command, shell=True) - - return result diff --git a/testgen/generators/localeDisplayNames.py b/testgen/generators/localeDisplayNames.py index 2f3c729d..c0c9cd36 100644 --- a/testgen/generators/localeDisplayNames.py +++ b/testgen/generators/localeDisplayNames.py @@ -99,8 +99,8 @@ def generateLanguageNameTestDataObjects(self, rawtestdata): else: # Ignore the root locale if locale_label == 'root': - logging.info('root locale ignored for %s, %s, %s', - test_data[0], locale_label, language_display) + logging.debug('testgen/generator/localeDisplayNames: %s ignored for %s, %s', + locale_label, test_data[0], language_display) continue label = str(count).rjust(max_digits, "0") test_json = { diff --git a/testgen/generators/message_fmt2.py b/testgen/generators/message_fmt2.py index 68a8508d..4491da3d 100644 --- a/testgen/generators/message_fmt2.py +++ b/testgen/generators/message_fmt2.py @@ -43,7 +43,7 @@ def process_test_data(self): for test_file_path in src_file_paths: src_data = self.readFile(test_file_path, filetype="json") if src_data is None: - logging.error("Problem reading JSON. Omitting file %s", test_file_path) + logging.error("testgen/generators/message_fmt2.py: Problem reading JSON. Omitting file %s", test_file_path) continue defaults = src_data.get("defaultTestProperties") @@ -55,9 +55,8 @@ def process_test_data(self): try: validate(src_data, json_schema) except ValidationError as err: - logging.error("Problem validating JSON: %s against schema", - test_file_path, json_schema_path) - logging.error(err) + logging.error("testgen/generators/message_fmt2.py: JSON %s not validated against schem a %s. 
Error = %s", + test_file_path, json_schema_path, error) for src_test in src_data["tests"]: test_count += 1 @@ -99,8 +98,8 @@ def from_src_test_or_default(dct, key): skipped_test_count += 1 continue except KeyError as err: - logging.error("Missing value for %s in %s", err, test_file_path) - logging.error("Omitting test %s", test["label"]) + logging.error("testgen/generators/message_fmt2.py: Missing value for %s in %s", err, test_file_path) + logging.error("testgen/generators/message_fmt2.py: Omitting test %s", test["label"]) json_test["tests"] = self.sample_tests(test_list) json_verify["verifications"] = self.sample_tests(verify_list) @@ -108,9 +107,10 @@ def from_src_test_or_default(dct, key): self.saveJsonFile(f"{TestType.MESSAGE_FMT2.value}_test.json", json_test, 2) self.saveJsonFile(f"{TestType.MESSAGE_FMT2.value}_verify.json", json_verify, 2) - logging.info( - "MessageFormat2 Test (%s): %d tests processed (of which %d were skipped)", - self.icu_version, - test_count, - skipped_test_count - ) + if test_count > 0: + logging.info( + "testgen/generators: MessageFormat2 Test (%s): %d tests processed (of which %d were skipped)", + self.icu_version, + test_count, + skipped_test_count + ) diff --git a/testgen/generators/number_fmt.py b/testgen/generators/number_fmt.py index 46505b5a..6379ab22 100644 --- a/testgen/generators/number_fmt.py +++ b/testgen/generators/number_fmt.py @@ -54,7 +54,7 @@ def process_test_data(self): num_fmt_verify_file.close() logging.info( - "NumberFormat Test (%s): %s tests created", self.icu_version, count + "testgen/generators/number_fmt.py: NumberFormat Test (%s): %s tests created", self.icu_version, count ) return @@ -131,14 +131,14 @@ def generateNumberFmtTestDataObjects(self, rawtestdata, count=0): ) except KeyError as error: logging.warning( - "Looking up Skeletons: %s [0-2] = %s, %s %s", + "testgen/generators/number_fmt.py: Looking up Skeletons: %s [0-2] = %s, %s %s", error, test_options[0], test_options[1], test_options[2], ) if not options_dict: - logging.warning("$$$ OPTIONS not found for %s", label) + logging.warning("testgen/generators/number_fmt.py: $$$ OPTIONS not found for %s", label) # TODO: Look at the items in the options_dict to resolve conflicts and set up things better. 
resolved_options_dict = self.resolveOptions( options_dict, test_options @@ -148,8 +148,8 @@ def generateNumberFmtTestDataObjects(self, rawtestdata, count=0): all_tests_list.append(entry) # All the tests in JSON form count += 1 - logging.info( - " generateNumberFmtTestDataObjects gives %d tests", + logging.debug( + "testgen/generators/number_fmt.py: generateNumberFmtTestDataObjects gives %d tests", (count - original_count), ) return all_tests_list, verify_list, count @@ -222,7 +222,7 @@ def generateDcmlFmtTestDataObjects(self, rawtestdata, count=0): if round_mode: skeleton += ' ' + self.mapRoundingToSkeleton(round_mode) else: - logging.error('Pattern %s not converted to skelection', pattern) + logging.error('testgen/generators/number_fmt.py: Pattern %s not converted to skelection', pattern) skeleton = None if skeleton: @@ -266,7 +266,7 @@ def generateDcmlFmtTestDataObjects(self, rawtestdata, count=0): count += 1 logging.info( - " generateDcmlFmtTestDataObjects gives %d tests", (count - original_count) + "testgen/generators/number_fmt.py: generateDcmlFmtTestDataObjects gives %d tests", (count - original_count) ) return all_tests_list, verify_list, count @@ -278,9 +278,9 @@ def parseDcmlFmtTestData(self, rawtestdata): try: test_match = reformat.search(rawtestdata) except AttributeError as error: - logging.warning("** parseDcmlFmtTestData: %s", error) + logging.warning("testgen/generators/number_fmt.py: ** parseDcmlFmtTestData: %s", error) if not test_match: - logging.warning("No test match with rawtestdata = %s", rawtestdata) + logging.warning("testgen/generators/number_fmt.py: No test match with rawtestdata = %s", rawtestdata) return None, None, None, None return ( test_match.group(1), diff --git a/testgen/generators/plurals.py b/testgen/generators/plurals.py index e5ffb57e..cc46caa3 100644 --- a/testgen/generators/plurals.py +++ b/testgen/generators/plurals.py @@ -109,7 +109,7 @@ def process_xml_file(self, filename, num_type): try: tree = ET.parse(filename) except: - logging.warning('No plurals file found: %s', filename) + logging.warning('testgen/generators/plurals.py: No plurals file found: %s', filename) return None, None root = tree.getroot() @@ -137,7 +137,7 @@ def process_xml_file(self, filename, num_type): for sample in samples: # Ignore "root" locale. if locale == 'root': - logging.info('Plural rules: root locale ignored for %s, %s, %s', + logging.debug('testgen/generators: Plural rules: %s locale ignored for %s, %s', locale, num_type, sample) continue # Locales should not use '_' but rather '-' diff --git a/testgen/testdata_gen.py b/testgen/testdata_gen.py index d41bc237..3d572ce3 100644 --- a/testgen/testdata_gen.py +++ b/testgen/testdata_gen.py @@ -78,12 +78,7 @@ def generate_versioned_data(version_info): generator.process_test_data() if TestType.LANG_NAMES in args.test_types: - # This is slow - - # First try with the new source of data. If not found, then use the older - # lang names generator. 
generator = LocaleNamesGenerator(icu_version, args.run_limit) - logging.info('lang generated from new LocaleNames data in %s', icu_version) if generator: generator.process_test_data() From 00bca4bcaf83370dac29b98374b4ed2eacaf95e9 Mon Sep 17 00:00:00 2001 From: sven-oly Date: Tue, 9 Sep 2025 20:50:14 +0000 Subject: [PATCH 2/4] Add package.json to fix NPM failure --- package.json | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 package.json diff --git a/package.json b/package.json new file mode 100644 index 00000000..340ac5c3 --- /dev/null +++ b/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "@js-temporal/polyfill": "^0.5.1" + } +} From 5c8e7382814cbff4b5bf802b7d497accc7d6ff5b Mon Sep 17 00:00:00 2001 From: sven-oly Date: Thu, 2 Oct 2025 20:57:21 +0000 Subject: [PATCH 3/4] Change hardcoded file names to __file__. Fix typos --- schema/check_schemas.py | 22 +++++++++++++--------- testdriver/testdriver.py | 5 ++++- testdriver/testplan.py | 18 +++++++++++------- testgen/generators/localeDisplayNames.py | 7 +++++-- testgen/generators/message_fmt2.py | 3 ++- 5 files changed, 35 insertions(+), 20 deletions(-) diff --git a/schema/check_schemas.py b/schema/check_schemas.py index 374a2100..de5ecef6 100644 --- a/schema/check_schemas.py +++ b/schema/check_schemas.py @@ -45,7 +45,8 @@ def save_schema_validation_summary(self, validation_status): try: summary_data = json.dumps(summary_json) except BaseException as err: - logging.error('schema/check_schemas: %s: Cannot create JSON summary: %s', err, summary_json) + logging.error('%s: %s: Cannot create JSON summary: %s', + __file__, err, summary_json) return None output_filename = os.path.join(self.schema_base, 'schema_validation_summary.json') @@ -54,7 +55,8 @@ def save_schema_validation_summary(self, validation_status): file_out.write(summary_data) file_out.close() except BaseException as error: - logging.warning('schema/check_schemas: Error: %s. Cannot save validation summary in file %s', error, output_filename) + logging.warning('%s: Error: %s. 
Cannot save validation summary in file %s', + __file__, error, output_filename) return None return output_filename @@ -62,7 +64,8 @@ def save_schema_validation_summary(self, validation_status): def parallel_validate_schema(validator, file_names): num_processors = multiprocessing.cpu_count() - logging.info('schema/check_schemas: Schema validation: %s processors for %s schema validations', num_processors, len(file_names)) + logging.info('%s: Schema validation: %s processors for %s schema validations', + __file__, num_processors, len(file_names)) processor_pool = multiprocessing.Pool(num_processors) # How to get all the results @@ -77,7 +80,7 @@ def parallel_validate_schema(validator, file_names): def main(args): logger = logging.Logger("TEST SCHEMAS LOGGER") logger.setLevel(logging.INFO) - logger.info('schema/check_schemas: JSON Schema files') + logger.info('%s: JSON Schema files', __file__) validator = schema_validator.ConformanceSchemaValidator() # Todo: use setters to initialize validator @@ -116,20 +119,21 @@ def main(args): }) if not result: schema_errors.append([schema_file, result, err, file_path]) - logging.error('schema/check_schemas: Bad Schema at %s', schema_file) + logging.error('%s: Bad Schema at %s', __file__, schema_file) schema_count += 1 output_filename = val_schema.save_schema_validation_summary(validation_status) if schema_errors: - logging.error('schema/check_schemas: SCHEMA: %d fail out of %d:', - len(schema_errors), schema_count) + logging.error('%s: SCHEMA: %d fail out of %d:', + __file__, len(schema_errors), schema_count) for failure in schema_errors: - logging.error('schema/check_schemas: %s', failure) + logging.error('%s: %s', __file__, failure) # We need to clobber the process sys.exit(1) else: - logging.info("schema/check_schemas: All %d schema are valid in file %s", schema_count, output_filename) + logging.info("%s: All %d schema are valid in file %s", + __file__, schema_count, output_filename) exit(0) diff --git a/testdriver/testdriver.py b/testdriver/testdriver.py index 157a6bbd..55914e94 100644 --- a/testdriver/testdriver.py +++ b/testdriver/testdriver.py @@ -46,7 +46,10 @@ def set_args(self, arg_options): for test_type in arg_options.test_type: if test_type not in ddt_data.testDatasets: - logging.warning('testdriver.py **** WARNING: test_type %s not in testDatasets', test_type) + logging.warning( + '%s **** WARNING: test_type %s not in testDatasets', + __file__, + test_type) else: # Create a test plan based on data and options test_data_info = ddt_data.testDatasets[test_type] diff --git a/testdriver/testplan.py b/testdriver/testplan.py index c5ad3239..81ffb872 100644 --- a/testdriver/testplan.py +++ b/testdriver/testplan.py @@ -64,7 +64,7 @@ def set_options(self, options): try: self.icu_version = options.icu_version except KeyError: - logging.warning('testdriver/testplan.py: NO ICU VERSION SET') + logging.warning('%s: NO ICU VERSION SET', __file__) if options.ignore and not options.ignore == "null": self.ignore = True @@ -98,8 +98,11 @@ def run_plan(self): # Test data versions are given as "icu" + primary number, e.g., "73" # TODO: Consider sorting with possible dotted versions, e.g., 73.1.3 newest_version = sorted(icu_test_dirs, reverse=True)[0] - logging.warning('testdriver/testplan.py: ** Replacing proposed icu version of %s with version %s', - self.icu_version, newest_version) + logging.warning( + '%s: ** Replacing proposed icu version of %s with version %s', + __file__, + self.icu_version, + newest_version) self.icu_version = newest_version if 
self.test_lang == 'node' and 'node_version' in self.options: @@ -130,9 +133,10 @@ def run_plan(self): if self.options.run_limit: self.run_limit = int(self.options.run_limit) - logging.debug('testdriver/testplan.py: !!! RUN LIMIT SET: %d', self.run_limit) + logging.debug('%s: !!! RUN LIMIT SET: %d', __file__, self.run_limit) - logging.debug('testdriver/testplan.py: Running plan %s on data %s', + logging.debug('%s: Running plan %s on data %s', + __file__, self.exec_command, self.inputFilePath) if self.options.exec_mode == 'one_test': @@ -153,7 +157,7 @@ def request_executor_info(self): self.jsonOutput["platform error"] = self.run_error_message return None else: - logging.debug('testdriver/testplan.py: EXECUTOR INFO = %s', result) + logging.debug('%s: EXECUTOR INFO = %s', __file__, result) try: self.jsonOutput["platform"] = json.loads(result) @@ -492,7 +496,7 @@ def open_json_test_data(self): # Send a single line of data or command to Stdout, capturing the output def send_one_line(self, input_line): - self.runer_ror_message = None + self.run_error_message = None try: result = subprocess.run(self.exec_command, input=input_line, # Usually a JSON string. diff --git a/testgen/generators/localeDisplayNames.py b/testgen/generators/localeDisplayNames.py index c0c9cd36..8908aa79 100644 --- a/testgen/generators/localeDisplayNames.py +++ b/testgen/generators/localeDisplayNames.py @@ -99,8 +99,11 @@ def generateLanguageNameTestDataObjects(self, rawtestdata): else: # Ignore the root locale if locale_label == 'root': - logging.debug('testgen/generator/localeDisplayNames: %s ignored for %s, %s', - locale_label, test_data[0], language_display) + logging.debug('%s: %s ignored for %s, %s', + __file__, + locale_label, + test_data[0], + language_display) continue label = str(count).rjust(max_digits, "0") test_json = { diff --git a/testgen/generators/message_fmt2.py b/testgen/generators/message_fmt2.py index 4491da3d..c1ba224e 100644 --- a/testgen/generators/message_fmt2.py +++ b/testgen/generators/message_fmt2.py @@ -55,7 +55,8 @@ def process_test_data(self): try: validate(src_data, json_schema) except ValidationError as err: - logging.error("testgen/generators/message_fmt2.py: JSON %s not validated against schem a %s. Error = %s", + logging.error("%s: JSON %s not validated against schema %s. Error = %s", + __file__, test_file_path, json_schema_path, error) for src_test in src_data["tests"]: From b2d7fe7d846964d401f3a139eeef98905e562647 Mon Sep 17 00:00:00 2001 From: sven-oly Date: Thu, 2 Oct 2025 22:31:05 +0000 Subject: [PATCH 4/4] Small change --- testgen/generators/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testgen/generators/base.py b/testgen/generators/base.py index 03fe558a..a4a0c4a7 100644 --- a/testgen/generators/base.py +++ b/testgen/generators/base.py @@ -137,7 +137,7 @@ def readFile(self, filename, version="", filetype="txt"): with codecs.open(path, "r", encoding="utf-8") as testdata: return json.load(testdata) if filetype == "json" else testdata.read() except BaseException as err: - logging.warning("testgen/generators: %s", err) + logging.warning("Error in %s: %s", __file__, err) return None def computeMaxDigitsForCount(self, count):
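
Note (not part of the patch series): the later commits converge on passing __file__ as the first argument of each log call so every message names its source file. A lighter-weight alternative, sketched below purely for illustration and assuming only the stdlib logging module, is to let the formatter add the file name and line number automatically through the standard %(filename)s and %(lineno)d LogRecord attributes, which avoids editing every call site. The script name and line number in the sample output are hypothetical.

    import logging

    # Configure the root logger once; %(filename)s and %(lineno)d are
    # built-in LogRecord attributes, so each message is tagged with the
    # source file and line of the logging call automatically.
    logging.basicConfig(
        level=logging.INFO,
        format="%(levelname)s %(filename)s:%(lineno)d: %(message)s",
    )

    logging.info("All %d schema are valid in file %s",
                 3, "schema_validation_summary.json")
    # Prints something like (file name and line depend on the call site):
    # INFO check_schemas.py:42: All 3 schema are valid in file schema_validation_summary.json

This only takes effect for loggers that propagate to the configured handlers; loggers built directly with logging.Logger(...), as some of the patched scripts do, would need the same formatter attached to their own handlers.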