diff --git a/.gitignore b/.gitignore index 9b6cce94..44987679 100644 --- a/.gitignore +++ b/.gitignore @@ -31,4 +31,6 @@ testgen/*.json TEMP_DATA/* +# log files (including logrotate state and file backups) debug.log* +logrotate.state diff --git a/README.md b/README.md index 10fd34b2..45535a27 100644 --- a/README.md +++ b/README.md @@ -252,6 +252,10 @@ Requirements to run Data Driven Testing code locally: ``` rustup update ``` + - Install `logrotate` + ``` + sudo apt-get install logrotate + ``` # History diff --git a/generateDataAndRun.sh b/generateDataAndRun.sh index efa0034f..00f55c48 100755 --- a/generateDataAndRun.sh +++ b/generateDataAndRun.sh @@ -5,28 +5,26 @@ # Save the results set -e +# Rotate log files +logrotate -s logrotate.state logrotate.conf + +########## +# Setup (generate) test data & expected values +########## + # Enable seting the version of NodeJS export NVM_DIR=$HOME/.nvm; source $NVM_DIR/nvm.sh; -# -# Setup -# - export TEMP_DIR=TEMP_DATA rm -rf $TEMP_DIR # Clear out old data, then create new directory and copy test / verify data there mkdir -p $TEMP_DIR/testData -# -# Setup (generate) test data & expected values -# - -source_file=${1:-'run_config.json'} - # Generates all new test data +source_file=${1:-'run_config.json'} pushd testgen all_icu_versions=$(jq '.[].run.icu_version' ../$source_file | jq -s '.' | jq 'unique' | jq -r 'join(" ")') python3 testdata_gen.py --icu_versions $all_icu_versions @@ -41,7 +39,10 @@ python3 check_schemas.py $pwd python3 check_generated_data.py ../$TEMP_DIR/testData popd -all_execs_json=$(jq '.[].run.exec' $source_file | jq -s '.' | jq 'unique') +########## +# Run tests using per-platform executors +########## + # # Run test data tests through all executors # @@ -54,6 +55,15 @@ all_execs_json=$(jq '.[].run.exec' $source_file | jq -s '.' | jq 'unique') # popd # fi +# +# Run Dart executors in a custom way +# + +# TODO(?): Figure out why datasets.py can't support running multiple CLI commands, +# if that is the reason why Dart needs custom handling in this end-to-end script + +all_execs_json=$(jq '.[].run.exec' $source_file | jq -s '.' | jq 'unique') + if jq -e 'index("dart_native")' <<< $all_execs_json > /dev/null then pushd executors/dart_native/ @@ -73,12 +83,17 @@ fi # Executes all tests on that new data in the new directory mkdir -p $TEMP_DIR/testOutput +# # Invoke all tests on all platforms +# + +# Change to directory of `testdriver` (which will be used to invoke each platform executor) pushd testdriver # Set to use NVM source "$HOME/.nvm/nvm.sh" +# Invoke all tests jq -c '.[]' ../$source_file | while read i; do if jq -e 'has("prereq")' <<< $i > /dev/null then @@ -97,9 +112,9 @@ done # Done with test execution popd -# +########## # Run verifier -# +########## # Verify that test output matches schema.
pushd schema @@ -116,6 +131,10 @@ python3 verifier.py --file_base ../$TEMP_DIR --exec $all_execs --test_type $all_ popd +########## +# Finish and clean up +########## + # # Push testresults and test reports to Cloud Storge # TODO diff --git a/logging.conf b/logging.conf index b0e33627..3f31efd8 100644 --- a/logging.conf +++ b/logging.conf @@ -18,13 +18,10 @@ format=[%(asctime)s,%(module)s:%(lineno)d] [%(levelname)s] %(message)s keys=file,screen [handler_file] -class=handlers.TimedRotatingFileHandler -interval=midnight -backupCount=5 +class=handlers.RotatingFileHandler formatter=complex level=DEBUG -# equivalent to: 'debug.log', when='S', interval=10, backupCount=5 -args=('debug.log', 'S', 10, 5) +args=('debug.log', 'a') [handler_screen] class=StreamHandler diff --git a/logrotate.conf b/logrotate.conf new file mode 100644 index 00000000..4275e5d1 --- /dev/null +++ b/logrotate.conf @@ -0,0 +1,19 @@ +"testdriver/debug.log" { + rotate 3 + missingok +} + +"verifier/debug.log" { + rotate 3 + missingok +} + +"schema/debug.log" { + rotate 3 + missingok +} + +"testgen/debug.log" { + rotate 3 + missingok +} diff --git a/schema/check_generated_data.py b/schema/check_generated_data.py index ad02c0dc..ec853102 100644 --- a/schema/check_generated_data.py +++ b/schema/check_generated_data.py @@ -25,7 +25,7 @@ def main(args): else: test_data_path = args[1] - print('TEST DATA PATH = %s' % test_data_path) + logging.debug('TEST DATA PATH = %s', test_data_path) logger = logging.Logger("Checking Test Data vs. Schemas LOGGER") logger.setLevel(logging.INFO) @@ -37,12 +37,12 @@ def main(args): if os.path.exists(test_data_path): check_path = os.path.join(test_data_path, 'icu*') icu_dirs = glob.glob(check_path) - print('ICU DIRECTORIES = %s' % icu_dirs) + logging.debug('ICU DIRECTORIES = %s', icu_dirs) for dir in icu_dirs: icu_versions.append(os.path.basename(dir)) - print('ICU directories = %s' % icu_versions) - print('test types = %s' % ALL_TEST_TYPES) + logging.debug('ICU directories = %s', icu_versions) + logging.debug('test types = %s', ALL_TEST_TYPES) validator = schema_validator.ConformanceSchemaValidator() # Todo: use setters to initialize validator @@ -56,7 +56,7 @@ def main(args): schema_count = 0 all_results = validator.validate_test_data_with_schema() - print(' %d results for generated test data' % (len(all_results))) + logging.info(' %d results for generated test data', len(all_results)) schema_errors = 0 failed_validations = [] @@ -92,13 +92,13 @@ def main(args): if schema_errors: - print('Test data file files: %d fail out of %d:' % ( - len(schema_errors, schema_count))) + logging.critical('Test data files: %d fail out of %d:', + len(schema_errors), schema_count) for failure in schema_errors: - print(' %s' % failure) + logging.critical(' %s', failure) exit(1) else: - print("All %d generated test data files match with schema" % schema_count) + logging.info("All %d generated test data files match with schema", schema_count) exit(0) if __name__ == "__main__": diff --git a/schema/check_schemas.py b/schema/check_schemas.py index 0c3e591f..d746303f 100644 --- a/schema/check_schemas.py +++ b/schema/check_schemas.py @@ -24,7 +24,7 @@ def save_schema_validation_summary(self, validation_status): failed_validations = [] passed_validations = [] for result in validation_status: - print(result) + logging.debug(result) if result['result']: passed_validations.append(result) else: failed_validations.append(result) @@ -99,13 +99,13 @@ def main(args): ok = val_schema.save_schema_validation_summary(validation_status) if schema_errors: -
print('SCHEMA: %d fail out of %d:' % ( - len(schema_errors), schema_count)) + logging.error('SCHEMA: %d fail out of %d:', + len(schema_errors), schema_count) for failure in schema_errors: - print(' %s' % failure) + logging.error(' %s', failure) exit(1) else: - print("All %d schema are valid" % schema_count) + logging.info("All %d schema are valid", schema_count) exit(0) diff --git a/schema/check_test_output.py b/schema/check_test_output.py index 938c84b6..567bbb78 100644 --- a/schema/check_test_output.py +++ b/schema/check_test_output.py @@ -26,7 +26,7 @@ def main(args): else: test_output_path = args[1] - print('TEST OUTPUT PATH = %s' % test_output_path) + logging.debug('TEST OUTPUT PATH = %s', test_output_path) logger = logging.Logger("Checking Test Data vs. Schemas LOGGER") logger.setLevel(logging.INFO) @@ -61,8 +61,8 @@ def main(args): icu_version_set.add(os.path.basename(dir)) icu_versions = sorted(list(icu_version_set)) - print('ICU directories = %s' % icu_versions) - print('test types = %s' % ALL_TEST_TYPES) + logging.debug('ICU directories = %s', icu_versions) + logging.debug('test types = %s', ALL_TEST_TYPES) validator = schema_validator.ConformanceSchemaValidator() # Todo: use setters to initialize validator @@ -78,14 +78,14 @@ def main(args): schema_count = 0 all_results = validator.validate_test_output_with_schema() - print(' %d results for generated test data' % (len(all_results))) + logging.debug(' %d results for generated test data', len(all_results)) schema_errors = 0 failed_validations = [] passed_validations = [] schema_count = len(all_results) for result in all_results: - print(result) + logging.debug(result) if result['result']: passed_validations.append(result) else: @@ -114,13 +114,13 @@ def main(args): if schema_errors: - print('Test data file files: %d fail out of %d:' % ( - len(schema_errors, schema_count))) + logging.error('Test output files: %d fail out of %d:', + len(schema_errors), schema_count) for failure in schema_errors: - print(' %s' % failure) + logging.error(' %s', failure) exit(1) else: - print("All %d test output files match with schema" % schema_count) + logging.info("All %d test output files match with schema", schema_count) exit(0) if __name__ == "__main__": diff --git a/schema/schema_validator.py b/schema/schema_validator.py index 848d2541..d285d622 100644 --- a/schema/schema_validator.py +++ b/schema/schema_validator.py @@ -93,10 +93,10 @@ def validate_test_data_with_schema(self): for test_type in self.test_types: for icu_version in self.icu_versions: if self.debug > 0: - logging.debug('Checking test data %s, %s', test_type, icu_version) + logging.info('Checking test data %s, %s', test_type, icu_version) logging.info('Checking %s, %s', test_type, icu_version) result_data = self.check_test_data_schema(icu_version, test_type) - print(result_data) + logging.debug('test result data = %s', result_data) msg = result_data['err_info'] if not result_data['data_file_name']: # This is not an error but simple a test that wasn't run. @@ -105,7 +105,7 @@ logging.warning('VALIDATION FAILS: %s %s. MSG=%s', test_type, icu_version, result_data['err_info']) else: - logging.warning('VALIDATION WORKS: %s %s', test_type, icu_version) + logging.info('VALIDATION WORKS: %s %s', test_type, icu_version) all_results.append(result_data) return all_results @@ -244,7 +244,7 @@ def validate_test_output_with_schema(self): logging.warning('VALIDATION FAILS: %s %s %s. 
MSG=%s', test_type, icu_version, executor, results['err_info']) else: - logging.warning('VALIDATION WORKS: %s %s %s', test_type, icu_version, executor) + logging.info('VALIDATION WORKS: %s %s %s', test_type, icu_version, executor) all_results.append(results) return all_results @@ -266,7 +266,7 @@ def process_args(args): # Directory for test result files # Get name of test and type if len(args) < 2: - print('you gotta give me something...') + logging.error('Not enough arguments provided') return base_folder = args[1] @@ -327,10 +327,10 @@ def main(args): schema_validator.icu_versions = ['icu71', 'icu72', 'icu73', 'icu74'] schema_validator.executors = ['node', 'rust', 'dart_web'] - print('Checking test outputs') + logging.info('Checking test outputs') all_test_out_results = schema_validator.validate_test_output_with_schema() for result in all_test_out_results: - print(' %s' % result) + logging.debug(' %s', result) # Check all schema files for correctness. schema_errors = schema_validator.check_schema_files() @@ -342,16 +342,16 @@ def main(args): icu_versions = ['icu71', 'icu72', 'icu73', 'icu74'] executor_list = ['node', 'rust', 'dart_web'] - print('Checking generated data') + logging.info('Checking generated data') all_test_data_results = schema_validator.validate_test_data_with_schema() for result in all_test_data_results: - print(' %s' % result) + logging.debug(' %s', result) - print('Checking test outputs') + logging.info('Checking test outputs') all_test_out_results = schema_validator.validate_test_output_with_schema() for result in all_test_out_results: - print(' %s' % result) + logging.debug(' %s', result) return if __name__ == "__main__": diff --git a/testdriver/datasets.py b/testdriver/datasets.py index b530635f..e5f511b3 100644 --- a/testdriver/datasets.py +++ b/testdriver/datasets.py @@ -288,7 +288,7 @@ def versionForCldr(self, lang, cldr_needed): return system[version] return {'path': ExecutorCommands[lang]} # Nothing found except KeyError as err: - print('versionForCldr error = %s' % err) + logging.error('versionForCldr error = %s', err) return {'path': lang} # Nothing found def has(self, exec) : @@ -380,59 +380,55 @@ def has(self, exec) : # TESTING def printExecutors(executors): - print('Executor paths:') + logging.debug('Executor paths:') for lang in executors.systems: ex = executors.systems[lang] - print(' %s' % (lang)) + logging.debug(' %s', lang) for key in ex.keys(): - print(' %s: %s' % (key, ex[key]['path'])) + logging.debug(' %s: %s', key, ex[key]['path']) def printDatasets(ds): - print('Defined datasets:') + logging.debug('Defined datasets:') for item in ds: - print(' %s:' % (item)) - print(' type: %s' % ds[item].test_type) - print(' test filename: %s' % ds[item].testDataFilename) - print(' verify filename: %s' % ds[item].verifyFilename) - print(' CLDR version: %s' % ds[item].cldr_version) - print(' ICU version: %s' % ds[item].icu_version) + logging.debug(' %s:', item) + logging.debug(' type: %s', ds[item].test_type) + logging.debug(' test filename: %s', ds[item].testDataFilename) + logging.debug(' verify filename: %s', ds[item].verifyFilename) + logging.debug(' CLDR version: %s', ds[item].cldr_version) + logging.debug(' ICU version: %s', ds[item].icu_version) def printCldrIcuMap(): - print('CLDR version maps') + logging.debug('CLDR version maps') for name, member in CLDRVersion.__members__.items(): try: - print(' %s in %s' % (name, cldr_icu_map[member])) + logging.debug(' %s in %s', name, cldr_icu_map[member]) except KeyError: - print(' %s not in map' % (name)) + 
logging.debug(' %s not in map', name) def main(args): logging.config.fileConfig("../logging.conf") printCldrIcuMap() - print() printDatasets(testDatasets) - print() printExecutors(allExecutors) - print() cldr_version = CLDRVersion.CLDR41 ds = dataSetsForCldr(testDatasets, cldr_version) - print('test datasets for %s:' % cldr_version) + logging.info('test datasets for %s:', cldr_version) for label in ds: - print(' data file = %s' % (testDatasets[label].testDataFilename)) + logging.info(' data file = %s', testDatasets[label].testDataFilename) - print() - print('All paths for testing %s' % (cldr_version)) + logging.info('All paths for testing %s', cldr_version) lang = ExecutorLang.NODE for lang in [ExecutorLang.NODE, ExecutorLang.RUST, ExecutorLang.CPP, 'newLanguage']: exec = allExecutors.versionsForCldr(lang, cldr_version) - print(' executor: %s' % (lang)) + logging.info(' executor: %s', lang) if exec: - print(' path: %s argList=%s' % (exec['path'], exec['argList'])) + logging.debug(' path: %s argList=%s', exec['path'], exec['argList']) else: - print(' ** No defined path') + logging.debug(' ** No defined path for executor\'s CLI command + args') if __name__ == '__main__': main(sys.argv) diff --git a/testdriver/ddtargs.py b/testdriver/ddtargs.py index 8f784532..e6b97f66 100644 --- a/testdriver/ddtargs.py +++ b/testdriver/ddtargs.py @@ -20,6 +20,7 @@ # faster but not hermetically isolated import argparse +import logging import sys class DdtOptions(): @@ -172,12 +173,12 @@ def main(args): tests = argsTestData() for test in tests: try: - print('Args = %s' % test) + logging.debug('Args = %s' % test) result = argparse.parse(test) - print(' OPTIONS = %s' % argparse.options) + logging.debug(' OPTIONS = %s' % argparse.options) except BaseException as err: - print(' ERROR: %s ' % err) + logging.error(' ERROR: %s ' % err) if __name__ == "__main__": main(sys.argv) diff --git a/testdriver/testplan.py b/testdriver/testplan.py index 55e0e349..da03b87d 100644 --- a/testdriver/testplan.py +++ b/testdriver/testplan.py @@ -131,11 +131,11 @@ def run_plan(self): if self.options.run_limit: self.run_limit = int(self.options.run_limit) if self.debug: - print('!!! RUN LIMIT SET: %d' % self.run_limit) + logging.debug('!!! 
RUN LIMIT SET: %d', self.run_limit) if self.debug: - print('Running plan %s on data %s' % ( - self.exec_command, self.inputFilePath)) + logging.debug('Running plan %s on data %s', + self.exec_command, self.inputFilePath) if self.options.exec_mode == 'one_test': self.run_one_test_mode() @@ -156,7 +156,7 @@ def request_executor_info(self): return None else: if self.debug: - print('EXECUTOR INFO = %s' % result) + logging.debug('EXECUTOR INFO = %s', result) try: self.jsonOutput["platform"] = json.loads(result) self.platformVersion = self.jsonOutput["platform"]["platformVersion"] @@ -192,7 +192,7 @@ def request_executor_termination(self, terminate_args=None): self.jsonOutput["platform error"] = self.run_error_message else: if self.debug: - print('TERMINATION INFO = %s' % result) + logging.debug('TERMINATION INFO = %s', result) self.jsonOutput["platform"] = json.loads(result) def generate_header(self): @@ -230,8 +230,8 @@ def complete_output_file(self, error_info): def run_one_test_mode(self): if self.debug: - print(' Running OneTestMode %s on data %s' % - (self.exec_command, self.inputFilePath)) + logging.debug(' Running OneTestMode %s on data %s', + self.exec_command, self.inputFilePath) # Set up calls for version data --> results @@ -277,11 +277,11 @@ def run_one_test_mode(self): # Create results file try: if self.debug: - logging.info('++++++ Results file path = %s', self.outputFilePath) + logging.debug('++++++ Results file path = %s', self.outputFilePath) self.resultsFile = open(self.outputFilePath, encoding='utf-8', mode='w') except BaseException as error: - print('*** Cannot open results file at %s. Err = %s' % - (self.outputFilePath, error)) + logging.error('*** Cannot open results file at %s. Err = %s', + self.outputFilePath, error) self.resultsFile = open(self.outputFilePath, encoding='utf-8', mode='w') # Store information the test run @@ -344,8 +344,8 @@ def run_all_single_tests(self, tests_per_execution=1): if self.progress_interval and test_num % self.progress_interval == 0: formatted_num = '{:,}'.format(test_num) - print('Testing %s / %s. %s of %s' % ( - self.exec_list[0], self.testScenario, formatted_num, formatted_count), end='\r') + logging.debug('Testing %s / %s. %s of %s', + self.exec_list[0], self.testScenario, formatted_num, formatted_count) # Accumulate tests_per_execution items into a single outline if lines_in_batch < tests_per_execution: @@ -358,7 +358,7 @@ def run_all_single_tests(self, tests_per_execution=1): all_test_results.extend(result) else: num_errors += 1 - print('!!!!!! "platform error": "%s",\n' % self.run_error_message) + logging.error('!!!!!! "platform error": "%s",\n', self.run_error_message) # Reset the batch lines_in_batch = 0 @@ -384,7 +384,7 @@ def process_batch_of_tests(self, tests_to_send): return [] if self.debug > 2: - print('PROCESSING %d tests' % len(tests_to_send)) + logging.debug('PROCESSING %d tests', len(tests_to_send)) # Ask process to exit when finished. out_and_exit = '\n'.join(tests_to_send) + '\n#EXIT' @@ -417,7 +417,7 @@ def process_batch_of_tests(self, tests_to_send): # TODO: Document these, perhaps in the project's JSON schema. continue if item[0] == "#": - logging.info('#### DEBUG OUTPUT = %s', item) + logging.debug('#### DEBUG OUTPUT = %s', item) # Process some types of errors if item[1:3] == "!!" and self.debug > 1: @@ -425,7 +425,7 @@ def process_batch_of_tests(self, tests_to_send): # Extract the message and check if we continue or not. 
json_start = item.index('{') json_text = item[json_start:] - print('JSON TEXT = %s' % json_text) + logging.debug('JSON TEXT = %s', json_text) json_out = json.loads(json_text) if 'error_retry' in json_out and json_out['error_retry']: should_retry = json_out['error_retry'] @@ -446,7 +446,7 @@ def run_multitest_mode(self): # TODO Implement this logging.info('!!! Running MultiTestMode %s on data %s', self.exec_command, self.inputFilePath) - print(' ** UNIMPLEMENTED **') + logging.warning(' ** UNIMPLEMENTED **') # Open the input file and get tests # Open results file @@ -475,11 +475,10 @@ def open_json_test_data(self): try: self.jsonData = json.loads(file_raw) except json.JSONDecodeError as error: - print('CANNOT parse JSON from file %s: %s' % (self.inputFilePath, error)) + logging.error('CANNOT parse JSON from file %s: %s', self.inputFilePath, error) return None except FileNotFoundError as err: - print('*** Cannot open file %s. Err = %s' % - (self.inputFilePath, err)) + logging.error('*** Cannot open file %s. Err = %s', self.inputFilePath, err) return None try: @@ -504,9 +503,9 @@ def send_one_line(self, input_line): if not result.returncode: return result.stdout else: - print('$$$$$$$$$$$$$$$$ ---> return code: %s' % result.returncode) - print(' ----> INPUT LINE= >%s<' % input_line) - print(' ----> STDOUT= >%s<' % result.stdout) + logging.debug('$$$$$$$$$$$$$$$$ ---> return code: %s', result.returncode) + logging.debug(' ----> INPUT LINE= >%s<', input_line) + logging.debug(' ----> STDOUT= >%s<', result.stdout) self.run_error_message = '!!!! ERROR IN EXECUTION: %s. STDERR = %s' % ( result.returncode, result.stderr) logging.error(' !!!!!! %s' % self.run_error_message) @@ -521,7 +520,7 @@ def send_one_line(self, input_line): } return json.dumps(error_result) except BaseException as err: - print('!!! send_one_line fails: input => %s<. Err = %s' % (input_line, err)) + logging.error('!!! send_one_line fails: input => %s<. Err = %s', input_line, err) input = json.loads(input_line.replace('#EXIT', '')) error_result = {'label': input['label'], 'input_data': input, diff --git a/testgen/testdata_gen.py b/testgen/testdata_gen.py index 1b02c0b2..95f60090 100644 --- a/testgen/testdata_gen.py +++ b/testgen/testdata_gen.py @@ -143,7 +143,7 @@ def processNumberFmtTestData(self): json.dump(json_verify, num_fmt_verify_file, indent=1) num_fmt_verify_file.close() - logging.warning('NumberFormat Test (%s): %s tests created', self.icu_version, count) + logging.info('NumberFormat Test (%s): %s tests created', self.icu_version, count) return def processLangNameTestData(self): diff --git a/verifier/testreport.py b/verifier/testreport.py index 4f6b9b8c..d7764ae6 100644 --- a/verifier/testreport.py +++ b/verifier/testreport.py @@ -774,9 +774,9 @@ def summarize_failures(self): if self.debug > 0: logging.info('--------- %s %s %d failures-----------', self.exec, self.test_type, len(self.failing_tests)) - logging.info(' SINGLE SUBSTITUTIONS: %s', + logging.debug(' SINGLE SUBSTITUTIONS: %s', sort_dict_by_count(self.diff_summary.single_diffs)) - logging.info(' PARAMETER DIFFERENCES: %s', + logging.debug(' PARAMETER DIFFERENCES: %s', sort_dict_by_count(self.diff_summary.params_diff)) def analyze_simple(self, test): @@ -979,8 +979,8 @@ def summarize_reports(self): self.type_summary[test_type].append(test_results) except BaseException as err: - print('SUMMARIZE REPORTS in exec_summary %s, %s. Error: %s' % ( - executor, test_type, err)) + logging.error('SUMMARIZE REPORTS in exec_summary %s, %s. 
Error: %s', + executor, test_type, err) def get_stats(self, entry): # Process items in a map to give HTML table value @@ -1081,8 +1081,8 @@ def create_summary_html(self): html_output = self.templates.summary_html_template.safe_substitute(html_map) if self.debug > 1: - print('HTML OUTPUT =\n%s' % html_output) - print('HTML OUTPUT FILEPATH =%s' % self.summary_html_path) + logging.debug('HTML OUTPUT =\n%s', html_output) + logging.debug('HTML OUTPUT FILEPATH =%s', self.summary_html_path) file.write(html_output) file.close() diff --git a/verifier/verifier.py b/verifier/verifier.py index 3668cea3..2d68f5e2 100644 --- a/verifier/verifier.py +++ b/verifier/verifier.py @@ -72,13 +72,13 @@ def open_verify_files(self): file_time = os.path.getmtime(self.result_path) self.result_timestamp = datetime.datetime.fromtimestamp(file_time).strftime('%Y-%m-%d %H:%M') except BaseException as err: - print(' *** Cannot open results file %s:\n %s' % (self.result_path, err)) + logging.error(' *** Cannot open results file %s:\n %s', self.result_path, err) return None try: self.verify_data_file = open(self.verify_path, encoding='utf-8', mode='r') except BaseException as err: - print(' **!!* %s: Cannot open verify file %s' % (err, self.verify_path)) + logging.error(' **!!* %s: Cannot open verify file %s', err, self.verify_path) return None # Create report directory if needed @@ -90,19 +90,24 @@ def open_verify_files(self): sys.stderr.write(' !!! Cannot create directory %s for report file %s' % (report_dir, self.report_path)) sys.stderr.write(' !!! Error = %s' % err) + + logging.error(' !!! Cannot create directory %s for report file %s', + report_dir, self.report_path) + logging.error(' !!! Error = %s', err) + return None try: self.report_file = open(self.report_path, encoding='utf-8', mode='w') except BaseException as err: - print('*** Cannot open file %s: Error = %s' % (self.report_path, err)) + logging.error('*** Cannot open file %s: Error = %s', self.report_path, err) return None # Get the input file to explain test failures try: self.testdata_file = open(self.testdata_path, encoding='utf-8', mode='r') except BaseException as err: - print('*** Cannot open testdata file %s: Error = %s' % (self.testdata_path, err)) + logging.error('*** Cannot open testdata file %s: Error = %s', self.testdata_path, err) return None # Initialize values for this case. @@ -115,8 +120,8 @@ def parseargs(self, args): # Initialize commandline arguments verify_info = VerifyArgs(args) if self.debug > 1: - print('!!! ARGS = %s' % args) - print('VERIFY INFO: %s' % verify_info) + logging.debug('!!! ARGS = %s', args) + logging.debug('VERIFY INFO: %s', verify_info) self.set_verify_args(verify_info.getOptions()) def set_verify_args(self, arg_options): @@ -128,7 +133,7 @@ def set_verify_args(self, arg_options): self.file_base = arg_options.file_base if self.debug > 1: - print('TEST TYPES = %s' % self.test_types) + logging.debug('TEST TYPES = %s', self.test_types) if not arg_options.summary_only: self.setup_verify_plans() @@ -152,7 +157,7 @@ def setup_verify_plans(self): # TODO: Run for each test_type! 
if test_type not in ddt_data.testDatasets: - print('**** WARNING: test_type %s not in testDatasets' % + logging.warning('**** WARNING: test_type %s not in testDatasets', test_type) raise ValueError('No test dataset found for test type >%s<' % self.test_type) @@ -243,14 +248,14 @@ def verify_data_results(self): self.test_type = vplan.test_type - print('VERIFY %s: %s %s' % (vplan.exec, self.test_type, vplan.result_path)) + logging.info('VERIFY %s: %s %s', vplan.exec, self.test_type, vplan.result_path) if not self.open_verify_files(): continue self.compare_test_to_expected() # Save the results if not self.report.save_report(): - print('!!! Could not save report for (%s, %s)', + logging.error('!!! Could not save report for (%s, %s)', vplan.test_type, self.exec) else: self.report.create_html_report() @@ -259,13 +264,14 @@ def verify_data_results(self): self.report.summarize_failures() if self.debug > 0: - print('\nTEST RESULTS in %s for %s. %d tests found' % ( - self.exec, vplan.test_type, len(self.results))) + logging.debug('\nTEST RESULTS in %s for %s. %d tests found', + self.exec, vplan.test_type, len(self.results)) try: logging.info(' Platform: %s', self.resultData["platform"]) logging.info(' %d Errors running tests', self.report.error_count) except BaseException as err: sys.stderr.write('### Missing fields %s, Error = %s' % (self.resultData, err)) + logging.error('### Missing fields %s, Error = %s', self.resultData, err) # Experimental # TODO: Finish difference analysis @@ -278,10 +284,11 @@ def get_results_and_verify_data(self): self.results = self.resultData['tests'] except BaseException as err: sys.stderr.write('Cannot load %s result data: %s' % (self.result_path, err)) + logging.error('Cannot load %s result data: %s', self.result_path, err) return None if self.debug >= 1: - print('^^^ Result file has %d entries' % (len(self.results))) + logging.debug('^^^ Result file has %d entries', len(self.results)) self.result_file.close() try: @@ -315,10 +322,11 @@ def get_results_and_verify_data(self): except BaseException as err: sys.stderr.write('!!! Cannot sort test results by label: %s' % err) sys.stderr.flush() + logging.error('!!! Cannot sort test results by label: %s', err) if 'platform_error' in self.resultData: - print('PLATFORM ERROR: %s' % self.resultData['platform error']) - print('No verify done!!!') + logging.error('PLATFORM ERROR: %s', self.resultData['platform error']) + logging.error('No verify done!!!') return None def compare_test_to_expected(self): @@ -343,10 +351,11 @@ def compare_test_to_expected(self): self.report.test_type = self.test_type if not self.verifyExpected: sys.stderr.write('No expected data in %s' % self.verify_path) + logging.error('No expected data in %s', self.verify_path) return None if not self.results: - print('*$*$*$*$* self.results = %s' % self.results) + logging.error('*$*$*$*$* self.results = %s', self.results) return None # Loop over all results found, comparing with the expected result. 
@@ -357,10 +366,10 @@ def compare_test_to_expected(self): for test in self.results: if not test: - print('@@@@@ no test string: %s of %s' % (test, len(self.results))) + logging.debug('@@@@@ no test string: %s of %s', test, len(self.results)) if index % 10000 == 0: - print(' progress = %d / %s' % (index, total_results), end='\r') + logging.debug(' progress = %d / %s', index, total_results) # The input to the test try: @@ -386,7 +395,7 @@ def compare_test_to_expected(self): verification_data = self.find_expected_with_label(test_label) if verification_data is None: - print('*** Cannot find verify data with label %s' % test_label) + logging.warning('*** Cannot find verify data with label %s', test_label) self.report.record_missing_verify_data(test) # Bail on this test continue @@ -396,9 +405,9 @@ except: expected_result = 'UNKNOWN' if self.debug > 1: - print('VVVVV: %s actual %s, expected %s' % ( + logging.debug('VVVVV: %s actual %s, expected %s', (actual_result == expected_result), - actual_result, expected_result)) + actual_result, expected_result) # Remember details about the test test['input_data'] = test_data test['expected'] = expected_result @@ -409,7 +418,6 @@ index += 1 - print('') return def find_expected_with_label(self, test_label): @@ -422,8 +430,8 @@ try: return self.verifyExpectedDict[test_label] except BaseException as err: - print('----- find_expected_with_label %s' % err) - print(' No expected item with test_label = %s' % test_label) + logging.error('----- find_expected_with_label %s', err) + logging.error(' No expected item with test_label = %s', test_label) return True def find_testdata_with_label(self, test_label): @@ -435,9 +443,9 @@ try: return self.testdataDict[test_label] except BaseException as err: - print('----- findTestdataWithLabel %s' % err) - print(' No test data item with test_label = %s' % test_label) - print(' SUGGESTION: Check if test results are synced with test data!') + logging.error('----- findTestdataWithLabel %s', err) + logging.error(' No test data item with test_label = %s', test_label) + logging.error(' SUGGESTION: Check if test results are synced with test data!') return True @@ -449,7 +457,7 @@ def analyze_failures(self): def setup_paths(self, executor, testfile, verify_file): base_dir = self.file_base if self.debug > 1: - print('&&& FILE BASE = %s' % base_dir) + logging.debug('&&& FILE BASE = %s', base_dir) # Check on the path defined here test_output_dir = 'testOutput' self.resultPath = os.path.join( @@ -459,9 +467,9 @@ def setup_paths(self, executor, testfile, verify_file): self.reportPath = os.path.join( base_dir, 'testReports', executor, testfile) if self.debug > 0: - print('RESULT PATH = %s' % self.resultPath) - print('VERIFY PATH = %s' % self.verifyPath) - print('TESTDATA PATH = %s' % self.testdata_path) + logging.debug('RESULT PATH = %s', self.resultPath) + logging.debug('VERIFY PATH = %s', self.verifyPath) + logging.debug('TESTDATA PATH = %s', self.testdata_path) # Create HTML summary files def create_summary_reports(self): @@ -478,7 +486,7 @@ def create_summary_reports(self): # And make the output HTML results result = summary_report.create_summary_html() if not result: - print('!!!!!! SUMMARY HTML fails') + logging.error('!!!!!! 
SUMMARY HTML fails') def schema_results(self): # Locate the files in schema, testData, and testOutput @@ -589,10 +597,10 @@ def display_names_exec(self, executor): executor, 'display_names.json', 'display_names_verify.json') def print_result(self): - print('\n Test Report for %s' % self.title) + logging.info('\n Test Report for %s', self.title) test_report = self.verifier.report report_data = test_report.create_report() - print(' Report: %s' % report_data) + logging.info(' Report: %s', report_data) # Test basic verifier functions @@ -622,13 +630,13 @@ def main(args): if not verifier.options.summary_only: # Run the tests on the provided parameters. - print('Verifier starting on %d verify cases' % (len(verifier.verify_plans))) + logging.info('Verifier starting on %d verify cases', len(verifier.verify_plans)) verifier.verify_data_results() - print('Verifier completed %d data reports' % (len(verifier.verify_plans))) + logging.info('Verifier completed %d data reports', len(verifier.verify_plans)) # TODO: Should this be optional? verifier.create_summary_reports() - print('Verifier completed summary report') + logging.info('Verifier completed summary report') if __name__ == '__main__':
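
Note for reviewers (illustrative only, not part of the patch): the net effect of the logging.conf change plus the print() conversions is a plain append-mode RotatingFileHandler whose rotation is left to the `logrotate -s logrotate.state logrotate.conf` step added to generateDataAndRun.sh, with call sites passing deferred %-style arguments. A minimal standalone sketch of roughly that setup follows; the screen handler's level here is an assumption, since the [handler_screen] section is not shown in full in this diff.

```python
import logging
from logging.handlers import RotatingFileHandler

# Same format string as the "complex" formatter in logging.conf.
formatter = logging.Formatter(
    '[%(asctime)s,%(module)s:%(lineno)d] [%(levelname)s] %(message)s')

# Append-mode file handler; with maxBytes left at its default of 0, Python
# never rotates debug.log itself -- rotation is delegated to the external
# logrotate run in generateDataAndRun.sh.
file_handler = RotatingFileHandler('debug.log', mode='a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)

# Screen handler; the INFO level is assumed for illustration.
screen_handler = logging.StreamHandler()
screen_handler.setLevel(logging.INFO)
screen_handler.setFormatter(formatter)

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(file_handler)
root.addHandler(screen_handler)

# Deferred (lazy) formatting: arguments are passed separately and only
# interpolated when a record is actually emitted -- the pattern the
# converted print() calls now use.
logging.debug('TEST DATA PATH = %s', 'TEMP_DATA/testData')
logging.info('Verifier starting on %d verify cases', 4)
```

In practice the scripts keep loading this configuration from file via the unchanged logging.config.fileConfig("../logging.conf") call seen in testdriver/datasets.py; the sketch above only spells out what that file now configures.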