diff --git a/executors/node/collator.js b/executors/node/collator.js
index a0f28dfd..f73f2b53 100644
--- a/executors/node/collator.js
+++ b/executors/node/collator.js
@@ -57,18 +57,29 @@ module.exports = {
         outputLine['error_detail'] = 'No rules';
         outputLine['error'] = 'rules';
       }
-      else if (compare_type) {
-        outputLine['unsupported'] = 'Compare type not supported';
-        outputLine['error_detail'] = 'No comparison';
-        outputLine['error'] = 'compare_type';
+      else {
+        outputLine['actual_options'] = JSON.stringify(coll.resolvedOptions());
+        outputLine['compare_result'] = compared;
+        outputLine['result'] = result_bool;
       }
     }
   } catch (error) {
-    outputLine = {'label': json['label'],
-                  'error_message': 'LABEL: ' + json['label'] + ' ' + error.message,
-                  'error': 'Collator compare failed'
-                 };
+    const error_message = error.message;
+
+    if (testLocale == "root" || error_message == "Incorrect locale information provided") {
+      outputLine = {'label': json['label'],
+                    'unsupported': 'root locale',
+                    'error_detail': error_message + ': ' + testLocale,
+                    'error': 'Unsupported locale'
+                   };
+    } else {
+      outputLine = {'label': json['label'],
+                    'error_message': error_message,
+                    'error_detail': testLocale,
+                    'error': 'Something wrong'
+                   };
+    }
   }
   return outputLine;
 }
diff --git a/schema/collation_short/result_schema.json b/schema/collation_short/result_schema.json
index d259507d..2d27d9a8 100644
--- a/schema/collation_short/result_schema.json
+++ b/schema/collation_short/result_schema.json
@@ -84,7 +84,7 @@
         "type": "string"
       },
       "actual_options": {
-        "description": "What was sent to the collation function",
+        "description": "Resolved collator options, serialized as a string",
         "type": "string"
       },
       "input_data": {
diff --git a/testgen/generators/base.py b/testgen/generators/base.py
index 34d42064..4b7f3d1e 100644
--- a/testgen/generators/base.py
+++ b/testgen/generators/base.py
@@ -68,7 +68,7 @@ def generateTestHashValues(self, testdata):
 
             # Create the SHA-1 hash, consistent with the JavaScript hasher
             hasher = hashlib.sha1()
-            hasher.update(test_no_label_string.encode("utf_8"))
+            hasher.update(test_no_label_string.encode("utf-8"))
             hex_digest = hasher.hexdigest()
             test['hexhash'] = hex_digest
 
@@ -82,7 +82,7 @@ def saveJsonFile(self, filename, data, indent=None):
                       filename)
 
         output_path = os.path.join(self.icu_version, filename)
-        output_file = open(output_path, "w", encoding="utf_8")
+        output_file = open(output_path, "w", encoding="utf-8")
         json.dump(data, output_file, indent=indent)
         output_file.close()
 
@@ -132,7 +132,7 @@ def readFile(self, filename, version="", filetype="txt"):
         if version:
             path = os.path.join(version, filename)
         try:
-            with open(path, "r", encoding="utf_8") as testdata:
+            with open(path, "r", encoding="utf-8") as testdata:
                 return json.load(testdata) if filetype == "json" else testdata.read()
         except BaseException as err:
             logging.warning("** readFile: %s", err)
diff --git a/testgen/generators/collation_short.py b/testgen/generators/collation_short.py
index 6523e17a..45fe2062 100644
--- a/testgen/generators/collation_short.py
+++ b/testgen/generators/collation_short.py
@@ -99,7 +99,7 @@ def insert_collation_header(self, test_objs):
         )
 
     # ??? Pass in the attributes defined, adding them to each test ??
-    def check_parse_compare(self, line_index, lines):
+    def check_parse_compare(self, line_index, lines, filename):
         # Handles lines in a compare region
         # Test sections ("* compare") are terminated by
         # definitions of new collators, changing attributes, or new test sections.
@@ -124,7 +124,8 @@ def check_parse_compare(self, line_index, lines):
         string1 = ''
         line_index += 1
         while line_index < len(lines):
-            line_in = lines[line_index]
+            # Use Unicode escapes rather than byte escapes
+            line_in = lines[line_index].replace('\\x', '\\u00')
 
             # Time to end this set of comparison tests.
             if any([p.match(line_in) for p in breakout_patterns]):
@@ -153,6 +154,7 @@
                 'compare_type': compare_type,
                 's1': string1,
                 's2': string2,
+                'source_file': filename,
                 'line': line_index,
             }
 
@@ -193,7 +195,8 @@ def check_parse_rule(self, line_index, lines):
         ]
         while line_index < len(lines):
             line_index += 1
-            line_in = lines[line_index]
+            # Use Unicode escapes rather than byte escapes
+            line_in = lines[line_index].replace('\\x', '\\u00')
 
             # Is it time to end this set of rule lines?
             if any([p.match(line_in) for p in breakout_patterns]):
@@ -282,7 +285,8 @@ def generateCollTestData2(self, filename, icu_version, start_count=0):
             # Handle rules section, to be applied in subsequent tests
             # reset_rules will only be TRUE if new rule set is returned
             # Otherwise, the line didn't include "@ rules"
-            reset_rules, new_rules, new_rule_comments, line_number = self.check_parse_rule(line_number, raw_testdata_list)
+            reset_rules, new_rules, new_rule_comments, line_number = self.check_parse_rule(line_number,
+                                                                                           raw_testdata_list)
             if reset_rules:
                 # Reset test parameters
                 rule_comments = new_rule_comments  # Not used!
@@ -316,7 +320,9 @@ def generateCollTestData2(self, filename, icu_version, start_count=0):
 
             # Check if this is the start of a *compare* section. If so, get the next set of tests
             # ??? Can we pass in other info, e.g., the rules, attributes, locale, etc?
-            new_tests, line_number, conversion_errors = self.check_parse_compare(line_number, raw_testdata_list)
+            new_tests, line_number, conversion_errors = self.check_parse_compare(line_number,
+                                                                                 raw_testdata_list,
+                                                                                 filename)
 
             if new_tests:
                 # Fill in the test cases found
@@ -376,9 +382,7 @@ def generateCollTestData2(self, filename, icu_version, start_count=0):
         logging.info("Coll Test: %s (%s) %d lines processed", filename, icu_version, len(test_list))
         return test_list, verify_list, encode_errors
 
-    def generateCollTestDataObjects(
-        self, filename, icu_version, ignorePunctuation, start_count=0
-    ):
+    def generateCollTestDataObjects(self, filename, icu_version, ignorePunctuation, start_count=0):
         test_list = []
         verify_list = []
         data_errors = []  # Items with malformed Unicode
@@ -423,7 +427,7 @@ def generateCollTestDataObjects(
                 continue
 
             label = str(count).rjust(max_digits, "0")
-            new_test = {"label": label, "s1": prev, "s2": next, "line": line_number}
+            new_test = {"label": label, "s1": prev, "s2": next, "line": line_number, "source_file": filename}
             if ignorePunctuation:
                 new_test["ignorePunctuation"] = True
             test_list.append(new_test)
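
The escape rewrite added in check_parse_compare and check_parse_rule assumes that every \xNN byte escape in the CollationTest source lines carries exactly two hex digits, so prefixing the digits with 00 yields an equivalent \u00NN Unicode escape (which, unlike a \x escape, is also valid inside JSON strings). A minimal sketch of that transformation; the helper name and sample line are illustrative, not taken from the generator:

    # Rewrite \xNN byte escapes as \u00NN Unicode escapes, mirroring
    # lines[line_index].replace('\\x', '\\u00') in the diff above.
    # Assumes every \x escape has exactly two hex digits.
    def to_unicode_escapes(line):
        return line.replace('\\x', '\\u00')

    raw = r"\x61 \x62"                   # hypothetical test-data fragment
    converted = to_unicode_escapes(raw)  # "\u0061 \u0062"
    print(converted.encode("ascii").decode("unicode_escape"))  # prints: a b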
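
The encoding-name edits in base.py are cosmetic: Python's codec registry treats "utf_8" and "utf-8" as aliases of the same codec, so the generated hashes and files are unchanged. A sketch of the hexhash step under that assumption, using a hypothetical test record and json.dumps as a stand-in for however generateTestHashValues actually serializes the label-free test:

    import hashlib
    import json

    # Hypothetical record shaped like the generator's output; the label is
    # excluded from the hash, as in generateTestHashValues.
    test = {"label": "0001", "s1": "a", "s2": "b",
            "line": 42, "source_file": "CollationTest_SHIFTED_SHORT.txt"}
    test_no_label = {k: v for k, v in test.items() if k != "label"}
    test_no_label_string = json.dumps(test_no_label)

    hasher = hashlib.sha1()
    hasher.update(test_no_label_string.encode("utf-8"))  # "utf_8" behaves identically
    test["hexhash"] = hasher.hexdigest()                 # 40-character SHA-1 hex digest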