Node and collation: fix errors, add 'actual_options' (unicode-org#364)
sven-oly authored Dec 19, 2024
1 parent 3db6051 commit 9f2e051
Showing 4 changed files with 36 additions and 21 deletions.
27 changes: 19 additions & 8 deletions executors/node/collator.js
@@ -57,18 +57,29 @@ module.exports = {
outputLine['error_detail'] = 'No rules';
outputLine['error'] = 'rules';
}
else if (compare_type) {
outputLine['unsupported'] = 'Compare type not supported';
outputLine['error_detail'] = 'No comparison';
outputLine['error'] = 'compare_type';
else {
outputLine['actual_options'] = JSON.stringify(coll.resolvedOptions()); //.toString();
outputLine['compare_result'] = compared;
outputLine['result'] = result_bool;
}
}

} catch (error) {
outputLine = {'label': json['label'],
'error_message': 'LABEL: ' + json['label'] + ' ' + error.message,
'error': 'Collator compare failed'
};
const error_message = error.message;

if (testLocale == "root" || error_message == "Incorrect locale information provided") {
outputLine = {'label': json['label'],
'unsupported': 'root locale',
'error_detail': error_message + ': ' + testLocale,
'error': 'Unsupported locale'
};
} else {
outputLine = {'label': json['label'],
'error_message': error_message,
'error_detail': testLocale,
'error': 'Something wrong'
};
}
}
return outputLine;
}
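For context, Intl.Collator's resolvedOptions() reports the options the collator actually resolved to, not just what was requested, so serializing it gives a faithful record for 'actual_options'. A minimal, illustrative JavaScript sketch; the locale and options below are made up and the exact string varies by engine and ICU version:

const coll = new Intl.Collator('de', {sensitivity: 'base', ignorePunctuation: true});
const actualOptions = JSON.stringify(coll.resolvedOptions());
// Typically something like:
// {"locale":"de","usage":"sort","sensitivity":"base","ignorePunctuation":true,
//  "collation":"default","numeric":false,"caseFirst":"false"}
console.log(actualOptions);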
2 changes: 1 addition & 1 deletion schema/collation_short/result_schema.json
@@ -84,7 +84,7 @@
"type": "string"
},
"actual_options": {
"description": "What was sent to the collation function",
"description": "Options used by collation as a string",
"type": "string"
},
"input_data": {
6 changes: 3 additions & 3 deletions testgen/generators/base.py
@@ -68,7 +68,7 @@ def generateTestHashValues(self, testdata):

# Create the SHA-1 hash, consistent with JavaScript
hasher = hashlib.sha1()
hasher.update(test_no_label_string.encode("utf_8"))
hasher.update(test_no_label_string.encode("utf-8"))
hex_digest = hasher.hexdigest()
test['hexhash'] = hex_digest

@@ -82,7 +82,7 @@ def saveJsonFile(self, filename, data, indent=None):
filename)

output_path = os.path.join(self.icu_version, filename)
output_file = open(output_path, "w", encoding="utf_8")
output_file = open(output_path, "w", encoding="utf-8")
json.dump(data, output_file, indent=indent)
output_file.close()

@@ -132,7 +132,7 @@ def readFile(self, filename, version="", filetype="txt"):
if version:
path = os.path.join(version, filename)
try:
with open(path, "r", encoding="utf_8") as testdata:
with open(path, "r", encoding="utf-8") as testdata:
return json.load(testdata) if filetype == "json" else testdata.read()
except BaseException as err:
logging.warning("** readFile: %s", err)
22 changes: 13 additions & 9 deletions testgen/generators/collation_short.py
@@ -99,7 +99,7 @@ def insert_collation_header(self, test_objs):
)

# ??? Pass in the attributes defined, adding them to each test ??
def check_parse_compare(self, line_index, lines):
def check_parse_compare(self, line_index, lines, filename):
# Handles lines in a compare region
# Test sections ("* compare") are terminated by
# definitions of new collators, changing attributes, or new test sections.
@@ -124,7 +124,8 @@ def check_parse_compare(self, line_index, lines):
string1 = ''
line_index += 1
while line_index < len(lines):
line_in = lines[line_index]
# Use Unicode escapes rather than byte escapes
line_in = lines[line_index].replace('\\x', '\\u00')

# Time to end this set of comparison tests.
if any([p.match(line_in) for p in breakout_patterns]):
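The '\x' to '\u00' substitution above turns two-digit byte escapes from the test data (e.g. \x61) into four-digit Unicode escapes (\u0061); JSON has no \x escape form, so only the converted form survives later JSON handling. A rough JavaScript sketch of the idea, using a made-up input line (the substitution only makes sense when exactly two hex digits follow \x):

const rawLine = 'a\\x61c';                           // characters a \ x 6 1 c, as read from the data file
const converted = rawLine.replace(/\\x/g, '\\u00');  // now 'a\\u0061c'
console.log(JSON.parse('"' + converted + '"'));      // "aac"
// JSON.parse('"' + rawLine + '"') would throw: \x is not a valid JSON escape.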
@@ -153,6 +154,7 @@
'compare_type': compare_type,
's1': string1,
's2': string2,
'source_file': filename,
'line': line_index,
}

@@ -193,7 +195,8 @@ def check_parse_rule(self, line_index, lines):
]
while line_index < len(lines):
line_index += 1
line_in = lines[line_index]
# Use Unicode escapes rather than byte escapes
line_in = lines[line_index].replace('\\x', '\\u00')

# Is it time to end this set of rule lines?
if any([p.match(line_in) for p in breakout_patterns]):
@@ -282,7 +285,8 @@ def generateCollTestData2(self, filename, icu_version, start_count=0):
# Handle rules section, to be applied in subsequent tests
# reset_rules will only be TRUE if new rule set is returned
# Otherwise, the line didn't include "@ rules"
reset_rules, new_rules, new_rule_comments, line_number = self.check_parse_rule(line_number, raw_testdata_list)
reset_rules, new_rules, new_rule_comments, line_number = self.check_parse_rule(line_number,
raw_testdata_list)
if reset_rules:
# Reset test parameters
rule_comments = new_rule_comments # Not used!
@@ -316,7 +320,9 @@ def generateCollTestData2(self, filename, icu_version, start_count=0):

# Check if this is the start of a *compare* section. If so, get the next set of tests
# ??? Can we pass in other info, e.g., the rules, attributes, locale, etc?
new_tests, line_number, conversion_errors = self.check_parse_compare(line_number, raw_testdata_list)
new_tests, line_number, conversion_errors = self.check_parse_compare(line_number,
raw_testdata_list,
filename)

if new_tests:
# Fill in the test cases found
@@ -376,9 +382,7 @@ def generateCollTestData2(self, filename, icu_version, start_count=0):
logging.info("Coll Test: %s (%s) %d lines processed", filename, icu_version, len(test_list))
return test_list, verify_list, encode_errors

def generateCollTestDataObjects(
self, filename, icu_version, ignorePunctuation, start_count=0
):
def generateCollTestDataObjects(self, filename, icu_version, ignorePunctuation, start_count=0):
test_list = []
verify_list = []
data_errors = [] # Items with malformed Unicode
Expand Down Expand Up @@ -423,7 +427,7 @@ def generateCollTestDataObjects(
continue

label = str(count).rjust(max_digits, "0")
new_test = {"label": label, "s1": prev, "s2": next, "line": line_number}
new_test = {"label": label, "s1": prev, "s2": next, "line": line_number, "source_file": filename}
if ignorePunctuation:
new_test["ignorePunctuation"] = True
test_list.append(new_test)
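With the filename now recorded in both generator paths, each generated test case carries a source_file field alongside the existing line number, so a failing case can be traced back to the data file it came from. A hypothetical example of the resulting record, in JavaScript object form with all values invented for illustration:

const newTest = {
  label: '0042',                      // zero-padded test counter
  s1: 'a\u0301',                      // first string to compare
  s2: '\u00e1',                       // second string to compare
  line: 137,                          // line number in the source data file
  source_file: 'collationtest.txt'    // added by this change; file name is illustrative
};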
