diff --git a/verifier/detail_template.html b/verifier/detail_template.html index 88f88dd4..96941a42 100644 --- a/verifier/detail_template.html +++ b/verifier/detail_template.html @@ -100,34 +100,42 @@ // Stores info from the json data fetched asynchronously. let test_results = { 'pass': { - json: null, + json: null, // JSON characterized data + all_labels: new Set(), // All the labels for this category count: 0, characterized: null, - check_boxes: [], + check_boxes: [], // The widgets associated with characterized items + widget_label_sets: [], // A parallel array of sets of labels for each. box_labels: [], selected_set: null }, 'fail': { json: null, + all_labels: new Set(), count: 0, characterized: null, check_boxes: [], + widget_label_sets: [], box_labels: [], selected_set: null }, 'error': { json: null, + all_labels: new Set(), count: 0, characterized: null, check_boxes: [], + widget_label_sets: [], box_labels: [], selected_set: null }, 'unsupported': { json: null, + all_labels: new Set(), count: 0, characterized: null, check_boxes: [], + widget_label_sets: [], box_labels: [], selected_set: null, } @@ -135,9 +143,6 @@ // Get the JSON data from the tests. - // Characterizations of non-passing. - let fail_char; - // Data dynamically created from test failures // on selection of some criteria. let p1 = fetch('./pass.json'); @@ -150,134 +155,146 @@ let p_error_char = fetch('./error_characterized.json') let p_unsupported_char = fetch('./unsupported_characterized.json') - // Synchronize all the data loading and charts / pagination - Promise.all([ - p1.then((response) => response.json()) - .then((data) => { - test_results['pass'].json = data}), - p2.then((response) => response.json()) - .then((data) => { - test_results['fail'].json = data}), - p3.then((response) => response.json()) - .then((data) => { - test_results['error'].json = data}), - p4.then((response) => response.json()) - .then((data) => { - test_results['unsupported'].json = data}), - - p_pass_char.then((response) => response.json()) - .then((data) => { - test_results['pass'].characterized = data}), - p_fail_char.then((response) => response.json()) - .then((data) => { - test_results['fail'].characterized = data}), - p_error_char.then((response) => response.json()) - .then((data) => { - test_results['error'].characterized = data}), - p_unsupported_char.then((response) => response.json()) - .then((data) => { - test_results['unsupported'].characterized = data}), - - new Promise((resolve, reject) => { - $(document).ready(resolve); - }) - ]).then(([p1, p2, p3, p4, _ready]) => { + // Synchronize all the data loading and charts / pagination + Promise.all([ + p1.then((response) => response.json()) + .then((data) => { + test_results['pass'].json = data}), + p2.then((response) => response.json()) + .then((data) => { + test_results['fail'].json = data}), + p3.then((response) => response.json()) + .then((data) => { + test_results['error'].json = data}), + p4.then((response) => response.json()) + .then((data) => { + test_results['unsupported'].json = data}), + + // TODO: Make a separate promise.all for this? 
+ p_pass_char.then((response) => response.json()) + .then((data) => { + test_results['pass'].characterized = data}), + p_fail_char.then((response) => response.json()) + .then((data) => { + test_results['fail'].characterized = data}), + p_error_char.then((response) => response.json()) + .then((data) => { + test_results['error'].characterized = data}), + p_unsupported_char.then((response) => response.json()) + .then((data) => { + test_results['unsupported'].characterized = data}), + + new Promise((resolve, reject) => { + $(document).ready(resolve); + }) + ]).then(([p1, p2, p3, p4, _ready]) => { // Load the Visualization API and the corechart package. google.charts.load('current', {'packages':['corechart']}); // Set a callback to run when the Google Visualization API is loaded. google.charts.setOnLoadCallback(drawChart); onloadFn(); // To call the pagination setup. - }); - - // Callback that creates and populates a data table - // Instantiates it with data, and draws it. - // TODO: Update with all the data loaded - function drawChart() { - // For each test type: - // Get a div for the chart - let chart_output_area = document.getElementById('chart_div'); - let input_data = [ - ['Results', 'Count', {role:'style'}], - ['Passing', test_results['pass'].json.length, '#44ff77'], - ['Failing', test_results['fail'].json.length, '#ff0000'], - ['Errors', test_results['error'].json.length, '#ffdd00'], - ['Unsupported', test_results['unsupported'].json.length, '#777777'] - ]; - const chart = new google.visualization.BarChart(chart_output_area); - let chart_data = google.visualization.arrayToDataTable(input_data); - if (chart && chart_data) { - let chart_options = { - legend: {position: 'bottom', maxLines: 3}, - isStacked: true, - width: 600, height:200, bar: {groupWidth: '90%' } - }; - chart.draw(chart_data, chart_options); - } - } - - let selectedSet = null; - let intersection_labels = []; - let accum_boxes = []; - function checkboxChanged(box) { - // Update the group selected with the intersection of values - const box_class = box.className; - const test_data = test_results[box_class]; - test_data.selected_set = null; - - // TODO: Finish this - let characterized_labels = test_data.box_labels; - for (let check_box of test_data.check_boxes) { - // For each checkbox - if (check_box.checked) { - const labels_string = check_box.value; - const str_len = labels_string.length; - const labels = labels_string.split(","); - const newSet = new Set(labels); - if (! test_data.selected_set) { - // Start the set of selected items. - test_data.selected_set = newSet; - } else { - // Update by intersection with this set. - test_data.selected_set = - new Set([...test_data.selected_set].filter((x) => newSet.has(x))); - } - } + }); + + // Callback that creates and populates a data table + // Instantiates it with data, and draws it. 
+    function drawChart() {
+      // For each test type:
+      // Get a div for the chart
+      let chart_output_area = document.getElementById('chart_div');
+      let input_data = [
+        ['Results', 'Count', {role:'style'}],
+        ['Passing', test_results['pass'].json.length, '#44ff77'],
+        ['Failing', test_results['fail'].json.length, '#ff0000'],
+        ['Errors', test_results['error'].json.length, '#ffdd00'],
+        ['Unsupported', test_results['unsupported'].json.length, '#777777']
+      ];
+      const chart = new google.visualization.BarChart(chart_output_area);
+      let chart_data = google.visualization.arrayToDataTable(input_data);
+      if (chart && chart_data) {
+        let chart_options = {
+          legend: {position: 'bottom', maxLines: 3},
+          isStacked: true,
+          width: 600, height:200, bar: {groupWidth: '90%' }
+        };
+        chart.draw(chart_data, chart_options);
       }
-      // Do something with the selected set.
-      // Get id of the correct count item.
-      const class_name = box_class;  // get from a parent.
-      const element_name = 'selectedCount';
-      const selected_count_items = document.getElementsByName(element_name);
+    }
-      // Spread this out into an array for filtering.
+    // Constants for display of the tristate widget
+    const question_mark = '\u2753';
+    const check_mark = '\u2705';
+    const cross_mark = '\u274C';
+
+    function widgetChanged(widget) {
+      // Update the selected group with the intersection or inclusion of label sets
+      const class_name = widget.className;
+      const test_data = test_results[class_name];
+
+      // Start with all the tests, intersecting or removing data as needed.
+      test_data.selected_set = new Set(test_data.all_labels);
+
+      const selected_count_items = document.getElementsByName('selectedCount');
+
+      // Update the displayed result count
+      let newSize = test_data.selected_set == null ? 0 : test_data.selected_set.size;
       const output = [...selected_count_items].filter(elem => elem.className == class_name);
-      const newSize = test_data.selected_set == null ? 0 : test_data.selected_set.size;
+      if (output) {
+        // Set the current count of items
+        output[0].innerHTML = output[0].innerText = newSize;
+      }
+
+      let excluded_set = new Set();  // Remove these labels after the loop.
+
+      // For each widget, if active, update the state of the selected and excluded sets
+      for (let index in test_data.check_boxes) {
+        const widget = test_data.check_boxes[index];
+        const label_set = test_data.widget_label_sets[index];
+
+        // This is a tristate item.
+        const which_state = widget.value;
+
+        // Choose the value depending on the status of the control
+        if (which_state == check_mark) {
+          // Update by intersection with this set.
+          test_data.selected_set =
+              new Set([...test_data.selected_set].filter((x) => label_set.has(x)));
+        }
+        else if (which_state == cross_mark) {
+          // Update by removing all these labels, i.e., intersect with the inverse
+          for (let label of label_set) {
+            excluded_set.add(label);
+          }
+        }
+      }
+
+      // Remove the excluded values from the selected set.
+      excluded_set.forEach((label) => {
+        test_data.selected_set.delete(label);
+      });
+      // Update the count item in the UI
+      newSize = test_data.selected_set.size;
+      if (output) {
+        // Set the current count of items
+        output[0].innerHTML = output[0].innerText = newSize;
+      }
+      // Get the characterized data from the correct set of items
       if (newSize == 0) {
-        // Turn on all the check boxes in this group
-        for (let index in test_data.check_boxes) {
-          const check_item = test_data.check_boxes[index];
-          const check_div = check_item.parentElement;
-          check_div.style.display = 'block';
-          check_item.checked = false;
-          check_div.attributeStyleMap.clear();
-        }
-        // Reset the count to all?
+        // Reset all widgets in this group
+        clearSelectedItems(null, class_name);
       } else {
-        // Consider all the sets that intersect with selected.
-        let intersection_checkboxes = [];
+        // Find all the sets that intersect with the selected.
         for (let index in test_data.check_boxes) {
-          const check_item = test_data.check_boxes[index];
-          const label_string = check_item.value;
-          const str_len = label_string.length;
-          const labels = label_string.split(",");
-          const newSet = new Set(labels);
-          let intersectSet = new Set([...newSet].filter(i => test_data.selected_set.has(i)));
-
-          const div_for_checkbox = check_item.parentElement;
+          const widget = test_data.check_boxes[index];
+          const label_set = test_data.widget_label_sets[index];
+
+          // Compute the overlap between the selected set and this widget's data.
+          let intersectSet = new Set(
+              [...label_set].filter(i => test_data.selected_set.has(i)));
+
+          const div_for_checkbox = widget.parentElement;
           if (intersectSet.size > 0) {
-            intersection_checkboxes.push(check_item);
             div_for_checkbox.style.setProperty("text-decoration", "");
             div_for_checkbox.attributeStyleMap.clear();
           } else {
@@ -285,10 +302,6 @@
           }
         }
       }
-      if (output) {
-        // Set the current count of items
-        output[0].innerHTML = output[0].innerText = newSize;
-      }
       return newSize;
     }

@@ -298,11 +311,11 @@
       for (const item of test_results[item_type].json) {
         if (test_results[item_type].selected_set) {
-        let label = item['label'];
-        if (test_results[item_type].selected_set.has(label)) {
-          selected_json_data.push(item);
+          let label = item['label'];
+          if (test_results[item_type].selected_set.has(label)) {
+            selected_json_data.push(item);
+          }
         }
-      }
       }
       fill_pagination("#characterized-pagination-container_" + item_type,
                       "#characterized-data-container_" + item_type,
@@ -311,37 +324,37 @@
     }

     // UL Template for pagination.js
-    function simpleTemplating(data, c_type) {
-      let possible_fields = ['label', 'expected', 'result', 'error', 'error_detail',
-          'options', 'input_data', 'actual_options'];
-      let table_opening =
      [The rest of this hunk is garbled: the HTML table markup inside
      simpleTemplating's string literals was stripped during extraction.
      The surviving fragments show the removed and re-added versions both
      pushing header cells with html.push('<th>' + key + '</th>'), closing
      the header row with html.push('</tr>'), and pushing data cells with
      html.push('<td>' + output + '</td>') before closing each row with
      html.push("</tr>").]
-      Filtered count = 0
+      Filtered count = unknown
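Aside on the tristate logic above: widgetChanged() starts from all_labels, intersects the selection with each checked widget's label set, collects crossed widgets' labels, and removes them at the end; question-mark widgets contribute nothing. A minimal sketch of that set algebra, in Python for brevity; filter_labels and the widgets argument are illustrative names, not code from this PR:

    # Sketch of widgetChanged()'s include/exclude filtering.
    # CHECK/CROSS/QUESTION mirror the check_mark/cross_mark/question_mark constants.
    CHECK, CROSS, QUESTION = '\u2705', '\u274C', '\u2753'

    def filter_labels(all_labels, widgets):
        """widgets: iterable of (state, label_set) pairs, one per tristate control."""
        selected = set(all_labels)
        excluded = set()
        for state, label_set in widgets:
            if state == CHECK:       # keep only tests carrying these labels
                selected &= label_set
            elif state == CROSS:     # mark these labels for removal
                excluded |= label_set
            # QUESTION: the control is neutral and contributes nothing
        return selected - excluded

    # {a,b,c,d} intersected with {a,b,c} and {b,c,d}, minus {c}, leaves {b}.
    print(filter_labels({'a', 'b', 'c', 'd'},
                        [(CHECK, {'a', 'b', 'c'}),
                         (CHECK, {'b', 'c', 'd'}),
                         (CROSS, {'c'})]))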
diff --git a/verifier/testreport.py b/verifier/testreport.py
index e64858e7..7ec9d08d 100644
--- a/verifier/testreport.py
+++ b/verifier/testreport.py
@@ -81,7 +81,6 @@ def __init__(self, report_path, report_html_path):
         self.verifier_obj = None

-        self.timestamp = None
         self.results = None
         self.verify = None

@@ -512,6 +511,8 @@ def characterize_failures_by_options(self, failing_tests):
         results['locale'] = {}  # Dictionary of labels for each locale
         for test in failing_tests:
             # Get input_data, if available
+            input_data = test.get('input_data')
+
             try:
                 label = test['label']
             except:
@@ -533,65 +534,60 @@ def characterize_failures_by_options(self, failing_tests):
             except:
                 locale = None
             if locale:
-                if locale in results['locale']:
-                    results['locale'][locale].add(label)
-                else:
-                    results['locale'][locale] = set([label])
+                if locale not in results['locale']:
+                    results['locale'][locale] = set()
+                results['locale'][locale].add(label)

             options = input_data.get('options')
             if options:
                 # Get each combo of key/value
-                for key, value in options.items():
-                    if key not in results:
-                        results[key] = {}
-                    if value in results[key]:
-                        results[key][value].add(label)
-                    else:
-                        results[key][value] = set(label)
-
-            # Try fields in language_names
-            for key in ['language_label', 'locale_label']:
-                try:
-                    if input_data.get(key):
-                        value = input_data[key]
-                        if key not in results:
-                            results[key] = {}
-                        if value in results[key]:
-                            results[key][value].add(label)
-                        else:
-                            results[key][value] = set(label)
-                except:
-                    continue
-
-            # Try fields in likely_subtags
-            for key in ['option', 'locale']:
-                try:
-                    if input_data.get(key):
-                        value = input_data[key]
-                        if key not in results:
-                            results[key] = {}
-                        if value in results[key]:
-                            results[key][value].add(label)
-                        else:
-                            results[key][value] = set(label)
-                except:
-                    continue
-
-            for key in ['language_label', 'ignorePunctuation', 'compare_result', 'compare_type', 'test_description']:
-                try:
-                    if test.get(key):  # For collation results
-                        value = test[key]
-                        if key not in results:
-                            results[key] = {}
-                        if value in results[key]:
-                            results[key][value].add(label)
-                        else:
-                            results[key][value] = set(label)
-                except:
-                    continue
+                for k, value in options.items():
+                    if k not in results:
+                        results[k] = {}
+                    if value not in results[k]:
+                        results[k][value] = set()
+                    results[k][value].add(label)
+
+            # Try fields in language_names
+            for key in ['language_label', 'locale_label']:
+                try:
+                    if input_data.get(key):
+                        value = input_data[key]
+                        if key not in results:
+                            results[key] = {}
+                        if value not in results[key]:
+                            results[key][value] = set()
+                        results[key][value].add(label)
+                except:
+                    continue
+
+            # Try fields in likely_subtags
+            for key in ['option', 'locale']:
+                try:
+                    if input_data.get(key):
+                        value = input_data[key]
+                        if key not in results:
+                            results[key] = {}
+                        if value not in results[key]:
+                            results[key][value] = set()
+                        results[key][value].add(label)
+                except:
+                    continue
+
+            for key in ['language_label', 'ignorePunctuation', 'compare_result', 'compare_type', 'test_description']:
+                try:
+                    if test.get(key):  # For collation results
+                        value = test[key]
+                        if key not in results:
+                            results[key] = {}
+                        if value not in results[key]:
+                            results[key][value] = set()
+                        results[key][value].add(label)
+                except:
+                    continue

             # Look at the input_data part of the test result
-            # TODO: Check the error_detail and error pars, too.
+            # TODO: Check the error_detail and error parts, too.
             key_list = [
                 'compare_type',
                 'error_detail',
@@ -603,7 +599,7 @@ def characterize_failures_by_options(self, failing_tests):
                 'test_description',
                 'unsupported_options',
             ]
-            input_data = test.get('input_data')
+
             self.add_to_results_by_key(label, results, input_data, test, key_list)

             # Special case for input_data / options.
@@ -630,7 +626,7 @@ def add_to_results_by_key(self, label, results, input_data, test, key_list):
         if input_data:
             for key in key_list:
                 try:
-                    if (input_data.get(key)):  # For collation results
+                    if input_data.get(key):  # For collation results
                         value = input_data.get(key)
                         if key == 'rules':
                             value = 'RULE'  # A special case to avoid over-characterization
@@ -733,7 +729,7 @@ def check_simple_text_diffs(self):
                     if x[2] in ['+', '0', '+0']:
                         results['exponent_diff'].add(label)

-            # Check for substtituted types of parentheses, brackets, brackes
+            # Check for substituted types of parentheses, brackets, braces
             if '[' in expected and '(' in actual:
                 actual_parens = actual.replace('(', '[').replace(')', ']')
                 if actual_parens == expected:
@@ -757,7 +753,8 @@ def save_characterized_file(self, characterized_data, characterized_type):
             file.write(json_data)
             file.close()
         except BaseException as error:
-            logging.error("CANNOT WRITE CHARACTERIZE FILE FOR %s at ", characterized_type, character_file_path)
+            logging.error("%s: CANNOT WRITE CHARACTERIZE FILE FOR %s at %s",
+                          error, characterized_type, character_file_path)
         return

     def create_html_diff_report(self):
@@ -819,9 +816,9 @@ def summarize_failures(self):
         logging.info('--------- %s %s %d failures-----------',
                      self.exec, self.test_type, len(self.failing_tests))
         logging.debug('  SINGLE SUBSTITUTIONS: %s',
-                     sort_dict_by_count(self.diff_summary.single_diffs))
+                      sort_dict_by_count(self.diff_summary.single_diffs))
         logging.debug('  PARAMETER DIFFERENCES: %s',
-                     sort_dict_by_count(self.diff_summary.params_diff))
+                      sort_dict_by_count(self.diff_summary.params_diff))

     def analyze_simple(self, test):
         # This depends on test_type
@@ -968,21 +965,21 @@ def summarize_reports(self):
             executor = ''
             icu_version = os.path.basename(os.path.dirname(dir_path))

-            results = defaultdict(lambda : defaultdict(list))
+            results = defaultdict(lambda: defaultdict(list))
             test_type = None
             try:
                 executor = test_environment['test_language']
                 test_type = test_environment['test_type']
                 if 'cldr_version' in platform:
-                    cldrVersion = platform['cldrVersion']
+                    cldr_version = platform['cldrVersion']
                 else:
-                    cldrVersion = 'unspecified'
+                    cldr_version = 'unspecified'
                 test_results = {
                     'exec': executor,
                     'exec_version': '%s_%s\n%s' % (executor, platform['platformVersion'], icu_version),
                     'exec_icu_version': platform['icuVersion'],
-                    'exec_cldr_version': cldrVersion,
+                    'exec_cldr_version': cldr_version,
                     'test_type': test_type,
                     'date_time': test_environment['datetime'],
                     'test_count': int(test_environment['test_count']),
@@ -1008,7 +1005,7 @@ def summarize_reports(self):
             try:
                 # Categorize by executor and test_type
                 # TODO: Add detail of version, too
-                test_version_info = test_results['version'] 
+                test_version_info = test_results['version']
                 slot = '%s_%s' % (executor, test_version_info['platformVersion'])
                 if executor not in self.exec_summary:
                     # TESTING
@@ -1024,19 +1021,7 @@ def summarize_reports(self):
             except BaseException as err:
                 logging.error('SUMMARIZE REPORTS in exec_summary %s, %s. Error: %s',
-                              executor, test_type, err)
-
-    def get_stats(self, entry):
-        # Process items in a map to give HTML table value
-        out_list = [
-            'Test count: %s' % '{:,}'.format(entry['test_count']),
-            'Succeeded: %s' % '{:,}'.format(entry['pass_count']),
-            'Failed: %s' % '{:,}'.format(entry['fail_count']),
-            'Unsupported: %s' % '{:,}'.format(entry['error_count']),
-            'Missing verify: %s' % '{:,}'.format(entry['missing_verify_count']),
-            '<a href="%s">Details</a>' % entry['html_file_name']
-        ]
-        return ' \n
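Note on the characterization fixes above: every block follows the same "bucket this test's label under results[key][value]" pattern, and the two bugs corrected in this hunk (an inverted "if value in results[key]" membership test, and "results[key][value] = set(label)", which builds a set of the label's characters instead of adding the label) are both divergences in that pattern. A compact equivalent, with add_label as a hypothetical helper name rather than part of this PR:

    # Sketch: one setdefault chain replaces the key/value existence checks,
    # so the missing-key and missing-value branches cannot diverge.
    def add_label(results, key, value, label):
        results.setdefault(key, {}).setdefault(value, set()).add(label)

    results = {'locale': {}}
    add_label(results, 'locale', 'en-US', 'test_0001')
    add_label(results, 'locale', 'en-US', 'test_0002')
    print(results)  # {'locale': {'en-US': {'test_0001', 'test_0002'}}} (set order may vary)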