diff --git a/verifier/detail_template.html b/verifier/detail_template.html index 88f88dd4..96941a42 100644 --- a/verifier/detail_template.html +++ b/verifier/detail_template.html @@ -100,34 +100,42 @@ // Stores info from the json data fetched asynchronously. let test_results = { 'pass': { - json: null, + json: null, // JSON characterized data + all_labels: new Set(), // All the labels for this category count: 0, characterized: null, - check_boxes: [], + check_boxes: [], // The widgets associated with characterized items + widget_label_sets: [], // A parallel array of sets of labels for each. box_labels: [], selected_set: null }, 'fail': { json: null, + all_labels: new Set(), count: 0, characterized: null, check_boxes: [], + widget_label_sets: [], box_labels: [], selected_set: null }, 'error': { json: null, + all_labels: new Set(), count: 0, characterized: null, check_boxes: [], + widget_label_sets: [], box_labels: [], selected_set: null }, 'unsupported': { json: null, + all_labels: new Set(), count: 0, characterized: null, check_boxes: [], + widget_label_sets: [], box_labels: [], selected_set: null, } @@ -135,9 +143,6 @@ // Get the JSON data from the tests. - // Characterizations of non-passing. - let fail_char; - // Data dynamically created from test failures // on selection of some criteria. 
let p1 = fetch('./pass.json'); @@ -150,134 +155,146 @@ let p_error_char = fetch('./error_characterized.json') let p_unsupported_char = fetch('./unsupported_characterized.json') - // Synchronize all the data loading and charts / pagination - Promise.all([ - p1.then((response) => response.json()) - .then((data) => { - test_results['pass'].json = data}), - p2.then((response) => response.json()) - .then((data) => { - test_results['fail'].json = data}), - p3.then((response) => response.json()) - .then((data) => { - test_results['error'].json = data}), - p4.then((response) => response.json()) - .then((data) => { - test_results['unsupported'].json = data}), - - p_pass_char.then((response) => response.json()) - .then((data) => { - test_results['pass'].characterized = data}), - p_fail_char.then((response) => response.json()) - .then((data) => { - test_results['fail'].characterized = data}), - p_error_char.then((response) => response.json()) - .then((data) => { - test_results['error'].characterized = data}), - p_unsupported_char.then((response) => response.json()) - .then((data) => { - test_results['unsupported'].characterized = data}), - - new Promise((resolve, reject) => { - $(document).ready(resolve); - }) - ]).then(([p1, p2, p3, p4, _ready]) => { + // Synchronize all the data loading and charts / pagination + Promise.all([ + p1.then((response) => response.json()) + .then((data) => { + test_results['pass'].json = data}), + p2.then((response) => response.json()) + .then((data) => { + test_results['fail'].json = data}), + p3.then((response) => response.json()) + .then((data) => { + test_results['error'].json = data}), + p4.then((response) => response.json()) + .then((data) => { + test_results['unsupported'].json = data}), + + // TODO: Make a separate promise.all for this? 
+ p_pass_char.then((response) => response.json()) + .then((data) => { + test_results['pass'].characterized = data}), + p_fail_char.then((response) => response.json()) + .then((data) => { + test_results['fail'].characterized = data}), + p_error_char.then((response) => response.json()) + .then((data) => { + test_results['error'].characterized = data}), + p_unsupported_char.then((response) => response.json()) + .then((data) => { + test_results['unsupported'].characterized = data}), + + new Promise((resolve, reject) => { + $(document).ready(resolve); + }) + ]).then(([p1, p2, p3, p4, _ready]) => { // Load the Visualization API and the corechart package. google.charts.load('current', {'packages':['corechart']}); // Set a callback to run when the Google Visualization API is loaded. google.charts.setOnLoadCallback(drawChart); onloadFn(); // To call the pagination setup. - }); - - // Callback that creates and populates a data table - // Instantiates it with data, and draws it. - // TODO: Update with all the data loaded - function drawChart() { - // For each test type: - // Get a div for the chart - let chart_output_area = document.getElementById('chart_div'); - let input_data = [ - ['Results', 'Count', {role:'style'}], - ['Passing', test_results['pass'].json.length, '#44ff77'], - ['Failing', test_results['fail'].json.length, '#ff0000'], - ['Errors', test_results['error'].json.length, '#ffdd00'], - ['Unsupported', test_results['unsupported'].json.length, '#777777'] - ]; - const chart = new google.visualization.BarChart(chart_output_area); - let chart_data = google.visualization.arrayToDataTable(input_data); - if (chart && chart_data) { - let chart_options = { - legend: {position: 'bottom', maxLines: 3}, - isStacked: true, - width: 600, height:200, bar: {groupWidth: '90%' } - }; - chart.draw(chart_data, chart_options); - } - } - - let selectedSet = null; - let intersection_labels = []; - let accum_boxes = []; - function checkboxChanged(box) { - // Update the group selected 
with the intersection of values - const box_class = box.className; - const test_data = test_results[box_class]; - test_data.selected_set = null; - - // TODO: Finish this - let characterized_labels = test_data.box_labels; - for (let check_box of test_data.check_boxes) { - // For each checkbox - if (check_box.checked) { - const labels_string = check_box.value; - const str_len = labels_string.length; - const labels = labels_string.split(","); - const newSet = new Set(labels); - if (! test_data.selected_set) { - // Start the set of selected items. - test_data.selected_set = newSet; - } else { - // Update by intersection with this set. - test_data.selected_set = - new Set([...test_data.selected_set].filter((x) => newSet.has(x))); - } - } + }); + + // Callback that creates and populates a data table + // Instantiates it with data, and draws it. + function drawChart() { + // For each test type: + // Get a div for the chart + let chart_output_area = document.getElementById('chart_div'); + let input_data = [ + ['Results', 'Count', {role:'style'}], + ['Passing', test_results['pass'].json.length, '#44ff77'], + ['Failing', test_results['fail'].json.length, '#ff0000'], + ['Errors', test_results['error'].json.length, '#ffdd00'], + ['Unsupported', test_results['unsupported'].json.length, '#777777'] + ]; + const chart = new google.visualization.BarChart(chart_output_area); + let chart_data = google.visualization.arrayToDataTable(input_data); + if (chart && chart_data) { + let chart_options = { + legend: {position: 'bottom', maxLines: 3}, + isStacked: true, + width: 600, height:200, bar: {groupWidth: '90%' } + }; + chart.draw(chart_data, chart_options); } - // Do something with the selected set. - // Get id of the correct count item. - const class_name = box_class; // get from a parent. - const element_name = 'selectedCount'; - const selected_count_items = document.getElementsByName(element_name); + } - // Spread this out into an array for filtering. 
+ // Constants for display of tristate widget + const question_mark = '\u2753'; + const check_mark = '\u2705'; + const cross_mark = '\u274C'; + + function widgetChanged(widget) { + // Update the group selected with the intersection or inclusion of label sets + const class_name = widget.className; + const test_data = test_results[class_name]; + + // Start with all the tests, intersecting or removing data as needed. + test_data.selected_set = new Set(test_data.all_labels); + + const selected_count_items = document.getElementsByName('selectedCount'); + + // Update the number of results size + let newSize = test_data.selected_set == null ? 0 : test_data.selected_set.size; const output = [...selected_count_items].filter(elem => elem.className == class_name); - const newSize = test_data.selected_set == null ? 0 : test_data.selected_set.size; + if (output) { + // Set the current count of items + output[0].innerHTML = output[0].innerText = newSize; + } + + let excluded_set = new Set(); // Remove these label after the loop. + + // For each widget, if active, update the state of the selected and exclued sets + for (let index in test_data.check_boxes) { + const widget = test_data.check_boxes[index]; + const label_set = test_data.widget_label_sets[index]; + + // This is a tristate item. + const which_state = widget.value; + + // Choose the value depending on the status of the control + if (which_state == check_mark) { + // Update by intersection with this set. + test_data.selected_set = + new Set([...test_data.selected_set].filter((x) => label_set.has(x))); + } + else if (which_state == cross_mark) { + // Update by removing all these labels, i.e., intersect with the inverse + for (let label of label_set) { + excluded_set.add(label); + } + } + } + + // Remove the excluded values from selected set. 
+ excluded_set.forEach((label) => { + test_data.selected_set.delete(label); + }); + // Update the count item in the UI + newSize = test_data.selected_set.size; + if (output) { + // Set the current count of items + output[0].innerHTML = output[0].innerText = newSize; + } + // Get the characterized data from the correct set of items if (newSize == 0) { - // Turn on all the check boxes in this group - for (let index in test_data.check_boxes) { - const check_item = test_data.check_boxes[index]; - const check_div = check_item.parentElement; - check_div.style.display = 'block'; - check_item.checked = false; - check_div.attributeStyleMap.clear(); - } - // Reset the count to all? + // Reset all widgets in this group + clearSelectedItems(null, class_name); } else { - // Consider all the sets that intersect with selected. - let intersection_checkboxes = []; + // Find all the sets that intersect with the selected. for (let index in test_data.check_boxes) { - const check_item = test_data.check_boxes[index]; - const label_string = check_item.value; - const str_len = label_string.length; - const labels = label_string.split(","); - const newSet = new Set(labels); - let intersectSet = new Set([...newSet].filter(i => test_data.selected_set.has(i))); - - const div_for_checkbox = check_item.parentElement; + const widget = test_data.check_boxes[index]; + const label_set = test_data.widget_label_sets[index]; + + // Compute overlap between the selected set and this widget's data. 
+ let intersectSet = new Set( + [...label_set].filter(i => test_data.selected_set.has(i))); + + const div_for_checkbox = widget.parentElement; if (intersectSet.size > 0) { - intersection_checkboxes.push(check_item); div_for_checkbox.style.setProperty("text-decoration", ""); div_for_checkbox.attributeStyleMap.clear(); } else { @@ -285,10 +302,6 @@ } } } - if (output) { - // Set the current count of items - output[0].innerHTML = output[0].innerText = newSize; - } return newSize; } @@ -298,11 +311,11 @@ for (const item of test_results[item_type].json) { if (test_results[item_type].selected_set) { - let label = item['label']; - if (test_results[item_type].selected_set.has(label)) { - selected_json_data.push(item); + let label = item['label']; + if (test_results[item_type].selected_set.has(label)) { + selected_json_data.push(item); + } } - } } fill_pagination("#characterized-pagination-container_" + item_type, "#characterized-data-container_" + item_type, @@ -311,37 +324,37 @@ } // UL Template for pagination.js - function simpleTemplating(data, c_type) { - let possible_fields = ['label', 'expected', 'result', 'error', 'error_detail', - 'options', 'input_data', 'actual_options']; - let table_opening = - ''; + function simpleTemplating(data, c_type) { + let possible_fields = ['label', 'expected', 'result', 'error', 'error_detail', + 'options', 'input_data', 'actual_options']; + let table_opening = + '
'; let html = [table_opening]; // Sets up table if (data.length > 0) { - html.push(''); - for (let key of possible_fields) { - if (key in data[0]) { - html.push(''); - } - } - - html.push(''); - $.each(data, function(index, item){ - html.push(""); - for (let key of possible_fields) { - if (key in data[0]) { - let output; - if (typeof item[key] == 'object') { - output = JSON.stringify(item[key]); - } else { - output = item[key]; - } - html.push(''); - } - } - html.push(""); - }); + html.push(''); + for (let key of possible_fields) { + if (key in data[0]) { + html.push(''); + } + } + + html.push(''); + $.each(data, function(index, item){ + html.push(""); + for (let key of possible_fields) { + if (key in data[0]) { + let output; + if (typeof item[key] == 'object') { + output = JSON.stringify(item[key]); + } else { + output = item[key]; + } + html.push(''); + } + } + html.push(""); + }); } html.push('
' + key +'
' + output +'
' + key +'
' + output +'
'); @@ -349,7 +362,7 @@ } function onloadFn() { - // Set up the pagination of each set of results + // Set up for pagination of each set of results // Do this for each class of results. const container_types = ['pass', 'fail', 'error', 'unsupported']; let total_summary_count = 0; @@ -394,133 +407,195 @@ } - /* set up areas for error and unsupported detail */ - create_checkbox_area('pass'); - create_checkbox_area('fail'); - create_checkbox_area('error'); - create_checkbox_area('unsupported'); + // Tristate controls for each characterization. + create_widget_area('pass', create_tristate_area); + create_widget_area('fail', create_tristate_area); + create_widget_area('error', create_tristate_area); + create_widget_area('unsupported', create_tristate_area); + } + + function fill_pagination(pagination_container_name, + data_container_name, + data_json, container_type) { + let pagination_container = $(pagination_container_name); + let data_container = $(data_container_name); + pagination_container.pagination({ + dataSource: data_json, + pageSize: 10, + showSizeChanger: true, + showGoInput: true, + showGoButton: true, + showNavigator: true, + formatGoInput: 'Go to page <%= input %>', + formatNavigator: '<%= rangeStart %>-<%= rangeEnd %> of <%= totalNumber %> items', + position: 'top', + callback: function(data, pagination) { + // template method of yourself + // Create the HTML for the + var html = simpleTemplating(data, container_type); + data_container.html(html); + } + }); } - function fill_pagination(pagination_container_name, - data_container_name, - data_json, container_type) { - let pagination_container = $(pagination_container_name); - let data_container = $(data_container_name); - pagination_container.pagination({ - dataSource: data_json, - pageSize: 10, - showSizeChanger: true, - showGoInput: true, - showGoButton: true, - showNavigator: true, - formatGoInput: 'Go to page <%= input %>', - formatNavigator: '<%= rangeStart %>-<%= rangeEnd %> of <%= totalNumber %> 
items', - position: 'top', - callback: function(data, pagination) { - // template method of yourself - // Create the HTML for the - var html = simpleTemplating(data, container_type); - data_container.html(html); - } - }); - } - - function create_checkbox_area(data_class) { - /* Given a set of labels and named characteristics, create a set of checkboxes with - labels for selecting a subset of items. Put this in the proper div. - Create "Other" category for any non-categorized tests. - */ + // ----------------------- + function create_tristate_area( + id, data_class, characterization, count, container) { + // Make a tristate control for the items in this data class + // Reference: https://jsfiddle.net/wf_bitplan_com/941std72/8/ + // Creates this widget, storing the result in the data_class + // + + let box = document.createElement("INPUT"); + box.setAttribute("type", "button"); + box.setAttribute("id", id); + box.setAttribute("name", id); + box.setAttribute("value", "\u2753"); + + // Update the state of this control and handle implications. + box.setAttribute("onclick", "tristate1(this);"); + + box.className = data_class; + let box_label = document.createElement("label"); + box_label.htmlFor = id; + const text = " " + count + ":" + characterization; + box_label.appendChild(document.createTextNode(text)); + + let box_div = document.createElement("div"); + box_div.setAttribute("id", "div_" + id); + box_div.appendChild(box); + box_div.appendChild(box_label); + box_div.appendChild(document.createElement('br')); + + if (container) { + container.appendChild(box_div); + } + return box; + } + + function tristate1(control) { + // Question, Check, Cross + // Get the next state of the control. + tristate(control, question_mark, check_mark, cross_mark); + + // Handle the new state of the tristate or check box. 
+ widgetChanged(control); + } + + /** + * loops thru the given 3 values for the given control + */ + function tristate(control, value1, value2, value3) { + switch (control.value.charAt(0)) { + case value1: + control.value = value2; + break; + case value2: + control.value = value3; + break; + case value3: + control.value = value1; + break; + default: + // display the current value if it's unexpected + alert(control.value); + } + } + + function create_widget_area(data_class, make_widget_function) { + /* Given a set of labels and named characteristics, create a set of checkboxes with + labels for selecting a subset of items. Put this in the proper div. + Create "Other" category for any non-categorized tests. + */ const test_info = test_results[data_class]; + const selected_count_items = document.getElementsByName('selectedCount'); + let leftover_labels = new Set(); - const json = test_info['json']; + const json = test_info.json; for (const item of json) { - leftover_labels.add(item['label']); + const label = item['label']; + test_info.all_labels.add(label); + leftover_labels.add(label); } + // Update the number of results size + const newSize = test_info.all_labels.size; + const output = [...selected_count_items].filter(elem => elem.className == data_class); + if (output) { + output[0].innerHTML = output[0].innerText = newSize; + } + let div_name = data_class + '_characterized'; let container = document.getElementById(div_name); for (const characterization in test_info.characterized) { let id = div_name + "_" + characterization; const dict_values = test_info.characterized[characterization]; - for (const label of dict_values) { - leftover_labels.delete(label); - } let count = 0; + // Dict values could be array items or single values + let all_labels = new Set(); for (let key of Object.keys(dict_values)) { const values = dict_values[key]; if (Array.isArray(values)) { - count += values.length; + count += values.length; + values.forEach((v) => all_labels.add(v)); } else { - count += 1; + 
count += 1; + all_labels.add(values); } } + if (count > 0) { - const new_box = make_characterized_box( - id, dict_values, data_class, characterization, + // Reduce the set of leftovers and make a new widget. + all_labels.forEach((label) => { + leftover_labels.delete(label); + }); + + const new_widget = make_widget_function( + id, data_class, characterization, count, container); - test_info.box_labels.push(characterization); - test_info.check_boxes.push(new_box); + test_info.widget_label_sets.push(all_labels); + test_info.box_labels.push(characterization); + test_info.check_boxes.push(new_widget); } } + + // Handle any non-characterized tests. if (leftover_labels.size > 0) { // Create an item for non-categorized results. const characterization = "others"; let id = div_name + "_" + characterization; - const new_box = make_characterized_box( - id, leftover_labels, data_class, characterization, + const new_widget = make_widget_function( + id, data_class, characterization, leftover_labels.size, container); - + test_info.widget_label_sets.push(leftover_labels); test_info.box_labels.push(characterization); - test_info.check_boxes.push(new_box); + test_info.check_boxes.push(new_widget); } } - /* - Create a check box with the labels for a characterization. - id: data_class plus the characterization - dict_values: the set of labels attached to this check box - data_class: one of "pass", "fail", etc. 
- characterization: string describing the identified attribute(s) - count: how many are in the list of labels - container: the HTML object containing the div of this checkbox - */ - function make_characterized_box( - id, dict_values, data_class, characterization, count, container) { - let box = document.createElement("INPUT"); - box.setAttribute("type", "checkbox"); - box.setAttribute("id", id); - box.setAttribute("name", id); - box.setAttribute("value", [...dict_values]); - box.setAttribute("onclick", "checkboxChanged(this);"); - box.className = data_class; - let box_label = document.createElement("label"); - box_label.htmlFor = id; - const text = count + ":" + characterization; - box_label.appendChild(document.createTextNode(text)); + function clearSelectedItems(the_button, test_class) { + // Clear all the check boxes for this test_class. + const test_data = test_results[test_class]; + test_data.check_boxes.forEach((widget) => { + widget.value = question_mark; + const div_for_checkbox = widget.parentElement; + div_for_checkbox.attributeStyleMap.clear(); + }); - let box_div = document.createElement("div"); - box_div.setAttribute("id", "div_" + id); - box_div.appendChild(box); - box_div.appendChild(box_label); - box_div.appendChild(document.createElement('br')); + + // reset the number of results size + const selected_count_items = document.getElementsByName('selectedCount'); + const newSize = test_data.selected_set == null ? 0 : test_data.all_labels.size; - if (container) { - container.appendChild(box_div); + const output = [...selected_count_items].filter(elem => elem.className == test_class); + if (output) { + // Set the current count of items + output[0].innerHTML = output[0].innerText = newSize; } - return box; - } - - function clearSelectedItems(the_button, test_class) { - // Clear all the check boxes for this test_class. 
- const test_data = test_results[test_class]; - test_data.check_boxes.forEach((box) => { - box.checked = false; - const div_for_checkbox = box.parentElement; - div_for_checkbox.attributeStyleMap.clear(); - }); } + // For getting contents of output into json string for testing function captureInputDataOnClick(element) { const text = element.innerHTML; let output = ''; @@ -534,9 +609,9 @@ navigator.clipboard.writeText(output); } - - - + + +

Verification report: $test_type on $library_name

$platform_info

@@ -566,7 +641,7 @@

Test details

Passing tests characterized
-

Filtered count = 0 +

Filtered count = unknown

diff --git a/verifier/testreport.py b/verifier/testreport.py index e64858e7..7ec9d08d 100644 --- a/verifier/testreport.py +++ b/verifier/testreport.py @@ -81,7 +81,6 @@ def __init__(self, report_path, report_html_path): self.verifier_obj = None - self.timestamp = None self.results = None self.verify = None @@ -512,6 +511,8 @@ def characterize_failures_by_options(self, failing_tests): results['locale'] = {} # Dictionary of labels for each locale for test in failing_tests: # Get input_data, if available + input_data = test.get('input_data') + try: label = test['label'] except: @@ -533,65 +534,60 @@ def characterize_failures_by_options(self, failing_tests): except: locale = None if locale: - if locale in results['locale']: - results['locale'][locale].add(label) - else: - results['locale'][locale] = set([label]) + if locale not in results['locale']: + results['locale'][locale] = set() + results['locale'][locale].add(label) options = input_data.get('options') if options: # Get each combo of key/value - for key, value in options.items(): - if key not in results: - results[key] = {} - if value in results[key]: - results[key][value].add(label) - else: - results[key][value] = set(label) - - # Try fields in language_names - for key in ['language_label', 'locale_label']: - try: - if input_data.get(key): - value = input_data[key] - if key not in results: - results[key] = {} - if value in results[key]: - results[key][value].add(label) - else: - results[key][value] = set(label) - except: - continue - - # Try fields in likely_subtags - for key in ['option', 'locale']: - try: - if input_data.get(key): - value = input_data[key] - if key not in results: - results[key] = {} - if value in results[key]: - results[key][value].add(label) - else: - results[key][value] = set(label) - except: - continue - - for key in ['language_label', 'ignorePunctuation', 'compare_result', 'compare_type', 'test_description']: - try: - if test.get(key): # For collation results - value = test[key] - if key 
not in results: - results[key] = {} - if value in results[key]: - results[key][value].add(label) - else: - results[key][value] = set(label) - except: - continue + for k, value in options.items(): + if k not in results: + results[k] = {} + if value not in results[k]: + results[k][value] = set() + results[k][value].add(label) + + # Try fields in language_names + for key in ['language_label', 'locale_label']: + try: + if input_data.get(key): + value = input_data[key] + if key not in results: + results[key] = {} + if value not in results[key]: + results[key][value] = set() + results[key][value].add(label) + except: + continue + + # Try fields in likely_subtags + for key in ['option', 'locale']: + try: + if input_data.get(key): + value = input_data[key] + if key not in results: + results[key] = {} + if value not in results[key]: + results[key][value] = set() + results[key][value].add(label) + except: + continue + + for key in ['language_label', 'ignorePunctuation', 'compare_result', 'compare_type', 'test_description']: + try: + if test.get(key): # For collation results + value = test[key] + if key not in results: + results[key] = {} + if value not in results[key]: + results[key][value] = set() + results[key][value].add(label) + except: + continue # Look at the input_data part of the test result - # TODO: Check the error_detail and error pars, too. + # TODO: Check the error_detail and error parts, too. key_list = [ 'compare_type', 'error_detail', @@ -603,7 +599,7 @@ 'test_description', 'unsupported_options', ] - input_data = test.get('input_data') + self.add_to_results_by_key(label, results, input_data, test, key_list) # Special case for input_data / options. 
@@ -630,7 +626,7 @@ def add_to_results_by_key(self, label, results, input_data, test, key_list): if input_data: for key in key_list: try: - if (input_data.get(key)): # For collation results + if input_data.get(key): # For collation results value = input_data.get(key) if key == 'rules': value = 'RULE' # A special case to avoid over-characterization @@ -733,7 +729,7 @@ def check_simple_text_diffs(self): if x[2] in ['+', '0', '+0']: results['exponent_diff'].add(label) - # Check for substtituted types of parentheses, brackets, brackes + # Check for substituted types of parentheses, brackets, braces if '[' in expected and '(' in actual: actual_parens = actual.replace('(', '[').replace(')', ']') if actual_parens == expected: @@ -757,7 +753,8 @@ def save_characterized_file(self, characterized_data, characterized_type): file.write(json_data) file.close() except BaseException as error: - logging.error("CANNOT WRITE CHARACTERIZE FILE FOR %s at ", characterized_type, character_file_path) + logging.error("%s: CANNOT WRITE CHARACTERIZE FILE FOR %s at ", + error, characterized_type, character_file_path) return def create_html_diff_report(self): @@ -819,9 +816,9 @@ def summarize_failures(self): logging.info('--------- %s %s %d failures-----------', self.exec, self.test_type, len(self.failing_tests)) logging.debug(' SINGLE SUBSTITUTIONS: %s', - sort_dict_by_count(self.diff_summary.single_diffs)) + sort_dict_by_count(self.diff_summary.single_diffs)) logging.debug(' PARAMETER DIFFERENCES: %s', - sort_dict_by_count(self.diff_summary.params_diff)) + sort_dict_by_count(self.diff_summary.params_diff)) def analyze_simple(self, test): # This depends on test_type @@ -968,21 +965,21 @@ def summarize_reports(self): executor = '' icu_version = os.path.basename(os.path.dirname(dir_path)) - results = defaultdict(lambda : defaultdict(list)) + results = defaultdict(lambda: defaultdict(list)) test_type = None try: executor = test_environment['test_language'] test_type = 
test_environment['test_type'] if 'cldr_version' in platform: - cldrVersion = platform['cldrVersion'] + cldr_version = platform['cldrVersion'] else: - cldrVersion = 'unspecified' + cldr_version = 'unspecified' test_results = { 'exec': executor, 'exec_version': '%s_%s\n%s' % (executor, platform['platformVersion'], icu_version), 'exec_icu_version': platform['icuVersion'], - 'exec_cldr_version': cldrVersion, + 'exec_cldr_version': cldr_version, 'test_type': test_type, 'date_time': test_environment['datetime'], 'test_count': int(test_environment['test_count']), @@ -1008,7 +1005,7 @@ def summarize_reports(self): try: # Categorize by executor and test_type # TODO: Add detail of version, too - test_version_info = test_results['version'] + test_version_info = test_results['version'] slot = '%s_%s' % (executor, test_version_info['platformVersion']) if executor not in self.exec_summary: # TESTING @@ -1024,19 +1021,7 @@ def summarize_reports(self): except BaseException as err: logging.error('SUMMARIZE REPORTS in exec_summary %s, %s. Error: %s', - executor, test_type, err) - - def get_stats(self, entry): - # Process items in a map to give HTML table value - out_list = [ - 'Test count: %s' % '{:,}'.format(entry['test_count']), - 'Succeeded: %s' % '{:,}'.format(entry['pass_count']), - 'Failed: %s' % '{:,}'.format(entry['fail_count']), - 'Unsupported: %s' % '{:,}'.format(entry['error_count']), - 'Missing verify: %s' % '{:,}'.format(entry['missing_verify_count']), - 'Details' % entry['html_file_name'] - ] - return ' \n
'.join(out_list) + '' + executor, test_type, err) def create_summary_html(self): # Generate HTML page containing this information @@ -1101,7 +1086,7 @@ def create_summary_html(self): {'report_detail': icu_version_and_link}) except BaseException as err: logging.error('&&& TEST: %s, EXEC: %s, row_items: %s, index: %s. Error = %s', - test, executor, row_items, index, error) + test, executor, row_items, index, err) index += 1 data_rows.append(self.line_template.safe_substitute( diff --git a/verifier/verifier.py b/verifier/verifier.py index 04132e89..9b056f38 100644 --- a/verifier/verifier.py +++ b/verifier/verifier.py @@ -64,6 +64,7 @@ def open_verify_files(self, vplan): vplan.result_file = open(vplan.result_path, encoding='utf-8', mode='r') file_time = os.path.getmtime(vplan.result_path) vplan.result_time_stamp = datetime.datetime.fromtimestamp(file_time).strftime('%Y-%m-%d %H:%M') + vplan.report.timestamp = vplan.result_time_stamp except BaseException as err: logging.error(' *** Cannot open results file %s:\n %s', vplan.result_path, err) return None diff --git a/verifier/verify_plan.py b/verifier/verify_plan.py index b40d7c66..8d28de8c 100644 --- a/verifier/verify_plan.py +++ b/verifier/verify_plan.py @@ -51,7 +51,7 @@ def read_verify_files(self): file_time = os.path.getmtime(self.result_path) self.result_time_stamp = datetime.datetime.fromtimestamp( file_time).strftime('%Y-%m-%d %H:%M') - + self.report.timestamp = self.result_time_stamp self.resultData = json.loads(result_file.read()) self.test_results = self.resultData['tests'] except BaseException as err: