Unverified commit 886921c5 authored by Jeffrey Booher-Kaeding, committed by GitHub

Merge pull request #7 from vstehle/for-jeffrey

From Vincent:

- Lowercase change
- JSON output
- Sorting command line option
- Internal changes, to prepare for configuration/filtering: detect values & combined databases
parents 3276c39d 62552997
# SCT_Parser

This is an external parser script for UEFI SCT. (WIP)

It's designed to read a `.ekl` results log from a UEFI SCT run, and a generated `.seq` from the UEFI SCT configurator.
@@ -15,15 +15,22 @@
The output filename can be specified with `--md <filename>`.
Online help is available with the `-h` option.

### Custom search
For a custom key:value search, the next two arguments *must be included together*. The program will search for and display the tests that meet that constraint, without the crosscheck, and print their name, GUID, and key:value pair on the command line: `python3 parser.py <file.ekl> <file.seq> <search key> <search value>`

You can use the `test_dict` below to see the available keys.
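For example, a hypothetical invocation (the filenames are placeholders) that lists every test with result `FAILURE` could look like this:

``` {.sh}
$ python3 parser.py sample.ekl sample.seq result FAILURE
```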
### Sorting data
It is possible to sort the test data before output using the
`--sort <key1,key2,...>` option.

Sorting the test data helps when comparing results with `diff`.
Example command:
``` {.sh}
$ ./parser.py --sort \
'group,descr,set guid,test set,sub set,guid,name,log' ...
```
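For instance, to compare two runs (the `old.*` and `new.*` filenames below are made up), both outputs can be sorted with the same keys and then diffed:

``` {.sh}
$ ./parser.py --sort 'guid,name' --json old.json old.ekl old.seq
$ ./parser.py --sort 'guid,name' --json new.json new.ekl new.seq
$ diff old.json new.json
```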
## Notes

### Known Issues:
@@ -75,3 +82,34 @@ seq_dict = {
    "Order": "some hex/num"
}
```
#### Spurious tests
Spurious tests are tests which were run according to the log file but were
not meant to be run according to the sequence file.

We force the "result" field of those tests to "SPURIOUS".
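As an illustration (the field values here are made up), a spurious test entry keeps all its fields from the log file and only has its result overridden:

``` {.python}
spurious_test_dict = {
    "name": "some test name",
    "result": "SPURIOUS",
    "group": "some group",
    "test set": "some test set",
    "sub set": "some sub set",
    "set guid": "some guid",
    "revision": "some hex/num",
    "guid": "some guid",
    "log": "raw log line"
}
```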
#### Dropped test sets

Dropped test sets are the test sets which were meant to be run according to
the sequence file but for which no test has been run according to the log
file.

We create artificial test entries for those dropped test sets, with the
"result" field set to "DROPPED". We convert some fields coming from the
sequence file, and auto-generate the others:
``` {.python}
dropped_test_dict = {
"name": "",
"result": "DROPPED",
"group": "Unknown",
"test set": "",
"sub set": <name from sequence file>,
"set guid": <guid from sequence file>,
"revision": <rev from sequence file>,
"guid": "",
"log": ""
}
```
@@ -6,6 +6,7 @@
import sys
import argparse
import csv
import logging
import json

# Based loosely on https://stackoverflow.com/a/4391978
@@ -164,6 +165,16 @@ def dict_2_md(input_list,file):
    file.write("\n\n")
# Sort tests data in-place
# sort_keys is a comma-separated list
# The first key has precedence, then the second, etc.
# To use python list in-place sorting, we use the keys in reverse order.
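# Note: this works because Python's list.sort() is stable, so the first key
# ends up with the highest precedence.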
def sort_data(cross_check, sort_keys):
    logging.debug(f"Sorting on `{sort_keys}'")

    for k in reversed(sort_keys.split(',')):
        cross_check.sort(key=lambda x: x[k])
# Generate csv
def gen_csv(cross_check, filename):
    # Find keys
@@ -182,17 +193,102 @@ def gen_csv(cross_check, filename):
        writer.writerows(cross_check)
# Generate json
def gen_json(cross_check, filename):
    logging.debug(f'Generate {filename}')

    with open(filename, 'w') as jsonfile:
        json.dump(cross_check, jsonfile, sort_keys=True, indent=2)
# Combine our two databases db1 and db2, coming from the ekl and seq files
# respectively, into a single cross_check database.
# Tests in db1 which were not meant to be run according to db2 have their
# result forced to SPURIOUS.
# Test sets in db2 which were not run according to db1 have an artificial
# test entry created with result DROPPED.
def combine_dbs(db1, db2):
    cross_check = db1

    # Do a pass to verify that all tests in db1 were meant to be run.
    # Otherwise, force the result to SPURIOUS.
    s = set()

    for x in db2:
        s.add(x['guid'])

    n = 0

    for i in range(len(cross_check)):
        if cross_check[i]['set guid'] not in s:
            logging.debug(f"Spurious test {i} `{cross_check[i]['name']}'")
            cross_check[i]['result'] = 'SPURIOUS'
            n += 1

    if n:
        logging.debug(f'{n} spurious test(s)')

    # Do a pass to autodetect all test fields, in case we need to merge dropped
    # test set entries
    f = {}

    for x in cross_check:
        for k in x.keys():
            f[k] = ''

    logging.debug(f'Test fields: {f.keys()}')

    # Do a pass to find the test sets that did not run for whatever reason.
    s = set()

    for x in cross_check:
        s.add(x['set guid'])

    n = 0

    for i in range(len(db2)):
        x = db2[i]

        if not x['guid'] in s:
            logging.debug(f"Dropped test set {i} `{x['name']}'")

            # Create an artificial test entry to reflect the dropped test set
            cross_check.append({
                **f,
                'sub set': x['name'],
                'set guid': x['guid'],
                'revision': x['rev'],
                'group': 'Unknown',
                'result': 'DROPPED',
            })

            n += 1

    if n:
        logging.debug(f'{n} dropped test set(s)')

    return cross_check
def main():
    parser = argparse.ArgumentParser(
        description='Process SCT results.'
                    ' This program takes the SCT summary and sequence files,'
                    ' and generates a nice report in markdown format.',
        epilog='When sorting is requested, tests data are sorted'
               ' according to the first sort key, then the second, etc.'
               ' Sorting happens after update by the configuration rules.'
               ' Useful example: --sort'
               ' "group,descr,set guid,test set,sub set,guid,name,log"',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--csv', help='Output .csv filename')
    parser.add_argument('--json', help='Output .json filename')
    parser.add_argument(
        '--md', help='Output .md filename', default='result.md')
    parser.add_argument(
        '--debug', action='store_true', help='Turn on debug messages')
    parser.add_argument(
        '--sort', help='Comma-separated list of keys to sort output on')
    parser.add_argument(
        'log_file', nargs='?', default='sample.ekl',
        help='Input .ekl filename')
@@ -225,23 +321,34 @@ def main():
    logging.debug('{} test set(s)'.format(len(db2)))
    # Produce a single cross_check database from our two db1 and db2 databases.
    cross_check = combine_dbs(db1, db2)

    # Sort tests data in-place, if requested
    if args.sort is not None:
        sort_data(cross_check, args.sort)

    # search for failures, warnings, passes & others
    # We detect all present keys in addition to the expected ones. This is
    # handy with config rules overriding the result field with arbitrary values.
    res_keys = set(['DROPPED', 'FAILURE', 'WARNING', 'PASS'])

    for x in cross_check:
        res_keys.add(x['result'])

    # Now we fill some bins with tests according to their result
    bins = {}

    for k in res_keys:
        bins[k] = key_value_find(cross_check, "result", k)

    # Print a one-line summary
    s = map(
        lambda k: '{} {}(s)'.format(len(bins[k]), k.lower()),
        sorted(res_keys))

    logging.info(', '.join(s))
    # generate MD summary
    logging.debug(f'Generate {args.md}')

@@ -250,28 +357,33 @@ def main():
        resultfile.write("# SCT Summary \n\n")
        resultfile.write("| | |\n")
        resultfile.write("|--|--|\n")
resultfile.write("|Dropped:|" + str(len(would_not_run)) + "|\n")
resultfile.write("|Failures:|" + str(len(failures)) + "|\n")
resultfile.write("|Warnings:|" + str(len(warnings)) + "|\n")
resultfile.write("|Passes:|" + str(len(passes)) + "|\n")
resultfile.write("\n\n")
resultfile.write("## 1. Silently dropped or missing") # Loop on all the result values we found for the summary
dict_2_md(would_not_run,resultfile) for k in sorted(res_keys):
resultfile.write(
"|{}:|{}|\n".format(k.title(), len(bins[k])))
resultfile.write("## 4. Failure by group")
resultfile.write("\n\n") resultfile.write("\n\n")
key_tree_2_md(failures,resultfile,"group")
# Loop on all the result values we found (except PASS) for the sections
# listing the tests by group
n = 1
res_keys_np = set(res_keys)
res_keys_np.remove('PASS')
resultfile.write("## 3. Warnings by group") for k in sorted(res_keys_np):
resultfile.write("\n\n") resultfile.write("## {}. {} by group\n\n".format(n, k.title()))
key_tree_2_md(warnings,resultfile,"group") key_tree_2_md(bins[k], resultfile, "group")
n += 1
    # Generate csv if requested
    if args.csv is not None:
        gen_csv(cross_check, args.csv)

    # Generate json if requested
    if args.json is not None:
        gen_json(cross_check, args.json)
    # Command line arguments 3 & 4 (key and value) support a key & value search.
    # The matches will be displayed on the command line.
    if args.find_key is not None and args.find_value is not None: