Commit 6410b6de authored by Vincent Stehlé

Add checkers and fix all errors



Add a checker for Python scripts and a checker for shell scripts to the
`make check' target. Fix all issues reported by the checker in parser.py.
Signed-off-by: Vincent Stehlé <vincent.stehle@arm.com>
parent cde02292
@@ -6,7 +6,8 @@ all: doc
 help:
 	@echo 'Targets:'
 	@echo ' all'
-	@echo ' check Perform sanity checks (currently yamllint)'
+	@echo ' check Perform sanity checks'
+	@echo '       (currently yamllint, shellcheck and flake8)'
 	@echo ' clean'
 	@echo ' doc Generate README.pdf'
 	@echo ' help Print this help.'
@@ -18,6 +19,8 @@ doc: README.pdf
 check:
 	yamllint .
+	shellcheck $$(find -name '*.sh')
+	flake8
 
 clean:
 	-rm -f README.pdf
@@ -250,8 +250,18 @@ It is possible to convert this `README.md` into `README.pdf` with pandoc using
 ### Sanity checks
 
-To perform sanity checks, run `make check`. For the moment this runs `yamllint`,
-which will inspect all [YAML] files and report errors. See `make help`.
+To perform sanity checks, run `make check`. It runs a number of checkers and
+reports errors:
+
+-------------------------------
+ Checker       Target
+------------- -----------------
+ `flake8`      Python scripts.
+ `yamllint`    [YAML] files.
+ `shellcheck`  Shell scripts.
+-------------------------------
+
+See `make help`.
 
 ### db structure:
......
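The sanity checks above boil down to three linter invocations. As a rough illustration only (assuming `yamllint`, `shellcheck` and `flake8` are installed and the commands are run from the repository root), the new `check` target is equivalent to:

```sh
# Manual equivalent of `make check` after this commit.
yamllint .                        # lint all YAML files
shellcheck $(find -name '*.sh')   # lint all shell scripts
flake8                            # lint Python scripts such as parser.py
```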
 #!/usr/bin/env python3
-#SCT log parser
+# SCT log parser
 import sys
@@ -33,7 +33,7 @@ else:
     yaml_load_args = {}
 
-#based loosley on https://stackoverflow.com/a/4391978
+# based loosley on https://stackoverflow.com/a/4391978
 # returns a filtered dict of dicts that meet some Key-value pair.
 # I.E. key="result" value="FAILURE"
 def key_value_find(list_1, key, value):
@@ -44,21 +44,25 @@ def key_value_find(list_1, key, value):
     return found
 
-#Were we intrept test logs into test dicts
+# Were we intrept test logs into test dicts
 def test_parser(string, current):
     test_list = {
-        "name": string[2], #FIXME:Sometimes, SCT has name and Description,
-        "result": string[1],
-        **current,
-        "guid": string[0], #FIXME:GUID's overlap
-        #"comment": string[-1], #FIXME:need to hash this out, sometime there is no comments
-        "log": ' '.join(string[3:])
+        "name": string[2],
+        # FIXME:Sometimes, SCT has name and Description,
+        "result": string[1],
+        **current,
+        "guid": string[0],
+        # FIXME:GUID's overlap
+        # "comment": string[-1], # FIXME:need to hash this out,
+        # sometime there is no comments
+        "log": ' '.join(string[3:])
     }
     return test_list
 
-#Parse the ekl file, and create a map of the tests
-def ekl_parser (file):
-    #create our "database" dict
+# Parse the ekl file, and create a map of the tests
+def ekl_parser(file):
+    # create our "database" dict
     temp_list = list()
     # All tests are grouped by the "HEAD" line, which precedes them.
     current = {}
@@ -122,14 +126,17 @@ def ekl_parser (file):
                 'device path': '|'.join(split_line[13:]),
             }
 
-        #FIXME:? EKL file has an inconsistent line structure,
+        # FIXME:? EKL file has an inconsistent line structure,
         # sometime we see a line that consits ' dump of GOP->I\n'
-        #easiest way to skip is check for blank space in the first char
+        # easiest way to skip is check for blank space in the first char
         elif split_line[0] != '' and split_line[0][0] != " ":
             try:
-                #deliminiate on ':' for tests
-                split_test = [new_string for old_string in split_line for new_string in old_string.split(':')]
-                #put the test into a dict, and then place that dict in another dict with GUID as key
+                # deliminiate on ':' for tests
+                split_test = [new_string for old_string in
+                              split_line for new_string in
+                              old_string.split(':')]
+                # put the test into a dict, and then place that dict in another
+                # dict with GUID as key
                 tmp_dict = test_parser(split_test, current)
                 temp_list.append(tmp_dict)
                 n += 1
@@ -144,74 +151,87 @@ def ekl_parser (file):
     return temp_list
 
-#Parse Seq file, used to tell which tests should run.
+# Parse Seq file, used to tell which tests should run.
 def seq_parser(file):
     temp_dict = list()
-    lines=file.readlines()
-    magic=7 #a test in a seq file is 7 lines, if not mod7, something wrong..
-    if len(lines)%magic != 0:
+    lines = file.readlines()
+    magic = 7
+    # a test in a seq file is 7 lines, if not mod7, something wrong..
+    if len(lines) % magic != 0:
         sys.exit("seqfile cut short, should be mod7")
-    #the utf-16 char makes this looping a bit harder, so we use x+(i) where i is next 0-6th
-    for x in range(0,len(lines),magic): #loop ever "7 lines"
-        #(x+0)[Test Case]
-        #(x+1)Revision=0x10000
-        #(x+2)Guid=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-        #(x+3)Name=InstallAcpiTableFunction
-        #(x+4)Order=0xFFFFFFFF
-        #(x+5)Iterations=0xFFFFFFFF
-        #(x+6)(utf-16 char)
-        #currently only add tests that are supposed to run, should add all?
-        #0xFFFFFFFF in "Iterations" means the test is NOT supposed to run
-        if not "0xFFFFFFFF" in lines[x+5]:
+    # the utf-16 char makes this looping a bit harder, so we use x+(i) where i
+    # is next 0-6th
+    # loop ever "7 lines"
+    for x in range(0, len(lines), magic):
+        # (x+0)[Test Case]
+        # (x+1)Revision=0x10000
+        # (x+2)Guid=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+        # (x+3)Name=InstallAcpiTableFunction
+        # (x+4)Order=0xFFFFFFFF
+        # (x+5)Iterations=0xFFFFFFFF
+        # (x+6)(utf-16 char)
+        # currently only add tests that are supposed to run, should add all?
+        # 0xFFFFFFFF in "Iterations" means the test is NOT supposed to run
+        if "0xFFFFFFFF" not in lines[x + 5]:
             seq_dict = {
-                "name": lines[x+3][5:-1],#from after "Name=" to end (5char long)
-                "guid": lines[x+2][5:-1],#from after"Guid=" to the end, (5char long)
-                "Iteration": lines[x+5][11:-1],#from after "Iterations=" (11char long)
-                "rev": lines[x+1][9:-1],#from after "Revision=" (9char long)
-                "Order": lines[x+4][6:-1]#from after "Order=" (6char long)
+                # from after "Name=" to end (5char long)
+                "name": lines[x + 3][5:-1],
+                # from after"Guid=" to the end, (5char long)
+                "guid": lines[x + 2][5:-1],
+                # from after "Iterations=" (11char long)
+                "Iteration": lines[x + 5][11:-1],
+                # from after "Revision=" (9char long)
+                "rev": lines[x + 1][9:-1],
+                # from after "Order=" (6char long)
+                "Order": lines[x + 4][6:-1]
             }
-            temp_dict.append(seq_dict) #put in a dict based on guid
+            # put in a dict based on guid
+            temp_dict.append(seq_dict)
     return temp_dict
 
-#group items by key, and print by key
-#we slowly iterate through the list, group and print groups
-def key_tree_2_md(input_list,file,key):
-    #make a copy so we don't destroy the first list.
+# group items by key, and print by key
+# we slowly iterate through the list, group and print groups
def key_tree_2_md(input_list, file, key):
+    # make a copy so we don't destroy the first list.
     temp_list = input_list.copy()
     while temp_list:
         test_dict = temp_list.pop()
-        found, not_found = [test_dict],[]
-        #go through whole list looking for key match
+        found, not_found = [test_dict], []
+        # go through whole list looking for key match
         while temp_list:
             next_dict = temp_list.pop()
-            if next_dict[key] == test_dict[key]: #if match add to found
+            # if match add to found
+            if next_dict[key] == test_dict[key]:
                 found.append(next_dict)
-            else: #else not found
+            # else not found
+            else:
                 not_found.append(next_dict)
-        temp_list = not_found #start over with found items removed
+        # start over with found items removed
+        temp_list = not_found
         file.write("### " + test_dict[key])
-        dict_2_md(found,file)
+        dict_2_md(found, file)
 
-#generic writer, takes a list of dicts and turns the dicts into an MD table.
-def dict_2_md(input_list,file):
+# generic writer, takes a list of dicts and turns the dicts into an MD table.
+def dict_2_md(input_list, file):
     if len(input_list) > 0:
         file.write("\n\n")
-        #create header for MD table using dict keys
+        # create header for MD table using dict keys
         temp_string1, temp_string2 = "|", "|"
         for x in (input_list[0].keys()):
             temp_string1 += (x + "|")
             temp_string2 += ("---|")
-        file.write(temp_string1+"\n"+temp_string2+"\n")
-        #print each item from the dict into the table
+        file.write(temp_string1 + "\n" + temp_string2 + "\n")
+        # print each item from the dict into the table
         for x in input_list:
             test_string = "|"
             for y in x.keys():
                 test_string += (x[y] + "|")
-            file.write(test_string+'\n')
-        #seprate table from other items in MD
+            file.write(test_string + '\n')
+        # seprate table from other items in MD
         file.write("\n\n")
@@ -674,7 +694,8 @@ if __name__ == '__main__':
     # search for failures, warnings, passes & others
     # We detect all present keys in additions to the expected ones. This is
-    # handy with config rules overriding the result field with arbitrary values.
+    # handy with config rules overriding the result field
+    # with arbitrary values.
     res_keys = set(['DROPPED', 'FAILURE', 'WARNING', 'PASS'])
 
     for x in cross_check:
@@ -755,8 +776,8 @@ if __name__ == '__main__':
     # these will be displayed in CLI
     if args.find_key is not None and args.find_value is not None:
         found = key_value_find(cross_check, args.find_key, args.find_value)
-        #print the dict
-        print("found:",len(found),"items with search constraints")
+        # print the dict
+        print("found:", len(found), "items with search constraints")
         for x in found:
             print(
                 x["guid"], ":", x["name"], "with", args.find_key, ":",
......
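For orientation, here is a minimal sketch of how the parser.py helpers touched by this commit fit together. It is not part of the commit: the file names and the utf-16 encoding are illustrative assumptions (the encoding is only hinted at by the seq_parser comments), and the real script drives these functions from its command-line entry point instead.

```python
# Illustrative sketch only; assumes it runs next to parser.py and that the
# input files and encodings below match what your SCT run actually produced.
from parser import ekl_parser, seq_parser, key_value_find

# Parse an SCT log into a list of test dicts (file name and encoding assumed).
with open('Summary.ekl', encoding='utf-16') as f:
    tests = ekl_parser(f)

# Parse a sequence file describing which tests were meant to run (assumed too).
with open('SBBR.seq', encoding='utf-16') as f:
    planned = seq_parser(f)

# Filter the parsed tests with the generic key/value helper.
failures = key_value_find(tests, 'result', 'FAILURE')
print('planned:', len(planned), 'test cases')
print('found:', len(failures), 'failures out of', len(tests), 'tests')
```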