Test more criteria during PVS analysis
This commit:

- Adds a separate analysis run against the MISRA (Motor Industry Software Reliability Association) criteria, which are extremely thorough. Its tally is neither summarized nor fatal to the workflow. It runs virtually instantly, and the results are very interesting; however, they are too numerous to include in our general analysis (i.e., over 13,000 issues).
- Changes the PVS summary script output to a tally-per-file instead of trying to summarize the nature of each issue, which was mostly unhelpful without the full text.
- Adds the full list of suppressible issues to the report directory, so if further suppressions are needed they will be easy to find and use.
- Adds one dr_flac suppression per the resolution here: mackron/dr_libs#117
parent ffe3c5ab7f
commit 65d8187595

3 changed files with 42 additions and 29 deletions
.github/workflows/analysis.yml (vendored, 27 changes)

@@ -103,22 +103,31 @@ jobs:
       - name: Analyze
         run: |
           set -xeu
-          pvs-studio-analyzer analyze -s .pvs-suppress -o pvs-analysis.log -j "$(nproc)"
-          criteria="GA:1,2;64:1;OP:1,2,3;CS:1;MISRA:1,2"
-          plog-converter -a "${criteria}" -d V1042 -t csv -o pvs-report.csv pvs-analysis.log
-          mkdir -p pvs-analysis-report
+          log="pvs-analysis.log"
+          mirsa_criteria="MISRA:1,2"
+          general_criteria="GA:1,2;64:1;OP:1,2,3;CS:1"
           stamp="$(date +'%Y-%m-%d_T%H%M')-${GITHUB_SHA:0:8}"
-          plog-converter -a "${criteria}" -d V1042 -t fullhtml -p dosbox-staging \
-            -v "${GITHUB_SHA:0:8}" -o "pvs-analysis-report/pvs-analysis-report-${stamp}" \
-            pvs-analysis.log
+          reportdir="pvs-report/pvs-report-${stamp}"
+          mkdir -p "${reportdir}"
+          pvs-studio-analyzer analyze -a 63 -s .pvs-suppress -o "${log}" -j "$(nproc)"
+          plog-converter -a "${general_criteria}" -d V1042 -t fullhtml -p dosbox-staging \
+            -v "${GITHUB_SHA:0:8}" -o "${reportdir}" "${log}"
+          mv "${reportdir}/fullhtml" "${reportdir}/general"
+          plog-converter -a "${mirsa_criteria}" -d V1042 -t fullhtml -p dosbox-staging \
+            -v "${GITHUB_SHA:0:8}" -o "${reportdir}" "${log}"
+          mv "${reportdir}/fullhtml" "${reportdir}/mirsa"
+          plog-converter -a "${general_criteria}" -d V1042 -t csv -o pvs-report.csv "${log}"
+          cp -l pvs-report.csv "${reportdir}/general/"
+          pvs-studio-analyzer suppress -a "${general_criteria}" \
+            -o "${reportdir}/general/supressible-list.json" "${log}"
       - name: Upload report
         uses: actions/upload-artifact@master
         with:
           name: pvs-analysis-report
-          path: pvs-analysis-report
+          path: pvs-report
       - name: Summarize report
         env:
-          MAX_BUGS: 304
+          MAX_BUGS: 627
         run: |
           echo "Full report is included in build Artifacts"
           echo
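The Summarize report step feeds the CSV report and the MAX_BUGS ceiling to the summary script. How exactly the workflow passes the maximum is not visible in this hunk, so the interface below is an assumption, modeled on the script's `if len(sys.argv) == 3:` check shown further down; a minimal, hypothetical sketch of such a gate (the real script tallies via parse_issues rather than raw row counting):

# Hypothetical sketch: fail the job when the CSV tally exceeds a maximum.
import csv
import sys

def main(argv):
    # Count one issue per data row in the plog-converter CSV output
    with open(argv[1]) as csvfile:
        tally = sum(1 for _ in csv.DictReader(csvfile))
    if len(argv) == 3:
        max_bugs = int(argv[2])
        print(f"Tally is {tally} against a maximum of {max_bugs}")
        return 1 if tally > max_bugs else 0
    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv))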
@@ -33,6 +33,14 @@
         "FileName": "xxhash.c",
         "Message": "A call of the 'memcpy' function will lead to underflow of the buffer '& state'."
     },
+    {
+        "CodeCurrent": 3039254062,
+        "CodeNext": 3404253786,
+        "CodePrev": 3282303307,
+        "ErrorCode": "V560",
+        "FileName": "dr_flac.h",
+        "Message": "A part of conditional expression is always true: blockSize >= _."
+    },
     {
         "CodeCurrent": 2009695132,
         "CodeNext": 17733,
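A suppression entry pins a diagnostic to its error code plus hashes of the current, previous, and next source lines (CodeCurrent/CodePrev/CodeNext), which lets matches survive line-number changes elsewhere in the file. To skim a suppress file or the exported supressible-list.json, something like the sketch below works; the top-level JSON shape handled here (a bare list, or a list under a "warnings" key) is an assumption:

# Sketch: tally suppress-file entries per source file and error code.
import collections
import json
import sys

def load_entries(path):
    with open(path) as f:
        data = json.load(f)
    # Assumption: either a bare list of entries, or a "warnings" list
    return data if isinstance(data, list) else data.get("warnings", [])

def main(path):
    tally = collections.Counter()
    for entry in load_entries(path):
        tally[(entry["FileName"], entry["ErrorCode"])] += 1
    for (filename, code), count in tally.most_common():
        print(f"{filename:20} {code:6} {count}")

if __name__ == "__main__":
    main(sys.argv[1])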
@@ -25,20 +25,20 @@ import sys

 def parse_issues(filename):
     """
-    Returns a dict of int keys and a list of string values, where the:
-     - keys are V### PVS-Studio error codes
-     - values are the message of the issue as found in a specific file
+    Returns a dict of source filename keys having occurrence-count values

     """
-    issues = collections.defaultdict(list)
+    cwd = os.getcwd()
+    issues = collections.defaultdict(int)
     with open(filename) as csvfile:
         reader = csv.DictReader(csvfile)
         for row in reader:
-            full = row['ErrorCode'] # extract the full code as an URL string
-            code = full[full.rfind('V'):full.rfind('"')] # get the trailing "V###" code
-            if code.startswith('V'):
-                # Convert the V### string into an integer for easy sorting
-                issues[int(code[1:])].append(row['Message'])
+            sourcefile = os.path.realpath(row['FilePath'])
+            # Skip non-file lines
+            if not sourcefile.startswith('/'):
+                continue
+            sourcefile = os.path.relpath(sourcefile, cwd)
+            issues[sourcefile] += 1
     return issues

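Applied, parse_issues() reduces to the following; a self-contained restatement of the patched function, with the imports the script already uses:

# Consolidated view of the patched parse_issues(), runnable standalone.
import collections
import csv
import os

def parse_issues(filename):
    """
    Returns a dict of source filename keys having occurrence-count values
    """
    cwd = os.getcwd()
    issues = collections.defaultdict(int)
    with open(filename) as csvfile:
        for row in csv.DictReader(csvfile):
            sourcefile = os.path.realpath(row['FilePath'])
            # Skip rows whose path does not resolve to an absolute path
            if not sourcefile.startswith('/'):
                continue
            issues[os.path.relpath(sourcefile, cwd)] += 1
    return issues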
@@ -48,20 +48,16 @@ def main(argv):

     # Get the issues and the total tally
     issues = parse_issues(argv[1])
-    tally = sum(len(messages) for messages in issues.values())
+    tally = sum(issues.values())

     if tally > 0:
-        # Step through the codes and summarize
-        print("Issues are tallied and sorted by code:\n")
-        print(" code | issue-string in common to all instances | tally")
-        print(" ----- --------------------------------------------- -----")
+        # find the longest source filename
+        longest_name = max(len(sourcefile) for sourcefile in issues.keys())
+        # Print the source filenames and their issue counts
+        print("Sorted by issue count:\n")

-        for code in sorted(issues.keys()):
-            messages = issues[code]
-            in_common = os.path.commonprefix(messages)[:45]
-            if len(in_common.split(' ')) < 4:
-                in_common = 'N/A (too little in-common between issues)'
-            print(f' [{code:4}] {in_common:45} : {len(messages)}')
+        for sourcefile in sorted(issues, key=issues.get, reverse=True):
+            print(f' {sourcefile:{longest_name}} : {issues[sourcefile]}')

     # Print the tally against the desired maximum
     if len(sys.argv) == 3:
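The new summary is then just a reverse sort of that per-file dict by count. A toy run, with invented filenames and counts, shows the intended output shape:

# Toy demonstration of the new per-file summary (data invented).
issues = {"src/a.cpp": 12, "src/bb.cpp": 3, "include/c.h": 7}
longest_name = max(len(sourcefile) for sourcefile in issues)
print("Sorted by issue count:\n")
for sourcefile in sorted(issues, key=issues.get, reverse=True):
    print(f'  {sourcefile:{longest_name}} : {issues[sourcefile]}')
# Output:
# Sorted by issue count:
#
#   src/a.cpp   : 12
#   include/c.h : 7
#   src/bb.cpp  : 3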