⬆️ updated fastcov

Niels Lohmann 2019-04-06 00:24:19 +02:00
parent 0a1ddd6882
commit b4b06d89b5
No known key found for this signature in database
GPG key ID: 7F3CEA63AE251B69
2 changed files with 192 additions and 63 deletions


@@ -55,7 +55,7 @@ if(JSON_Coverage)
     # add target to collect coverage information and generate HTML file
     # (filter script from https://stackoverflow.com/a/43726240/266378)
     add_custom_target(lcov_html2
-        COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/fastcov/fastcov.py --branch-coverage --lcov -o json.info --gcov ${GCOV_BIN} --compiler-directory ${CMAKE_BINARY_DIR} #--source-files ${SOURCE_FILES}
+        COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/fastcov/fastcov.py --branch-coverage --lcov -o json.info --gcov ${GCOV_BIN} --compiler-directory ${CMAKE_BINARY_DIR} --source-files ${SOURCE_FILES}
         COMMAND ${CMAKE_SOURCE_DIR}/test/thirdparty/imapdl/filterbr.py json.info > json.info.filtered.noexcept
         COMMAND genhtml --title "JSON for Modern C++" --legend --demangle-cpp --output-directory html --show-details --branch-coverage json.info.filtered.noexcept
         COMMENT "Generating HTML report test/html/index.html"


@@ -22,15 +22,17 @@ import os
 import sys
 import glob
 import json
+import time
 import argparse
 import threading
 import subprocess
 import multiprocessing

 MINIMUM_GCOV = (9,0,0)
-MINIMUM_CHUNK_SIZE = 10
+MINIMUM_CHUNK_SIZE = 5

 # Interesting metrics
+START_TIME = time.time()
 GCOVS_TOTAL = []
 GCOVS_SKIPPED = []
@@ -39,13 +41,28 @@ def chunks(l, n):
     for i in range(0, len(l), n):
         yield l[i:i + n]

+def stopwatch():
+    """Return number of seconds since last time this was called"""
+    global START_TIME
+    end_time = time.time()
+    delta = end_time - START_TIME
+    START_TIME = end_time
+    return delta
+
+def parseVersionFromLine(version_str):
+    """Given a string containing a dotted integer version, parse out integers and return as tuple"""
+    version = re.search(r'(\d+\.\d+\.\d+)[^\.]', version_str)
+    if not version:
+        return (0,0,0)
+    return tuple(map(int, version.group(1).split(".")))
+
 def getGcovVersion(gcov):
     p = subprocess.Popen([gcov, "-v"], stdout=subprocess.PIPE)
     output = p.communicate()[0].decode('UTF-8')
     p.wait()
-    version_str = re.search(r'\s([\d.]+)\s', output.split("\n")[0]).group(1)
-    version = tuple(map(int, version_str.split(".")))
-    return version
+    return parseVersionFromLine(output.split("\n")[0])

 def removeFiles(files):
     for file in files:
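
The new parseVersionFromLine() helper degrades gracefully when gcov prints an unexpected banner: the old re.search(...).group(1) chain raised AttributeError on a non-match, while the new code returns (0,0,0) so the minimum-version check fails cleanly. A quick sketch (the banner strings are illustrative, not taken from this commit):

    import re

    # Copied from the diff above
    def parseVersionFromLine(version_str):
        """Given a string containing a dotted integer version, parse out integers and return as tuple"""
        version = re.search(r'(\d+\.\d+\.\d+)[^\.]', version_str)
        if not version:
            return (0,0,0)
        return tuple(map(int, version.group(1).split(".")))

    print(parseVersionFromLine("gcov (Ubuntu 9.1.0-2ubuntu2) 9.1.0"))  # (9, 1, 0)
    print(parseVersionFromLine("gcov: unrecognized banner"))           # (0, 0, 0)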
@@ -72,7 +89,7 @@ def gcovWorker(cwd, gcov, files, chunk, gcov_filter_options, branch_coverage):
     p = subprocess.Popen([gcov, gcov_args] + chunk, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
     for line in iter(p.stdout.readline, b''):
         intermediate_json = json.loads(line.decode(sys.stdout.encoding))
-        intermediate_json_files = processGcovs(intermediate_json["files"], gcov_filter_options)
+        intermediate_json_files = processGcovs(cwd, intermediate_json["files"], gcov_filter_options)
         for f in intermediate_json_files:
             files.append(f) #thread safe, there might be a better way to do this though
         GCOVS_TOTAL.append(len(intermediate_json["files"]))
@@ -89,17 +106,19 @@ def processGcdas(cwd, gcov, jobs, gcda_files, gcov_filter_options, branch_covera
         threads.append(t)
         t.start()

-    log("Spawned %d gcov processes each processing at most %d gcda files" % (len(threads), chunk_size))
+    log("Spawned %d gcov threads, each processing at most %d gcda files" % (len(threads), chunk_size))

     for t in threads:
         t.join()

     return intermediate_json_files

-def processGcov(gcov, files, gcov_filter_options):
+def processGcov(cwd, gcov, files, gcov_filter_options):
+    # Add absolute path
+    gcov["file_abs"] = os.path.abspath(os.path.join(cwd, gcov["file"]))

     # If explicit sources were passed, check for match
-    source_file = os.path.abspath(gcov["file"])
     if gcov_filter_options["sources"]:
-        if source_file in gcov_filter_options["sources"]:
+        if gcov["file_abs"] in gcov_filter_options["sources"]:
             files.append(gcov)
             return
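
processGcov() now records an absolute path in gcov["file_abs"], resolved against the compiler directory, so that the relative paths gcov emits compare equal to the absolute paths built from --source-files. A minimal sketch of the normalization (all paths invented):

    import os

    cwd = "/home/user/project/build"              # --compiler-directory
    sources = {"/home/user/project/src/lib.cpp"}  # --source-files after os.path.abspath

    gcov = {"file": "../src/lib.cpp"}             # as reported by gcov
    gcov["file_abs"] = os.path.abspath(os.path.join(cwd, gcov["file"]))
    print(gcov["file_abs"] in sources)            # True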
@@ -118,65 +137,155 @@ def processGcov(gcov, files, gcov_filter_options):
     files.append(gcov)

-def processGcovs(gcov_files, gcov_filter_options):
+def processGcovs(cwd, gcov_files, gcov_filter_options):
     files = []
     for gcov in gcov_files:
-        processGcov(gcov, files, gcov_filter_options)
+        processGcov(cwd, gcov, files, gcov_filter_options)
     return files

-def dumpBranchCoverageToLcovInfo(f, source):
+def dumpBranchCoverageToLcovInfo(f, branches):
     branch_miss = 0
-    branch_total = 0
-    for line in source["lines"]:
-        if not line["branches"]:
-            continue
-        branch_total += len(line["branches"])
-        for i, branch in enumerate(line["branches"]):
+    for line_num, branch_counts in branches.items():
+        for i, count in enumerate(branch_counts):
             #Branch (<line number>, <block number>, <branch number>, <taken>)
-            f.write("BRDA:%d,%d,%d,%d\n" % (line["line_number"], int(i/2), i, branch["count"]))
-            branch_miss += int(branch["count"] == 0)
-    f.write("BRF:%d\n" % branch_total) #Branches Found
-    f.write("BRH:%d\n" % (branch_total - branch_miss)) #Branches Hit
+            f.write("BRDA:%s,%d,%d,%d\n" % (line_num, int(i/2), i, count))
+            branch_miss += int(count == 0)
+    f.write("BRF:%d\n" % len(branches)) #Branches Found
+    f.write("BRH:%d\n" % (len(branches) - branch_miss)) #Branches Hit

-def dumpToLcovInfo(cwd, intermediate, output, branch_coverage):
+def dumpToLcovInfo(fastcov_json, output):
     with open(output, "w") as f:
-        for source in intermediate:
-            #Convert to absolute path so it plays nice with genhtml
-            sf = source["file"]
-            if not os.path.isabs(source["file"]):
-                sf = os.path.abspath(os.path.join(cwd, source["file"]))
+        for sf, data in fastcov_json["sources"].items():
             f.write("SF:%s\n" % sf) #Source File

             fn_miss = 0
-            for function in source["functions"]:
-                f.write("FN:%d,%s\n" % (function["start_line"], function["name"])) #Function Start Line
-                f.write("FNDA:%d,%s\n" % (function["execution_count"], function["name"])) #Function Hits
-                fn_miss += int(function["execution_count"] == 0)
-            f.write("FNF:%d\n" % len(source["functions"])) #Functions Found
-            f.write("FNH:%d\n" % (len(source["functions"]) - fn_miss)) #Functions Hit
+            for function, fdata in data["functions"].items():
+                f.write("FN:%d,%s\n" % (fdata["start_line"], function)) #Function Start Line
+                f.write("FNDA:%d,%s\n" % (fdata["execution_count"], function)) #Function Hits
+                fn_miss += int(fdata["execution_count"] == 0)
+            f.write("FNF:%d\n" % len(data["functions"])) #Functions Found
+            f.write("FNH:%d\n" % (len(data["functions"]) - fn_miss)) #Functions Hit

-            if branch_coverage:
-                dumpBranchCoverageToLcovInfo(f, source)
+            if data["branches"]:
+                dumpBranchCoverageToLcovInfo(f, data["branches"])

             line_miss = 0
-            for line in source["lines"]:
-                f.write("DA:%d,%d\n" % (line["line_number"], line["count"])) #Line
-                line_miss += int(line["count"] == 0)
-            f.write("LF:%d\n" % len(source["lines"])) #Lines Found
-            f.write("LH:%d\n" % (len(source["lines"]) - line_miss)) #Lines Hit
+            for line_num, count in data["lines"].items():
+                f.write("DA:%s,%d\n" % (line_num, count)) #Line
+                line_miss += int(count == 0)
+            f.write("LF:%d\n" % len(data["lines"])) #Lines Found
+            f.write("LH:%d\n" % (len(data["lines"]) - line_miss)) #Lines Hit
             f.write("end_of_record\n")
-def dumpToGcovJson(intermediate, output):
+def exclMarkerWorker(fastcov_sources, chunk):
+    for source in chunk:
+        # If there are no covered lines, skip
+        if not fastcov_sources[source]["lines"]:
+            continue
+
+        start_line = 0
+        end_line = 0
+        with open(source) as f:
+            for i, line in enumerate(f, 1): #Start enumeration at line 1
+                if not "LCOV_EXCL" in line:
+                    continue
+
+                if "LCOV_EXCL_LINE" in line:
+                    if str(i) in fastcov_sources[source]["lines"]:
+                        del fastcov_sources[source]["lines"][str(i)]
+                    if str(i) in fastcov_sources[source]["branches"]:
+                        del fastcov_sources[source]["branches"][str(i)]
+                elif "LCOV_EXCL_START" in line:
+                    start_line = i
+                elif "LCOV_EXCL_STOP" in line:
+                    end_line = i
+
+                    if not start_line:
+                        end_line = 0
+                        continue
+
+                    for key in ["lines", "branches"]:
+                        for line_num in list(fastcov_sources[source][key].keys()):
+                            if int(line_num) <= end_line and int(line_num) >= start_line:
+                                del fastcov_sources[source][key][line_num]
+
+                    start_line = end_line = 0
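
exclMarkerWorker() implements lcov's exclusion markers directly on the distilled report: LCOV_EXCL_LINE drops a single line, and LCOV_EXCL_START/LCOV_EXCL_STOP drop a whole range once the STOP marker is seen. A runnable sketch, assuming the function above is in scope (the source contents are invented):

    import os, tempfile

    src = ("int x = 0;\n"
           "assert(x >= 0); // LCOV_EXCL_LINE\n"
           "// LCOV_EXCL_START\n"
           "debug_dump(x);\n"
           "// LCOV_EXCL_STOP\n")
    with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as tmp:
        tmp.write(src)

    fastcov_sources = {tmp.name: {"lines": {"1": 1, "2": 1, "4": 0}, "branches": {}}}
    exclMarkerWorker(fastcov_sources, [tmp.name])
    print(fastcov_sources[tmp.name]["lines"])  # {'1': 1} -- lines 2 and 4 excluded
    os.unlink(tmp.name)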
+def scanExclusionMarkers(fastcov_json, jobs):
+    chunk_size = max(MINIMUM_CHUNK_SIZE, int(len(fastcov_json["sources"]) / jobs) + 1)
+
+    threads = []
+    for chunk in chunks(list(fastcov_json["sources"].keys()), chunk_size):
+        t = threading.Thread(target=exclMarkerWorker, args=(fastcov_json["sources"], chunk))
+        threads.append(t)
+        t.start()
+
+    log("Spawned %d threads each scanning at most %d source files" % (len(threads), chunk_size))
+
+    for t in threads:
+        t.join()
+
+def distillFunction(function_raw, functions):
+    function_name = function_raw["name"]
+    if function_name not in functions:
+        functions[function_name] = {
+            "start_line": function_raw["start_line"],
+            "execution_count": function_raw["execution_count"]
+        }
+    else:
+        functions[function_name]["execution_count"] += function_raw["execution_count"]
+
+def distillLine(line_raw, lines, branches):
+    line_number = str(line_raw["line_number"])
+    if line_number not in lines:
+        lines[line_number] = line_raw["count"]
+    else:
+        lines[line_number] += line_raw["count"]
+
+    for i, branch in enumerate(line_raw["branches"]):
+        if line_number not in branches:
+            branches[line_number] = []
+        blen = len(branches[line_number])
+        glen = len(line_raw["branches"])
+        if blen < glen:
+            branches[line_number] += [0] * (glen - blen)
+        branches[line_number][i] += branch["count"]
+
+def distillSource(source_raw, sources):
+    source_name = source_raw["file_abs"]
+    if source_name not in sources:
+        sources[source_name] = {
+            "functions": {},
+            "branches": {},
+            "lines": {},
+        }
+
+    for function in source_raw["functions"]:
+        distillFunction(function, sources[source_name]["functions"])
+
+    for line in source_raw["lines"]:
+        distillLine(line, sources[source_name]["lines"], sources[source_name]["branches"])
+
+def distillReport(report_raw):
+    report_json = {
+        "sources": {}
+    }
+
+    for source in report_raw:
+        distillSource(source, report_json["sources"])
+
+    return report_json
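
The distill* functions collapse gcov-9's verbose per-invocation JSON into one aggregated dict per source file, summing execution counts across gcda files and merging branch counts elementwise. Illustrative input/output (data invented), assuming the functions above are in scope:

    raw = [{
        "file_abs": "/abs/path/example.cpp",
        "functions": [{"name": "main", "start_line": 3, "execution_count": 1}],
        "lines": [{"line_number": 5, "count": 2,
                   "branches": [{"count": 1}, {"count": 0}]}],
    }]
    print(distillReport(raw))
    # {'sources': {'/abs/path/example.cpp': {
    #     'functions': {'main': {'start_line': 3, 'execution_count': 1}},
    #     'branches': {'5': [1, 0]},
    #     'lines': {'5': 2}}}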
+def dumpToJson(intermediate, output):
     with open(output, "w") as f:
         json.dump(intermediate, f)

 def log(line):
     if not args.quiet:
-        print(line)
+        print("[{:.3f}s] {}".format(stopwatch(), line))

 def getGcovFilterOptions(args):
     return {
-        "sources": set([os.path.abspath(s) for s in args.sources]), #Make paths absolute
+        "sources": set([os.path.abspath(s) for s in args.sources]), #Make paths absolute, use set for fast lookups
         "include": args.includepost,
         "exclude": args.excludepost,
     }
@@ -188,33 +297,47 @@ def main(args):
         sys.stderr.write("Minimum gcov version {} required, found {}\n".format(".".join(map(str, MINIMUM_GCOV)), ".".join(map(str, current_gcov_version))))
         exit(1)

+    # Get list of gcda files to process
     gcda_files = getGcdaFiles(args.directory, args.gcda_files)
-    log("%d .gcda files" % len(gcda_files))
+    log("Found {} .gcda files ".format(len(gcda_files)))

+    # If gcda filtering is enabled, filter them out now
     if args.excludepre:
         gcda_files = getFilteredGcdaFiles(gcda_files, args.excludepre)
-        log("%d .gcda files after filtering" % len(gcda_files))
+        log("{} .gcda files after filtering".format(len(gcda_files)))

     # We "zero" the "counters" by simply deleting all gcda files
     if args.zerocounters:
         removeFiles(gcda_files)
-        log("%d .gcda files removed" % len(gcda_files))
+        log("{} .gcda files removed".format(len(gcda_files)))
         return

+    # Fire up one gcov per cpu and start processing gcdas
     gcov_filter_options = getGcovFilterOptions(args)
     intermediate_json_files = processGcdas(args.cdirectory, args.gcov, args.jobs, gcda_files, gcov_filter_options, args.branchcoverage)

+    # Summarize processing results
     gcov_total = sum(GCOVS_TOTAL)
     gcov_skipped = sum(GCOVS_SKIPPED)
-    log("%d .gcov files generated by gcov" % gcov_total)
-    log("%d .gcov files processed by fastcov (%d skipped)" % (gcov_total - gcov_skipped, gcov_skipped))
+    log("Processed {} .gcov files ({} total, {} skipped)".format(gcov_total - gcov_skipped, gcov_total, gcov_skipped))

+    # Distill all the extraneous info gcov gives us down to the core report
+    fastcov_json = distillReport(intermediate_json_files)
+    log("Aggregated raw gcov JSON into fastcov JSON report")

+    # Dump to desired file format
     if args.lcov:
-        dumpToLcovInfo(args.cdirectory, intermediate_json_files, args.output, args.branchcoverage)
-        log("Created lcov info file '%s'" % args.output)
+        scanExclusionMarkers(fastcov_json, args.jobs)
+        log("Scanned {} source files for exclusion markers".format(len(fastcov_json["sources"])))
+        dumpToLcovInfo(fastcov_json, args.output)
+        log("Created lcov info file '{}'".format(args.output))
+    elif args.gcov_raw:
+        dumpToJson(intermediate_json_files, args.output)
+        log("Created gcov raw json file '{}'".format(args.output))
     else:
-        dumpToGcovJson(intermediate_json_files, args.output)
-        log("Created gcov json file '%s'" % args.output)
+        dumpToJson(fastcov_json, args.output)
+        log("Created fastcov json file '{}'".format(args.output))

 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='A parallel gcov wrapper for fast coverage report generation')
@@ -224,20 +347,26 @@ if __name__ == '__main__':
     parser.add_argument('-b', '--branch-coverage', dest='branchcoverage', action="store_true", help='Include branch counts in the coverage report')

     # Filtering Options
-    parser.add_argument('-s', '--source-files', dest='sources', nargs="+", default=[], help='Filter: Specify exactly which source files should be included in the final report. Paths must be either absolute or relative to current directory.')
-    parser.add_argument('-e', '--exclude', dest='excludepost', nargs="+", default=[], help='Filter: Exclude source files from final report if they contain one of the provided substrings (i.e. /usr/include test/, etc.)')
-    parser.add_argument('-i', '--include', dest='includepost', nargs="+", default=[], help='Filter: Only include source files in final report that contain one of the provided substrings (i.e. src/ etc.)')
-    parser.add_argument('-f', '--gcda-files', dest='gcda_files', nargs="+", default=[], help='Filter: Specify exactly which gcda files should be processed instead of recursively searching the search directory.')
-    parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", default=[], help='Filter: Exclude gcda files from being processed via simple find matching (not regex)')
+    parser.add_argument('-s', '--source-files', dest='sources', nargs="+", metavar='', default=[], help='Filter: Specify exactly which source files should be included in the final report. Paths must be either absolute or relative to current directory.')
+    parser.add_argument('-e', '--exclude', dest='excludepost', nargs="+", metavar='', default=[], help='Filter: Exclude source files from final report if they contain one of the provided substrings (i.e. /usr/include test/, etc.)')
+    parser.add_argument('-i', '--include', dest='includepost', nargs="+", metavar='', default=[], help='Filter: Only include source files in final report that contain one of the provided substrings (i.e. src/ etc.)')
+    parser.add_argument('-f', '--gcda-files', dest='gcda_files', nargs="+", metavar='', default=[], help='Filter: Specify exactly which gcda files should be processed instead of recursively searching the search directory.')
+    parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", metavar='', default=[], help='Filter: Exclude gcda files from being processed via simple find matching (not regex)')

     parser.add_argument('-g', '--gcov', dest='gcov', default='gcov', help='Which gcov binary to use')

     parser.add_argument('-d', '--search-directory', dest='directory', default=".", help='Base directory to recursively search for gcda files (default: .)')
-    parser.add_argument('-c', '--compiler-directory', dest='cdirectory', default=".", help='Base directory compiler was invoked from (default: .)')
-    parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=multiprocessing.cpu_count(), help='Number of parallel gcov to spawn (default: %d).' % multiprocessing.cpu_count())
-    parser.add_argument('-o', '--output', dest='output', default="coverage.json", help='Name of output file (default: coverage.json)')
-    parser.add_argument('-l', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of gcov json')
+    parser.add_argument('-c', '--compiler-directory', dest='cdirectory', default=".", help='Base directory compiler was invoked from (default: .) \
+This needs to be set if invoking fastcov from somewhere other than the base compiler directory.')
+    parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=multiprocessing.cpu_count(), help='Number of parallel gcov to spawn (default: %d).' % multiprocessing.cpu_count())
+    parser.add_argument('-m', '--minimum-chunk-size', dest='minimum_chunk', type=int, default=5, help='Minimum number of files a thread should process (default: 5). \
+If you have only 4 gcda files but they are monstrously huge, you could change this value to 1 so that each thread will only process 1 gcda. Otherwise fastcov will spawn only 1 thread to process all of them.')
+    parser.add_argument('-l', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of fastcov json')
+    parser.add_argument('-r', '--gcov-raw', dest='gcov_raw', action="store_true", help='Output in gcov raw json instead of fastcov json')
+    parser.add_argument('-o', '--output', dest='output', default="coverage.json", help='Name of output file (default: coverage.json)')

     parser.add_argument('-q', '--quiet', dest='quiet', action="store_true", help='Suppress output to stdout')
     args = parser.parse_args()
     main(args)
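
Putting the new options together, a typical invocation after running an instrumented test suite might look like this (the gcov binary name and paths are illustrative):

    # lcov info for genhtml, honoring LCOV_EXCL_* markers
    ./fastcov.py --branch-coverage --lcov -o json.info --gcov gcov-9 --compiler-directory build

    # untouched gcov-9 JSON, before distilling
    ./fastcov.py --gcov-raw -o raw.json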