test: Generate incremental code coverage data
This generates a more detailed JSON coverage file containing maps of
exactly which statements were executed by tests.  A new workflow then
extracts the data from this file and updates the PR with a comment
showing how much of the changed code is covered by tests.
joeyparrish committed Aug 13, 2022
1 parent ca29b0b commit c9c8bb5
Showing 4 changed files with 235 additions and 29 deletions.
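For context before the diffs: the "more detailed JSON coverage file" from the commit message is coverage-details.json, produced by the coverage reporter added in karma.conf.js below, and the new Python script reads only its per-file "statementMap" and "s" entries. A rough, illustrative sketch of that shape (the line numbers and counts here are made up, and other keys in the report are omitted):

# Illustrative only: roughly what one file's entry in coverage-details.json
# looks like, limited to the two keys compute-incremental-coverage.py consumes.
example_entry = {
    "statementMap": {
        # statement id -> the source range that statement spans
        "0": {"start": {"line": 10, "column": 2}, "end": {"line": 12, "column": 3}},
        "1": {"start": {"line": 15, "column": 2}, "end": {"line": 15, "column": 20}},
    },
    "s": {
        # statement id -> how many times the tests executed it
        "0": 4,
        "1": 0,
    },
}
# Statement "0" ran, so lines 10-12 count as executed; line 15 was
# instrumented but never run.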
55 changes: 26 additions & 29 deletions .github/workflows/build-and-test.yaml
@@ -125,47 +125,44 @@ jobs:
--reporters spec --spec-hide-passed \
${{ matrix.extra_flags }}
- name: Find coverage report
- name: Find coverage reports
id: coverage
if: always() # Even on failure of an earlier step.
shell: bash
run: |
# If the directory exists...
# If the "coverage" directory exists...
if [ -d coverage ]; then
# Find the path to the coverage report. It includes the exact
# browser version in the path, so it will vary. Having a single
# path will make the artifact zip simpler, whereas using a wildcard
# in the upload step will result in a zip file with internal
# directories. In case there are multiple folders (there shouldn't
# be), this shell script will extract just a single path.
coverage_report="$( (ls coverage/*/coverage.json || true) | head -1 )"
# Show what's there, for debugging purposes.
ls -l coverage/
if [ -f "$coverage_report" ]; then
echo "Found coverage report: $coverage_report"
echo "::set-output name=coverage_report::$coverage_report"
else
echo "Could not locate coverage report!"
exit 1
fi
# Find the path to the coverage output folder. It includes the
# exact browser version in the path, so it will vary.
coverage_folder="$( (ls -d coverage/* || true) | head -1 )"
# Build a folder to stage all the coverage artifacts with
# predictable paths. The resulting zip file will not have any
# internal directories.
mkdir coverage/staging/
cp "$coverage_folder"/coverage.json coverage/staging/
cp "$coverage_folder"/coverage-details.json coverage/staging/
echo "${{ github.event.number }}" > coverage/staging/pr-number.json
echo "::set-output name=coverage_found::true"
echo "Coverage report staged."
else
echo "No coverage report generated."
fi
- uses: actions/upload-artifact@v3
# If there's a coverage report, upload it, even if a previous step
# failed.
if: ${{ always() && steps.coverage.outputs.coverage_report }}
- name: Upload coverage reports
uses: actions/upload-artifact@v3
if: ${{ always() && steps.coverage.outputs.coverage_found }}
with:
# This will create a download called coverage.zip containing only
# coverage.json.
path: ${{ steps.coverage.outputs.coverage_report }}
name: coverage
# This will create a download called coverage.zip containing all of
# these files, with no internal folders.
path: |
coverage/staging/coverage.json
coverage/staging/coverage-details.json
coverage/staging/pr-number.json
# Since we've already filtered this step for instances where there is
# an environment variable set for this, the file should definitely be
# there.
# an environment variable set, the file should definitely be there.
if-no-files-found: error

build_in_docker:
169 changes: 169 additions & 0 deletions .github/workflows/compute-incremental-coverage.py
@@ -0,0 +1,169 @@
#!/usr/bin/env python3
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import io
import json
import re
import subprocess
import zipfile

def StripGitDir(path):
# Strip the path to the git clone, leaving only the source path within the
# repo.
return re.sub(r'.*?/(lib|ui)/', r'\1/', path)

def RunCommand(args, text=True):
# print("Running command", args, file=sys.stderr)
proc = subprocess.run(args, capture_output=True, text=text)
if proc.returncode != 0:
raise RuntimeError("Command failed:", args, proc.stdout, proc.stderr)
return proc.stdout

def GitHubApi(repo, path, text=True):
args = ["gh", "api", "/repos/%s/%s" % (repo, path)]
output = RunCommand(args, text)
if text:
return json.loads(output)
else:
return output

def GetCoverageArtifacts(repo, run_id):
api_path = "actions/runs/%s/artifacts" % run_id
results = GitHubApi(repo, api_path)["artifacts"]
artifact = list(filter(lambda x: x["name"] == "coverage", results))[0]

api_path = "actions/artifacts/%s/zip" % artifact["id"]
zip_data = GitHubApi(repo, api_path, text=False)
return zipfile.ZipFile(io.BytesIO(zip_data), 'r')

class CoverageDetails(object):
def __init__(self, file_data):
json_data = json.loads(file_data)

self.files = {}

for path, path_data in json_data.items():
path = StripGitDir(path)

statement_to_lines = {}
instrumented_lines = set()
for key, value in path_data["statementMap"].items():
statement_to_lines[key] = []

start_line = value["start"]["line"]
end_line = value["end"]["line"]
for line in range(start_line, end_line + 1):
statement_to_lines[key].append(line)
instrumented_lines.add(line)

executed_lines = set()
for key, executed in path_data["s"].items():
if executed:
for line in statement_to_lines[key]:
executed_lines.add(line)

self.files[path] = {
"instrumented": instrumented_lines,
"executed": executed_lines,
}

class PullRequest(object):
def __init__(self, repo, number):
data = GitHubApi(repo, "pulls/%d" % number)
sha = data["merge_commit_sha"]

self.number = number
self.changes = {}

files = GitHubApi(repo, "commits/%s" % sha)["files"]

for file_data in files:
# The patch field is missing for binary files. Skip those.
if "patch" not in file_data:
continue

filename = file_data["filename"]
patch = file_data["patch"]

# Parse through the unified diff in "patch" to find the touched line
# numbers.
touched_lines = []
line_number = None
for line in patch.split("\n"):
if line[0] == "@":
# Turns a header like "@@ -749,7 +757,19 @@ foo" into line number 757.
# Note that the last part of the new file range could be omitted:
# "@@ -0,0 +1 @@ foo"
new_file_range = line.split("+")[1].split(" @@")[0]
line_number = int(new_file_range.split(",")[0])
elif line[0] == " ":
line_number += 1
elif line[0] == "+":
touched_lines.append(line_number)
line_number += 1

self.changes[filename] = touched_lines

def IncrementalCoverage(pr, coverage_details):
num_changed = 0
num_covered = 0

for path in pr.changes:
if path in coverage_details.files:
changed_lines = pr.changes[path]
instrumented_lines = coverage_details.files[path]["instrumented"]
executed_lines = coverage_details.files[path]["executed"]

for line in changed_lines:
# Only count the instrumented lines, not whitespace or comments.
if line in instrumented_lines:
num_changed += 1
if line in executed_lines:
num_covered += 1

if num_changed == 0:
return None
return num_covered / num_changed

def main():
parser = argparse.ArgumentParser(
description="Compute incremental code coverage for a PR",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--repo",
required=True,
help="The GitHub repo, such as shaka-project/shaka-player")
parser.add_argument(
"--run-id",
required=True,
help="The workflow run ID to download coverage data from")
args = parser.parse_args()

artifacts = GetCoverageArtifacts(args.repo, args.run_id)
coverage_details = CoverageDetails(artifacts.read("coverage-details.json"))
pr_number = json.loads(artifacts.read("pr-number.json"))
pr = PullRequest(args.repo, pr_number)
coverage = IncrementalCoverage(pr, coverage_details)

print("::set-output name=pr_number::%d" % pr_number)
if coverage is None:
print("::set-output name=coverage::No instrumented code was changed.")
else:
print("::set-output name=coverage::%.2f%%" % (coverage * 100.0))

if __name__ == "__main__":
main()
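The script above is driven by the GitHub API and the uploaded coverage artifact, but its two core steps (mapping a PR patch to new-file line numbers, then intersecting those with instrumented and executed lines) can be exercised in isolation. A small self-contained sketch with fabricated inputs, mirroring the logic of PullRequest and IncrementalCoverage above:

# Toy walk-through of the script's two core steps, using fabricated inputs
# (not real shaka-player data).

patch = (
    "@@ -10,2 +10,4 @@ function foo()\n"
    " kept_line\n"
    "+new_line_11\n"
    "+new_line_12\n"
    " kept_line")

# Step 1: find the new-file line numbers touched by the patch
# (same parsing as PullRequest.__init__ above).
touched_lines = []
line_number = None
for line in patch.split("\n"):
    if line[0] == "@":
        new_file_range = line.split("+")[1].split(" @@")[0]
        line_number = int(new_file_range.split(",")[0])
    elif line[0] == " ":
        line_number += 1
    elif line[0] == "+":
        touched_lines.append(line_number)
        line_number += 1

assert touched_lines == [11, 12]

# Step 2: intersect with the instrumented and executed lines derived from
# coverage-details.json (same counting as IncrementalCoverage above).
instrumented = {10, 11, 12, 15}
executed = {10, 11}
changed = [line for line in touched_lines if line in instrumented]  # [11, 12]
covered = [line for line in changed if line in executed]            # [11]
print("Incremental coverage: %.2f%%" % (100.0 * len(covered) / len(changed)))
# -> Incremental coverage: 50.00%

To run the real script locally, the invocation matches the workflow below: python .github/workflows/compute-incremental-coverage.py --repo <owner/repo> --run-id <run id>, with the gh CLI authenticated (e.g. via GITHUB_TOKEN).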
39 changes: 39 additions & 0 deletions .github/workflows/report-incremental-coverage.yaml
@@ -0,0 +1,39 @@
name: Report Incremental Coverage

# Runs when the build and test workflow completes. This will run with full
# privileges, even if the other workflow doesn't. That allows us to leave PR
# comments, when we would not be able to do so otherwise.
on:
workflow_run:
workflows: [Build and Test PR]
types: [completed]

jobs:
report:
if: ${{ github.event.workflow_run.event == 'pull_request' }}
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v2

- name: Compute incremental code coverage
id: compute
shell: bash
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Fetches the coverage data from the run that triggered the report,
# parses it, compares it to the changed lines in the PR, and computes
# the incremental code coverage.
run: |
python .github/workflows/compute-incremental-coverage.py \
--repo ${{ github.repository }} \
--run-id ${{ github.event.workflow_run.id }}
- name: Report incremental code coverage
uses: thollander/actions-comment-pull-request@686ab1cab89e0f715a44a0d04b9fdfdd4f33d751
with:
message: "Incremental code coverage: ${{ steps.compute.outputs.coverage }}"
comment_includes: "Incremental code coverage: "
pr_number: ${{ steps.compute.outputs.pr_number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
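The two steps above are connected through step outputs: the Python script prints ::set-output commands for pr_number and coverage on stdout, and the Actions runner exposes them as steps.compute.outputs.* to the comment step. A minimal sketch of that mapping, purely for illustration (the real parsing happens inside the runner, and ::set-output has since been deprecated in favor of writing to the GITHUB_OUTPUT file):

import re

def collect_step_outputs(stdout):
    # Map "::set-output name=KEY::VALUE" lines to an outputs dict, mirroring
    # what later steps see as steps.<id>.outputs.<key>.
    outputs = {}
    for line in stdout.splitlines():
        match = re.match(r"^::set-output name=(.+?)::(.*)$", line)
        if match:
            outputs[match.group(1)] = match.group(2)
    return outputs

# Example stdout from compute-incremental-coverage.py (values fabricated):
outputs = collect_step_outputs(
    "::set-output name=pr_number::1234\n"
    "::set-output name=coverage::83.33%\n")
assert outputs == {"pr_number": "1234", "coverage": "83.33%"}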
1 change: 1 addition & 0 deletions karma.conf.js
@@ -403,6 +403,7 @@ module.exports = (config) => {
{type: 'html', dir: 'coverage'},
{type: 'cobertura', dir: 'coverage', file: 'coverage.xml'},
{type: 'json-summary', dir: 'coverage', file: 'coverage.json'},
{type: 'json', dir: 'coverage', file: 'coverage-details.json'},
],
},
});
