[CI] Add script that performs diff on skipped tests between main #2

Open: wants to merge 11 commits into base: main

22 changes: 22 additions & 0 deletions .github/workflows/tests_bot.yml
@@ -0,0 +1,22 @@

name: tests-bot
on:
[status, push]
jobs:
run-tests-bot:
# if: ${{ github.repository == 'apache/tvm' && github.event.state == 'success' && github.event.context == 'tvm-ci/pr-head' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Comment skipped tests
env:
AWS_ACCESS_KEY_ID: ${{ secrets.CI_RESOURCES_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_RESOURCES_AWS_SECRET_ACCESS_KEY }}
AWS_SESSION_TOKEN: ${{ secrets.CI_RESOURCES_AWS_SECRET_SESSION_KEY }}
AWS_DEFAULT_REGION: us-west-2
COMMIT_SHA: ${{ github.event.sha }}
TARGET_URL: 'https://github.com/apache/tvm/pull/PR-12436/1'
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -eux
python tests/scripts/github_skipped_tests_comment.py
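For context, the test added below exercises this workflow's script in `--dry-run` mode. A minimal local sketch of that invocation, assuming a TVM checkout with an `origin` remote and `pr-reports/`/`main-reports/` JUnit XML directories already in place (as the test sets up), might look like:

```python
# Sketch only: mirrors how test_skipped_tests_comment (below) drives the script.
# TARGET_URL/COMMIT_SHA are the test fixture values, not real CI values; run
# from the repository root so the relative script path resolves.
import os
import subprocess

env = dict(os.environ)
env["TARGET_URL"] = "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect"
env["COMMIT_SHA"] = "SHA"

subprocess.run(
    [
        "python",
        "tests/scripts/github_skipped_tests_comment.py",
        "--dry-run",
        "--s3-prefix=tvm-jenkins-artifacts-prod",
        "--jenkins-prefix=ci.tlcpack.ai",
    ],
    env=env,
    check=True,
)
```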
141 changes: 141 additions & 0 deletions tests/python/ci/test_ci.py
@@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
"""Test various CI scripts and GitHub Actions workflows"""
import os
import shutil
import subprocess
import json
import textwrap
@@ -33,6 +35,145 @@ def parameterize_named(*values):
return pytest.mark.parametrize(",".join(keys), [tuple(d.values()) for d in values])


@tvm.testing.skip_if_wheel_test
@pytest.mark.parametrize(
# pylint: disable=line-too-long
"main_xml_file,main_xml_content,pr_xml_file,pr_xml_content,target_url,s3_prefix,"
"jenkins_prefix,commit_sha,expected_url,expected_body",
[
(
"unittest/file1.xml",
"""<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
</testcase>
</testsuite>
</testsuites>
""",
"unittest/file2.xml",
"""<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
<testcase classname="ctypes.tests.python.unittest.test_roofline"
name="test_estimate_peak_bandwidth[cuda]" time="4.679">
<skipped message="This is another skippe test" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"tvm-jenkins-artifacts-prod",
"ci.tlcpack.ai",
"SHA",
"issues/11594/comments",
"""<!---skipped-tests-comment-->\n\nThe list below shows some tests that ran in main but were skipped in the CI build of SHA:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\nunittest -> ctypes.tests.python.unittest.test_roofline#test_estimate_peak_bandwidth[cuda]\n```\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).""",
),
(
"unittest/file1.xml",
"""<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"unittest/file2.xml",
"""<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"tvm-jenkins-artifacts-prod",
"ci.tlcpack.ai",
"SHA",
"issues/11594/comments",
"""<!---skipped-tests-comment-->\n\nNo additional skipped tests found in this branch for commit SHA.""",
),
],
)
# pylint: enable=line-too-long
def test_skipped_tests_comment(
tmpdir_factory,
main_xml_file,
main_xml_content,
pr_xml_file,
pr_xml_content,
target_url,
s3_prefix,
jenkins_prefix,
commit_sha,
expected_url,
expected_body,
):
"""
Test that a comment listing the skipped-test diff is successfully left on PRs
"""
skipped_tests_script = REPO_ROOT / "tests" / "scripts" / "github_skipped_tests_comment.py"

def write_xml_file(root_dir, xml_file, xml_content):
shutil.rmtree(root_dir, ignore_errors=True)
file = f"""{root_dir}/{xml_file}"""
os.makedirs(os.path.dirname(file))
with open(file, "w") as f:
f.write(textwrap.dedent(xml_content))

git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
git.run("init")
git.run("checkout", "-b", "main")
git.run("remote", "add", "origin", "https://github.com/apache/tvm.git")

pr_test_report_dir = git.cwd + "/pr-reports"
write_xml_file(pr_test_report_dir, pr_xml_file, pr_xml_content)
main_test_report_dir = git.cwd + "/main-reports"
write_xml_file(main_test_report_dir, main_xml_file, main_xml_content)

proc = subprocess.run(
[
str(skipped_tests_script),
"--dry-run",
f"--s3-prefix={s3_prefix}",
f"--jenkins-prefix={jenkins_prefix}",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={"TARGET_URL": target_url, "COMMIT_SHA": commit_sha},
encoding="utf-8",
cwd=git.cwd,
check=False,
)
if proc.returncode != 0:
raise RuntimeError(f"Process failed:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}")

assert f"Dry run, would have posted {expected_url} with data {expected_body}." in proc.stderr


@tvm.testing.skip_if_wheel_test
@pytest.mark.parametrize(
"target_url,base_url,commit_sha,expected_url,expected_body",
211 changes: 211 additions & 0 deletions tests/scripts/github_skipped_tests_comment.py
@@ -0,0 +1,211 @@
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import logging
import argparse
import subprocess
import sys
from urllib import error
from xml.etree import ElementTree

import requests

from git_utils import git, GitHubRepo, parse_remote
from cmd_utils import init_log

SKIPPED_TESTS_COMMENT_MARKER = "<!---skipped-tests-comment-->\n\n"
GITHUB_ACTIONS_BOT_LOGIN = "github-actions[bot]"

PR_TEST_REPORT_DIR = "pr-reports"
MAIN_TEST_REPORT_DIR = "main-reports"


def retrieve_test_report(s3_url, target_dir):
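"""Recursively copy a pytest-results folder from S3 into target_dir via the AWS CLI."""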
command = f"aws s3 cp {s3_url} {target_dir} --recursive"
logging.info(f"Running command {command}")
proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE, encoding="utf-8")
if proc.returncode != 0:
raise RuntimeError(f"Command failed {command}:\nstdout:\n{proc.stdout}")


def retrieve_test_reports(pr_number, build_number, s3_prefix, jenkins_prefix):
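"""Download the pytest results for this PR build and for the latest successful main build."""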
cur_build_s3_link = (
f"s3://{s3_prefix}/tvm/PR-{str(pr_number)}/{str(build_number)}/pytest-results"
)
retrieve_test_report(cur_build_s3_link, PR_TEST_REPORT_DIR)

latest_main_build = requests.get(
f"https://{jenkins_prefix}/job/tvm/job/main/lastSuccessfulBuild/buildNumber"
).text
latest_build_s3_link = f"s3://{s3_prefix}/tvm/main/{latest_main_build}/pytest-results"
retrieve_test_report(latest_build_s3_link, MAIN_TEST_REPORT_DIR)


def get_pr_and_build_numbers(target_url):
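"""Parse the PR and build numbers from a Jenkins status URL (the path segments after "PR-")."""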
target_url = target_url[target_url.find("PR-") : len(target_url)]
split = target_url.split("/")
pr_number = split[0].strip("PR-")
build_number = split[1]
return {"pr_number": pr_number, "build_number": build_number}


def build_test_set(directory):
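"""Map each report subdirectory to the set of "classname#name" keys of its skipped testcases."""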
subdir_to_skipped = {}
subdirs = [
item for item in os.listdir(directory) if os.path.isdir(os.path.join(directory, item))
]
for subdir in subdirs:
subdir_to_skipped[subdir] = set()
for root, _, files in os.walk(directory + "/" + subdir):
for file in files:
test_report = ElementTree.parse(root + "/" + file)
for testcase in test_report.iter("testcase"):
skipped = testcase.find("skipped")
if skipped is not None:
key = testcase.attrib["classname"] + "#" + testcase.attrib["name"]
subdir_to_skipped[subdir].add(key)
return subdir_to_skipped


def to_node_name(dir_name: str):
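"""Convert a report directory name to a CI node name by replacing the first underscore with ': '."""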
return dir_name.replace("_", ": ", 1)


def build_comment(skipped_list, pr_number, build_number, commit_sha, jenkins_prefix):
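"""Build the bot comment body, listing tests skipped in this build but not in the last main build."""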
if len(skipped_list) == 0:
return f"{SKIPPED_TESTS_COMMENT_MARKER}No additional skipped tests found in this branch for commit {commit_sha}."

text = (
f"{SKIPPED_TESTS_COMMENT_MARKER}The list below shows some tests that ran in main but were skipped in the "
f"CI build of {commit_sha}:\n"
f"```\n"
)
for skip in skipped_list:
text += skip + "\n"
text += (
f"```\nA detailed report of ran tests is [here](https://{jenkins_prefix}/job/tvm/job/PR-{str(pr_number)}"
f"/{str(build_number)}/testReport/)."
)
return text


def get_pr_comments(github, url):
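"""Return the comments on the PR, or an empty list if the GitHub request fails."""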
try:
return github.get(url)
except error.HTTPError as e:
logging.exception(f"Failed to retrieve PR comments: {url}: {e}")
return []


def search_for_docs_comment(comments):
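"""Find an existing bot comment carrying the skipped-tests marker, or return None."""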
for comment in comments:
if (
comment["user"]["login"] == GITHUB_ACTIONS_BOT_LOGIN
and SKIPPED_TESTS_COMMENT_MARKER in comment["body"]
):
return comment
return None


if __name__ == "__main__":
help = (
"Compares the skipped tests of this PR against the last successful build on main. Also comments on the PR "
"issue when tests are skipped in this PR and not on main."
)
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--remote", default="origin", help="ssh remote to parse")
parser.add_argument("--s3-prefix", default="tvm-jenkins-artifacts-prod")
parser.add_argument("--jenkins-prefix", default="ci.tlcpack.ai")
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="run but don't send any request to GitHub",
)
args = parser.parse_args()
init_log()

remote = git(["config", "--get", f"remote.{args.remote}.url"])
user, repo = parse_remote(remote)

target_url = os.environ["TARGET_URL"]
pr_and_build = get_pr_and_build_numbers(target_url)

commit_sha = os.environ["COMMIT_SHA"]

if not args.dry_run:
retrieve_test_reports(
pr_number=pr_and_build["pr_number"],
build_number=pr_and_build["build_number"],
s3_prefix=args.s3_prefix,
jenkins_prefix=args.jenkins_prefix,
)

main_tests = build_test_set(MAIN_TEST_REPORT_DIR)
build_tests = build_test_set(PR_TEST_REPORT_DIR)

skipped_list = []
for subdir, skipped_set in build_tests.items():
skipped_main = main_tests.get(subdir)
if skipped_main is None:
logging.warning(f"Could not find directory {subdir} in main.")
continue

diff_set = skipped_set - skipped_main
if len(diff_set) != 0:
for test in diff_set:
skipped_list.append(f"{to_node_name(subdir)} -> {test}")

if len(skipped_list) == 0:
logging.info("No skipped tests found.")

body = build_comment(
skipped_list,
pr_and_build["pr_number"],
pr_and_build["build_number"],
commit_sha,
args.jenkins_prefix,
)
url = f'issues/{pr_and_build["pr_number"]}/comments'
if not args.dry_run:
github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)

# For now, only comment on PRs opened by driazati, gigiblender and areusch.
get_pr_url = f'pulls/{pr_and_build["pr_number"]}'
pull_request_body = github.get(get_pr_url)
author = pull_request_body["user"]["login"]
if author not in ["driazati", "gigiblender", "areusch"]:
logging.info(f"Skipping this action for user {author}")
sys.exit(0)

pr_comments = get_pr_comments(github, url)
comment = search_for_docs_comment(pr_comments)

if comment is not None:
comment_url = comment["url"]
comment_id = comment_url[comment_url.find("comments/"): len(comment_url)].strip("comments/")
github.patch(f'issues/comments/{comment_id}', {"body": body})
else:
github.post(url, {"body": body})
else:
logging.info(f"Dry run, would have posted {url} with data {body}.")