generic-poky/scripts/oe-build-perf-test

#!/usr/bin/python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Build performance test script"""
import argparse
import errno
import fcntl
import json
import logging
import os
import re
import shutil
import sys
from datetime import datetime
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
import oeqa.buildperf
from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
                            BuildPerfTestRunner, KernelDropCaches)
from oeqa.utils.commands import runCmd
from oeqa.utils.git import GitRepo, GitError
from oeqa.utils.metadata import metadata_from_bb, write_metadata_file

# Set-up logging
LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
                    datefmt='%Y-%m-%d %H:%M:%S')

log = logging.getLogger()

def acquire_lock(lock_f):
    """Acquire flock on file"""
    log.debug("Acquiring lock %s", os.path.abspath(lock_f.name))
    try:
        fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno == errno.EAGAIN:
            return False
        raise
    log.debug("Lock acquired")
    return True

def pre_run_sanity_check():
    """Sanity check of build environment"""
    build_dir = os.environ.get("BUILDDIR")
    if not build_dir:
        log.error("BUILDDIR not set. Please run the build environment setup "
                  "script.")
        return False
    if os.getcwd() != build_dir:
        log.error("Please run this script under BUILDDIR (%s)", build_dir)
        return False

    ret = runCmd('which bitbake', ignore_status=True)
    if ret.status:
        log.error("bitbake command not found")
        return False
    return True

def init_git_repo(path):
    """Check/create Git repository where to store results"""
    path = os.path.abspath(path)
    if os.path.isfile(path):
        log.error("Invalid Git repo %s: path exists but is not a directory", path)
        return False
    if not os.path.isdir(path):
        try:
            os.mkdir(path)
        except (FileNotFoundError, PermissionError) as err:
            log.error("Failed to mkdir %s: %s", path, err)
            return False
    if not os.listdir(path):
        log.info("Initializing a new Git repo at %s", path)
        GitRepo.init(path)
    try:
        GitRepo(path, is_topdir=True)
    except GitError:
        log.error("No Git repository but a non-empty directory found at %s.\n"
                  "Please specify a Git repository, an empty directory or "
                  "a non-existing directory", path)
        return False
    return True

def setup_file_logging(log_file):
    """Setup logging to file"""
    log_dir = os.path.dirname(log_file)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    formatter = logging.Formatter(LOG_FORMAT)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    log.addHandler(handler)

def archive_build_conf(out_dir):
    """Archive build/conf to test results"""
    src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
    tgt_dir = os.path.join(out_dir, 'build', 'conf')
    os.makedirs(os.path.dirname(tgt_dir))
    shutil.copytree(src_dir, tgt_dir)

def git_commit_results(repo_dir, results_dir, branch, tag, metadata):
    """Commit results into a Git repository"""
    repo = GitRepo(repo_dir, is_topdir=True)
    distro_branch = metadata['layers']['meta']['branch']
    distro_commit = metadata['layers']['meta']['commit']
    distro_commit_count = metadata['layers']['meta']['commit_count']

    # Replace keywords
    branch = branch.format(git_branch=distro_branch,
                           tester_host=metadata['hostname'])

    log.info("Committing test results into %s %s", repo_dir, branch)
    tmp_index = os.path.join(repo_dir, '.git', 'index.oe-build-perf')
    try:
        # Create new commit object from the new results
        env_update = {'GIT_INDEX_FILE': tmp_index,
                      'GIT_WORK_TREE': results_dir}
        repo.run_cmd('add .', env_update)
        tree = repo.run_cmd('write-tree', env_update)
        parent = repo.rev_parse(branch)
        msg = "Results of {}:{}\n".format(distro_branch, distro_commit)
        git_cmd = ['commit-tree', tree, '-m', msg]
        if parent:
            git_cmd += ['-p', parent]
        commit = repo.run_cmd(git_cmd, env_update)

        # Update branch head
        git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
        if parent:
            git_cmd.append(parent)
        repo.run_cmd(git_cmd)

        # Update current HEAD, if we're on branch 'branch'
        if repo.get_current_branch() == branch:
            log.info("Updating %s HEAD to latest commit", repo_dir)
            repo.run_cmd('reset --hard')

        # Create (annotated) tag
        if tag:
            # Find tags matching the pattern
            tag_keywords = dict(git_branch=distro_branch,
                                git_commit=distro_commit,
                                git_commit_count=distro_commit_count,
                                tester_host=metadata['hostname'],
                                tag_num='[0-9]{1,5}')
            tag_re = re.compile(tag.format(**tag_keywords) + '$')
            tag_keywords['tag_num'] = 0
            for existing_tag in repo.run_cmd('tag').splitlines():
                if tag_re.match(existing_tag):
                    tag_keywords['tag_num'] += 1

            tag = tag.format(**tag_keywords)
            msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                   distro_branch,
                                                   distro_commit)
            repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
    finally:
        if os.path.exists(tmp_index):
            os.unlink(tmp_index)
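
# Each line appended to the globalres file by update_globalres_file() below
# has the form (a sketch of the layout implied by gr_map):
#   <hostname>,<branch>:<commit>,<commit_count>,<v1>,...,<v12>
# where the twelve value fields hold the build times and sizes of the
# individual tests.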
def update_globalres_file(result_obj, filename, metadata):
    """Write results to globalres csv file"""
    # Map test names to time and size columns in globalres
    # The tuples represent index and length of times and sizes
    # respectively
    gr_map = {'test1': ((0, 1), (8, 1)),
              'test12': ((1, 1), (None, None)),
              'test13': ((2, 1), (9, 1)),
              'test2': ((3, 1), (None, None)),
              'test3': ((4, 3), (None, None)),
              'test4': ((7, 1), (10, 2))}

    values = ['0'] * 12
    for status, test, _ in result_obj.all_results():
        if status in ['ERROR', 'SKIPPED']:
            continue
        (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
        if t_ind is not None:
            values[t_ind:t_ind + t_len] = test.times
        if s_ind is not None:
            values[s_ind:s_ind + s_len] = test.sizes

    log.debug("Writing globalres log to %s", filename)
    rev_info = metadata['layers']['meta']
    with open(filename, 'a') as fobj:
        fobj.write('{},{}:{},{},'.format(metadata['hostname'],
                                         rev_info['branch'],
                                         rev_info['commit'],
                                         rev_info['commit_count']))
        fobj.write(','.join(values) + '\n')

def parse_args(argv):
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('-D', '--debug', action='store_true',
                        help='Enable debug level logging')
    parser.add_argument('--globalres-file',
                        type=os.path.abspath,
                        help="Append results to 'globalres' csv file")
    parser.add_argument('--lock-file', default='./oe-build-perf.lock',
                        metavar='FILENAME', type=os.path.abspath,
                        help="Lock file to use")
    parser.add_argument('-o', '--out-dir', default='results-{date}',
                        type=os.path.abspath,
                        help="Output directory for test results")
    parser.add_argument('-x', '--xml', action='store_true',
                        help='Enable JUnit xml output')
    parser.add_argument('--log-file',
                        default='{out_dir}/oe-build-perf-test.log',
                        help="Log file of this script")
    parser.add_argument('--run-tests', nargs='+', metavar='TEST',
                        help="List of tests to run")
    parser.add_argument('--commit-results', metavar='GIT_DIR',
                        type=os.path.abspath,
                        help="Commit result data to a (local) git repository")
    parser.add_argument('--commit-results-branch', metavar='BRANCH',
                        default="{git_branch}",
                        help="Commit results to branch BRANCH.")
    parser.add_argument('--commit-results-tag', metavar='TAG',
                        default="{git_branch}/{git_commit_count}-g{git_commit}/{tag_num}",
                        help="Tag results commit with TAG.")

    return parser.parse_args(argv)
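
# Note: the --commit-results-branch and --commit-results-tag defaults above are
# format strings. git_commit_results() expands {git_branch} and {tester_host}
# in the branch pattern, and additionally {git_commit}, {git_commit_count} and
# {tag_num} in the tag pattern.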

def main(argv=None):
    """Script entry point"""
    args = parse_args(argv)

    # Set-up log file
    out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
    setup_file_logging(args.log_file.format(out_dir=out_dir))

    if args.debug:
        log.setLevel(logging.DEBUG)

    lock_f = open(args.lock_file, 'w')
    if not acquire_lock(lock_f):
        log.error("Another instance of this script is running, exiting...")
        return 1

    if not pre_run_sanity_check():
        return 1
    if args.commit_results:
        if not init_git_repo(args.commit_results):
            return 1

    # Check our capability to drop caches and ask for a password if needed
    KernelDropCaches.check()

    # Load build perf tests
    loader = BuildPerfTestLoader()
    if args.run_tests:
        suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
    else:
        suite = loader.loadTestsFromModule(oeqa.buildperf)

    # Save test metadata
    metadata = metadata_from_bb()
    log.info("Testing Git revision branch:commit %s:%s (%s)",
             metadata['layers']['meta']['branch'],
             metadata['layers']['meta']['commit'],
             metadata['layers']['meta']['commit_count'])
    if args.xml:
        write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
    else:
        with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
            json.dump(metadata, fobj, indent=2)
    archive_build_conf(out_dir)

    runner = BuildPerfTestRunner(out_dir, verbosity=2)

    # Suppress logger output to stderr so that the output from unittest
    # is not mixed with occasional logger output
    log.handlers[0].setLevel(logging.CRITICAL)

    # Run actual tests
    result = runner.run(suite)

    # Restore logger output to stderr
    log.handlers[0].setLevel(log.level)

    if args.xml:
        result.write_results_xml()
    else:
        result.write_results_json()

    if args.globalres_file:
        update_globalres_file(result, args.globalres_file, metadata)
    if args.commit_results:
        git_commit_results(args.commit_results, out_dir,
                           args.commit_results_branch, args.commit_results_tag,
                           metadata)

    if result.wasSuccessful():
        return 0

    return 2

if __name__ == '__main__':
    sys.exit(main())