author     Jiri Olsa <jolsa@redhat.com>  2012-10-31 15:52:47 +0100
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2012-10-31 16:20:58 -0200
commit     52502bf201a85b5b51a384037a002d0b39093df0 (patch)
tree       c4d2bcbc4c1f7c10693b132ad70951c06c2cee86 /tools/perf/tests/attr.py
parent     perf tests: Move test objects into 'tests' directory (diff)
download   linux-dev-52502bf201a85b5b51a384037a002d0b39093df0.tar.xz
           linux-dev-52502bf201a85b5b51a384037a002d0b39093df0.zip
perf tests: Add framework for automated perf_event_attr tests
The idea is to run a perf session while intercepting the sys_perf_event_open
function. For each sys_perf_event_open call we store the perf_event_attr data
to a file, to be checked later against what we expect.

You can run this by:

  $ python ./tests/attr.py -d ./tests/attr/ -p ./perf -v

v2 changes:
  - preserve errno value in the hook

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20121031145247.GB1027@krava.brq.redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
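As orientation for the script below, here is a sketch of the test definition
format that attr.py consumes. The file name and the concrete values are
illustrative assumptions; only the section and key names follow what the
parser reads. A [config] section gives the perf command, its arguments and the
expected exit code; each [event...] section lists expected perf_event_attr
terms, and a ':' suffix such as [event:base-record] pulls defaults from a file
of that name in the -d test directory. A value may list alternatives separated
by '|', and '*' matches anything. The hooked perf run writes files of the same
shape (including fd and group_fd) into a temporary directory, which the script
loads back and compares against the expectations in both directions.

  # hypothetical test file, e.g. tests/attr/test-record-basic
  [config]
  command = record
  args    = kill >/dev/null 2>&1
  ret     = 1

  # 'base-record' is assumed to be another file in the -d directory whose
  # [event] section supplies default perf_event_attr terms
  [event:base-record]
  fd            = 1
  group_fd      = -1
  type          = 0|1
  sample_period = 4000
  sample_type   = *
  optional      = 0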
Diffstat (limited to 'tools/perf/tests/attr.py')
-rw-r--r--  tools/perf/tests/attr.py  313
1 file changed, 313 insertions(+), 0 deletions(-)
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
new file mode 100644
index 000000000000..e98c726ac811
--- /dev/null
+++ b/tools/perf/tests/attr.py
@@ -0,0 +1,313 @@
+#! /usr/bin/python
+
+import os
+import sys
+import glob
+import optparse
+import tempfile
+import logging
+import shutil
+import ConfigParser
+
+class Fail(Exception):
+    def __init__(self, test, msg):
+        self.msg = msg
+        self.test = test
+    def getMsg(self):
+        return '\'%s\' - %s' % (self.test.path, self.msg)
+
+class Unsup(Exception):
+    def __init__(self, test):
+        self.test = test
+    def getMsg(self):
+        return '\'%s\'' % self.test.path
+
+class Event(dict):
+    terms = [
+        'flags',
+        'type',
+        'size',
+        'config',
+        'sample_period',
+        'sample_type',
+        'read_format',
+        'disabled',
+        'inherit',
+        'pinned',
+        'exclusive',
+        'exclude_user',
+        'exclude_kernel',
+        'exclude_hv',
+        'exclude_idle',
+        'mmap',
+        'comm',
+        'freq',
+        'inherit_stat',
+        'enable_on_exec',
+        'task',
+        'watermark',
+        'precise_ip',
+        'mmap_data',
+        'sample_id_all',
+        'exclude_host',
+        'exclude_guest',
+        'exclude_callchain_kernel',
+        'exclude_callchain_user',
+        'wakeup_events',
+        'bp_type',
+        'config1',
+        'config2',
+        'branch_sample_type',
+        'sample_regs_user',
+        'sample_stack_user',
+    ]
+
+    def add(self, data):
+        for key, val in data:
+            log.debug(" %s = %s" % (key, val))
+            self[key] = val
+
+    def __init__(self, name, data, base):
+        log.info(" Event %s" % name)
+        self.name = name
+        self.group = ''
+        self.add(base)
+        self.add(data)
+
+    def compare_data(self, a, b):
+        a_list = a.split('|')
+        b_list = b.split('|')
+
+        for a_item in a_list:
+            for b_item in b_list:
+                if (a_item == b_item):
+                    return True
+                elif (a_item == '*') or (b_item == '*'):
+                    return True
+
+        return False
+
+    def equal(self, other):
+        for t in Event.terms:
+            if t not in self or t not in other:
+                return False
+            log.debug(" [%s] %s %s" % (t, self[t], other[t]))
+            if not self.compare_data(self[t], other[t]):
+                return False
+        return True
+
+    def is_optional(self):
+        # 'optional' may be absent from a test file; default to not optional
+        if self.get('optional', '0') == '1':
+            return True
+        return False
+
+class Test(object):
+    def __init__(self, path, options):
+        parser = ConfigParser.SafeConfigParser()
+        parser.read(path)
+
+        log.warning("running '%s'" % path)
+
+        self.path = path
+        self.test_dir = options.test_dir
+        self.perf = options.perf
+        self.command = parser.get('config', 'command')
+        self.args = parser.get('config', 'args')
+
+        try:
+            self.ret = parser.get('config', 'ret')
+        except ConfigParser.NoOptionError:
+            self.ret = 0
+
+        self.expect = {}
+        self.result = {}
+        log.info(" loading expected events")
+        self.load_events(path, self.expect)
+
+    def is_event(self, name):
+        if name.find("event") == -1:
+            return False
+        else:
+            return True
+
+    def load_events(self, path, events):
+        parser_event = ConfigParser.SafeConfigParser()
+        parser_event.read(path)
+
+        for section in filter(self.is_event, parser_event.sections()):
+
+            parser_items = parser_event.items(section)
+            base_items = {}
+
+            if (':' in section):
+                base = section[section.index(':') + 1:]
+                parser_base = ConfigParser.SafeConfigParser()
+                parser_base.read(self.test_dir + '/' + base)
+                base_items = parser_base.items('event')
+
+            e = Event(section, parser_items, base_items)
+            events[section] = e
+
+    def run_cmd(self, tempdir):
+        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
+              self.perf, self.command, tempdir, self.args)
+        ret = os.WEXITSTATUS(os.system(cmd))
+
+        log.info(" running '%s' ret %d " % (cmd, ret))
+
+        if ret != int(self.ret):
+            raise Unsup(self)
+
+    def compare(self, expect, result):
+        match = {}
+
+        log.info(" compare")
+
+        # For each expected event find all matching
+        # events in result. Fail if there is none.
+        for exp_name, exp_event in expect.items():
+            exp_list = []
+            log.debug(" matching [%s]" % exp_name)
+            for res_name, res_event in result.items():
+                log.debug(" to [%s]" % res_name)
+                if (exp_event.equal(res_event)):
+                    exp_list.append(res_name)
+                    log.debug(" ->OK")
+                else:
+                    log.debug(" ->FAIL")
+
+            log.info(" match: [%s] optional(%d) matches %s" %
+                     (exp_name, exp_event.is_optional(), str(exp_list)))
+
+            # we did not find any matching event - fail
+            if (not exp_list) and (not exp_event.is_optional()):
+                raise Fail(self, 'match failure')
+
+            match[exp_name] = exp_list
+
+        # For each defined group in the expected events
+        # check we match the same group in the result.
+        for exp_name, exp_event in expect.items():
+            group = exp_event.group
+
+            if (group == ''):
+                continue
+
+            # XXX group matching does not account for
+            # optional events as the matching above does
+            for res_name in match[exp_name]:
+                res_group = result[res_name].group
+                if res_group not in match[group]:
+                    raise Fail(self, 'group failure')
+
+                log.info(" group: [%s] matches group leader %s" %
+                         (exp_name, str(match[group])))
+
+        log.info(" matched")
+
+    def resolve_groups(self, events):
+        for name, event in events.items():
+            group_fd = event['group_fd']
+            if group_fd == '-1':
+                continue
+
+            for iname, ievent in events.items():
+                if (ievent['fd'] == group_fd):
+                    event.group = iname
+                    log.debug('[%s] has group leader [%s]' % (name, iname))
+                    break
+
+    def run(self):
+        tempdir = tempfile.mkdtemp()
+
+        # run the test command
+        self.run_cmd(tempdir)
+
+        # load result events written out by the hooked perf run
+        log.info(" loading result events")
+        for f in glob.glob(tempdir + '/event*'):
+            self.load_events(f, self.result)
+
+        # resolve group_fd to event names
+        self.resolve_groups(self.expect)
+        self.resolve_groups(self.result)
+
+        # match expectation against results, both ways
+        self.compare(self.expect, self.result)
+        self.compare(self.result, self.expect)
+
+        # cleanup
+        shutil.rmtree(tempdir)
+
+
+def run_tests(options):
+    for f in glob.glob(options.test_dir + '/' + options.test):
+        try:
+            Test(f, options).run()
+        except Unsup, obj:
+            log.warning("unsupp %s" % obj.getMsg())
+
+def setup_log(verbose):
+    global log
+    level = logging.CRITICAL
+
+    if verbose == 1:
+        level = logging.WARNING
+    if verbose == 2:
+        level = logging.INFO
+    if verbose >= 3:
+        level = logging.DEBUG
+
+    log = logging.getLogger('test')
+    log.setLevel(level)
+    ch = logging.StreamHandler()
+    ch.setLevel(level)
+    formatter = logging.Formatter('%(message)s')
+    ch.setFormatter(formatter)
+    log.addHandler(ch)
+
+USAGE = '''%s [OPTIONS]
+    -d dir  # tests dir
+    -p path # perf binary
+    -t test # single test
+    -v      # verbose level
+''' % sys.argv[0]
+
+def main():
+    parser = optparse.OptionParser(usage=USAGE)
+
+    parser.add_option("-t", "--test",
+                      action="store", type="string", dest="test")
+    parser.add_option("-d", "--test-dir",
+                      action="store", type="string", dest="test_dir")
+    parser.add_option("-p", "--perf",
+                      action="store", type="string", dest="perf")
+    parser.add_option("-v", "--verbose",
+                      action="count", dest="verbose")
+
+    options, args = parser.parse_args()
+    if args:
+        parser.error('FAILED wrong arguments %s' % ' '.join(args))
+        return -1
+
+    setup_log(options.verbose)
+
+    if not options.test_dir:
+        print 'FAILED no -d option specified'
+        sys.exit(-1)
+
+    if not options.test:
+        options.test = 'test*'
+
+    try:
+        run_tests(options)
+
+    except Fail, obj:
+        print "FAILED %s" % obj.getMsg()
+        sys.exit(-1)
+
+    sys.exit(0)
+
+if __name__ == '__main__':
+    main()