[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/libc/glibc/glibc-2.22/benchtests/scripts/bench.py b/ap/libc/glibc/glibc-2.22/benchtests/scripts/bench.py
new file mode 100755
index 0000000..ee856db
--- /dev/null
+++ b/ap/libc/glibc/glibc-2.22/benchtests/scripts/bench.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+
+"""Benchmark program generator script
+
+This script takes a function name as input and generates a program using
+an input file located in the benchtests directory.  The name of the
+input file should be of the form foo-inputs where 'foo' is the name of
+the function.
+"""
+
+from __future__ import print_function
+import sys
+import os
+import itertools
+
+# Macro definitions for functions that take no arguments.  For functions
+# that take arguments, the STRUCT_TEMPLATE, ARGS_TEMPLATE and
+# VARIANTS_TEMPLATE are used instead.
+DEFINES_TEMPLATE = '''
+#define CALL_BENCH_FUNC(v, i) %(func)s ()
+#define NUM_VARIANTS (1)
+#define NUM_SAMPLES(v) (1)
+#define VARIANT(v) FUNCNAME "()"
+'''
+
+# Structures to store arguments for the function call.  A function may
+# have its inputs partitioned to represent distinct performance
+# characteristics or distinct flavors of the function.  Each such
+# variant is represented by the _VARIANT structure.  The ARGS structure
+# represents a single set of arguments.
+STRUCT_TEMPLATE = '''
+#define CALL_BENCH_FUNC(v, i) %(func)s (%(func_args)s)
+
+struct args
+{
+%(args)s
+  double timing;
+};
+
+struct _variants
+{
+  const char *name;
+  int count;
+  struct args *in;
+};
+'''
+
+# The actual input arguments.
+ARGS_TEMPLATE = '''
+struct args in%(argnum)d[%(num_args)d] = {
+%(args)s
+};
+'''
+
+# The actual variants, along with macros defined to access the variants.
+VARIANTS_TEMPLATE = '''
+struct _variants variants[%(num_variants)d] = {
+%(variants)s
+};
+
+#define NUM_VARIANTS %(num_variants)d
+#define NUM_SAMPLES(i) (variants[i].count)
+#define VARIANT(i) (variants[i].name)
+'''
+
+# Epilogue for the generated source file.
+EPILOGUE = '''
+#define RESULT(__v, __i) (variants[(__v)].in[(__i)].timing)
+#define RESULT_ACCUM(r, v, i, old, new) \\
+        ((RESULT ((v), (i))) = (RESULT ((v), (i)) * (old) + (r)) / ((new) + 1))
+#define BENCH_FUNC(i, j) ({%(getret)s CALL_BENCH_FUNC (i, j);})
+#define FUNCNAME "%(func)s"
+#include "bench-skeleton.c"'''
+
+
+def gen_source(func, directives, all_vals):
+    """Generate source for the function
+
+    Generate the C source for the function from the values and
+    directives.
+
+    Args:
+      func: The function name
+      directives: A dictionary of directives applicable to this function
+      all_vals: A dictionary of input values
+    """
+    # The includes go in first.
+    for header in directives['includes']:
+        print('#include <%s>' % header)
+
+    for header in directives['include-sources']:
+        print('#include "%s"' % header)
+
+    # Print macros.  This branches out to a separate routine if
+    # the function takes arguments.
+    if not directives['args']:
+        print(DEFINES_TEMPLATE % {'func': func})
+        outargs = []
+    else:
+        outargs = _print_arg_data(func, directives, all_vals)
+
+    # Print the output variable definitions if necessary.
+    for out in outargs:
+        print(out)
+
+    # If we have a return value from the function, make sure it is
+    # assigned to prevent the compiler from optimizing out the
+    # call.
+    if directives['ret']:
+        print('static %s volatile ret;' % directives['ret'])
+        getret = 'ret = '
+    else:
+        getret = ''
+
+    # Test initialization.
+    if directives['init']:
+        print('#define BENCH_INIT %s' % directives['init'])
+
+    print(EPILOGUE % {'getret': getret, 'func': func})
+
+
+def _print_arg_data(func, directives, all_vals):
+    """Print argument data
+
+    This is a helper function for gen_source that prints structure and
+    values for arguments and their variants and returns output arguments
+    if any are found.
+
+    Args:
+      func: Function name
+      directives: A dictionary of directives applicable to this function
+      all_vals: A dictionary of input values
+
+    Returns:
+      A list of definitions for function arguments that act as output
+      parameters.
+    """
+    # First, all of the definitions.  CALL_BENCH_FUNC, the args struct
+    # and the output arguments are all emitted in a single traversal of
+    # the argument list.
+    func_args = []
+    arg_struct = []
+    outargs = []
+
+    for arg, i in zip(directives['args'], itertools.count()):
+        if arg[0] == '<' and arg[-1] == '>':
+            pos = arg.rfind('*')
+            if pos == -1:
+                die('Output argument must be a pointer type')
+
+            outargs.append('static %s out%d;' % (arg[1:pos], i))
+            func_args.append(' &out%d' % i)
+        else:
+            arg_struct.append('  %s volatile arg%d;' % (arg, i))
+            func_args.append('variants[v].in[i].arg%d' % i)
+
+    print(STRUCT_TEMPLATE % {'args' : '\n'.join(arg_struct), 'func': func,
+                             'func_args': ', '.join(func_args)})
+
+    # Now print the values.
+    variants = []
+    for (k, vals), i in zip(all_vals.items(), itertools.count()):
+        out = ['  {%s, 0},' % v for v in vals]
+
+        # Members for the variants structure list that we will
+        # print later.
+        variants.append('  {"%s", %d, in%d},' % (k, len(vals), i))
+        print(ARGS_TEMPLATE % {'argnum': i, 'num_args': len(vals),
+                               'args': '\n'.join(out)})
+
+    # Print the variants and the last set of macros.
+    print(VARIANTS_TEMPLATE % {'num_variants': len(all_vals),
+                               'variants': '\n'.join(variants)})
+    return outargs
+
+
+def _process_directive(d_name, d_val):
+    """Process a directive.
+
+    Evaluate the directive name and value passed and return the
+    processed value. This is a helper function for parse_file.
+
+    Args:
+      d_name: Name of the directive
+      d_val: The string value to process
+
+    Returns:
+      The processed value, which may be the string as it is or an object
+      that describes the directive.
+    """
+    # Process the directive values if necessary.  name and ret don't
+    # need any processing.
+    if d_name.startswith('include'):
+        d_val = d_val.split(',')
+    elif d_name == 'args':
+        d_val = d_val.split(':')
+
+    # Return the values.
+    return d_val
+
+
+def parse_file(func):
+    """Parse an input file
+
+    Given a function name, open and parse an input file for the function
+    and get the necessary parameters for the generated code and the list
+    of inputs.
+
+    Args:
+      func: The function name
+
+    Returns:
+      A tuple of two elements, one a dictionary of directives and the
+      other a dictionary of all input values.
+    """
+    all_vals = {}
+    # Valid directives.
+    directives = {
+            'name': '',
+            'args': [],
+            'includes': [],
+            'include-sources': [],
+            'ret': '',
+            'init': ''
+    }
+
+    try:
+        with open('%s-inputs' % func) as f:
+            for line in f:
+                # Look for a directive and parse it if found.
+                if line.startswith('##'):
+                    try:
+                        d_name, d_val = line[2:].split(':', 1)
+                        d_name = d_name.strip()
+                        d_val = d_val.strip()
+                        directives[d_name] = _process_directive(d_name, d_val)
+                    except (ValueError, IndexError, KeyError):
+                        die('Invalid directive: %s' % line[2:])
+
+                # Skip blank lines and comments.
+                line = line.split('#', 1)[0].rstrip()
+                if not line:
+                    continue
+
+                # Otherwise, we're an input.  Add to the appropriate
+                # input set.
+                cur_name = directives['name']
+                all_vals.setdefault(cur_name, [])
+                all_vals[cur_name].append(line)
+    except IOError as ex:
+        die("Failed to open input file (%s): %s" % (ex.filename, ex.strerror))
+
+    return directives, all_vals
+
+
+def die(msg):
+    """Exit with an error
+
+    Prints an error message to the standard error stream and exits with
+    a non-zero status.
+
+    Args:
+      msg: The error message to print to standard error
+    """
+    print('%s\n' % msg, file=sys.stderr)
+    sys.exit(os.EX_DATAERR)
+
+
+def main(args):
+    """Main function
+
+    Use the first command line argument as function name and parse its
+    input file to generate C source that calls the function repeatedly
+    for the input.
+
+    Args:
+      args: The command line arguments with the program name dropped
+
+    Returns:
+      os.EX_USAGE on error and os.EX_OK on success.
+    """
+    if len(args) != 1:
+        print('Usage: %s <function>' % sys.argv[0])
+        return os.EX_USAGE
+
+    directives, all_vals = parse_file(args[0])
+    gen_source(args[0], directives, all_vals)
+    return os.EX_OK
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
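For reference, parse_file above implies this shape for a foo-inputs file:
directive lines start with '##' and are split on the first ':'; the 'args'
value is further split on ':' (one C type per argument, with '<type *>'
marking an output argument), 'includes' is split on ','; everything left
after stripping '#' comments is one input record per line.  A minimal
hypothetical sketch (the values are illustrative, not part of this commit):

    ## includes: math.h
    ## args: double:double
    ## ret: double
    # one comma-separated argument set per line
    42.0, 42.0
    0x1.fp10, 0x1.fp10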
diff --git a/ap/libc/glibc/glibc-2.22/benchtests/scripts/benchout.schema.json b/ap/libc/glibc/glibc-2.22/benchtests/scripts/benchout.schema.json
new file mode 100644
index 0000000..affb7c1
--- /dev/null
+++ b/ap/libc/glibc/glibc-2.22/benchtests/scripts/benchout.schema.json
@@ -0,0 +1,42 @@
+{
+  "title": "benchmark",
+  "type": "object",
+  "properties": {
+    "timing_type": {
+      "type": "string"
+    },
+    "functions": {
+      "title": "Associative array of functions",
+      "type": "object",
+      "patternProperties": {
+        "^[_a-zA-Z][_a-zA-Z0-9]+$": {
+          "title": "Function names",
+          "type": "object",
+          "patternProperties": {
+            "^[_a-zA-Z0-9]*$": {
+              "title": "Function variants",
+              "type": "object",
+              "properties": {
+                "duration": {"type": "number"},
+                "iterations": {"type": "number"},
+                "max": {"type": "number"},
+                "min": {"type": "number"},
+                "mean": {"type": "number"},
+                "timings": {
+                  "type": "array",
+                  "items": {"type": "number"}
+                }
+              },
+              "required": ["duration", "iterations", "max", "min", "mean"],
+              "additionalProperties": false
+            }
+          },
+          "additionalProperties": false
+        }
+      },
+      "minProperties": 1
+    }
+  },
+  "required": ["timing_type", "functions"],
+  "additionalProperties": false
+}
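To make the schema concrete, here is a minimal sketch of a document it
accepts, checked with the same jsonschema module that import_bench.py relies
on (the function name and numbers are made up):

    import json
    import jsonschema

    sample = {
        "timing_type": "hp_timing",
        "functions": {
            "acos": {
                # "" is a legal variant key; the variant name pattern
                # allows the empty string.
                "": {
                    "duration": 1.2e9,
                    "iterations": 2048,
                    "max": 1425.0,
                    "min": 201.0,
                    "mean": 695.3,
                    "timings": [201.0, 330.5, 1425.0]
                }
            }
        }
    }

    with open('benchout.schema.json') as schemafile:
        # Raises jsonschema.ValidationError if the document does not conform.
        jsonschema.validate(sample, json.load(schemafile))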
diff --git a/ap/libc/glibc/glibc-2.22/benchtests/scripts/compare_bench.py b/ap/libc/glibc/glibc-2.22/benchtests/scripts/compare_bench.py
new file mode 100755
index 0000000..be5b5ca
--- /dev/null
+++ b/ap/libc/glibc/glibc-2.22/benchtests/scripts/compare_bench.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# Copyright (C) 2015 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+"""Compare two benchmark results
+
+Given two benchmark result files and a threshold, this script compares the
+benchmark results and flags differences in performance beyond a given
+threshold.
+"""
+import sys
+import os
+import pylab
+import import_bench as bench
+
+def do_compare(func, var, tl1, tl2, par, threshold):
+    """Compare one of the aggregate measurements
+
+    Helper function to compare one of the aggregate measurements of a function
+    variant.
+
+    Args:
+        func: Function name
+        var: Function variant name
+        tl1: The first timings list
+        tl2: The second timings list
+        par: The aggregate to measure
+        threshold: The threshold for differences, beyond which the script should
+        print a warning.
+    """
+    d = abs(tl2[par] - tl1[par]) * 100 / tl1[par]
+    if d > threshold:
+        if tl1[par] > tl2[par]:
+            ind = '+++'
+        else:
+            ind = '---'
+        print('%s %s(%s)[%s]: (%.2lf%%) from %g to %g' %
+                (ind, func, var, par, d, tl1[par], tl2[par]))
+
+
+def compare_runs(pts1, pts2, threshold):
+    """Compare two benchmark runs
+
+    Args:
+        pts1: Timing data from first machine
+        pts2: Timing data from second machine
+        threshold: The percentage difference beyond which a change is flagged
+    """
+
+    # XXX We assume that the two benchmarks have identical functions and
+    # variants.  We cannot compare two benchmarks that may have different
+    # functions or variants.  Maybe that is something for the future.
+    for func in pts1['functions'].keys():
+        for var in pts1['functions'][func].keys():
+            tl1 = pts1['functions'][func][var]
+            tl2 = pts2['functions'][func][var]
+
+            # Compare the consolidated numbers
+            do_compare(func, var, tl1, tl2, 'max', threshold)
+            do_compare(func, var, tl1, tl2, 'min', threshold)
+            do_compare(func, var, tl1, tl2, 'mean', threshold)
+
+            # Skip over to the next variant or function if there is no detailed
+            # timing info for the function variant.
+            if 'timings' not in pts1['functions'][func][var].keys() or \
+                'timings' not in pts2['functions'][func][var].keys():
+                continue
+
+            # If two lists do not have the same length then it is likely that
+            # the performance characteristics of the function have changed.
+            # XXX: It is also likely that there was some measurement that
+            # strayed outside the usual range.  Such outliers should not
+            # happen on an idle machine with identical hardware and
+            # configuration, but ideal environments are hard to come by.
+            if len(tl1['timings']) != len(tl2['timings']):
+                print('* %s(%s): Timing characteristics changed' %
+                        (func, var))
+                print('\tBefore: [%s]' %
+                        ', '.join([str(x) for x in tl1['timings']]))
+                print('\tAfter: [%s]' %
+                        ', '.join([str(x) for x in tl2['timings']]))
+                continue
+
+            # Collect numbers whose differences cross the threshold we have
+            # set.
+            issues = [(x, y) for x, y in zip(tl1['timings'], tl2['timings']) \
+                        if abs(y - x) * 100 / x > threshold]
+
+            # Now print them.
+            for t1, t2 in issues:
+                d = abs(t2 - t1) * 100 / t1
+                if t2 > t1:
+                    ind = '-'
+                else:
+                    ind = '+'
+
+                print("%s %s(%s): (%.2lf%%) from %g to %g" %
+                        (ind, func, var, d, t1, t2))
+
+
+def plot_graphs(bench1, bench2):
+    """Plot graphs for functions
+
+    Make scatter plots for the functions and their variants.
+
+    Args:
+        bench1: Set of points from the first machine
+        bench2: Set of points from the second machine.
+    """
+    for func in bench1['functions'].keys():
+        for var in bench1['functions'][func].keys():
+            # No point trying to print a graph if there are no detailed
+            # timings.
+            if u'timings' not in bench1['functions'][func][var].keys():
+                print('Skipping graph for %s(%s)' % (func, var))
+                continue
+
+            pylab.clf()
+            pylab.ylabel('Time (cycles)')
+
+            # First set of points
+            length = len(bench1['functions'][func][var]['timings'])
+            X = [float(x) for x in range(length)]
+            lines = pylab.scatter(X, bench1['functions'][func][var]['timings'],
+                    1.5 + 100 / length)
+            pylab.setp(lines, 'color', 'r')
+
+            # Second set of points
+            length = len(bench2['functions'][func][var]['timings'])
+            X = [float(x) for x in range(length)]
+            lines = pylab.scatter(X, bench2['functions'][func][var]['timings'],
+                    1.5 + 100 / length)
+            pylab.setp(lines, 'color', 'g')
+
+            if var:
+                filename = "%s-%s.png" % (func, var)
+            else:
+                filename = "%s.png" % func
+            print('Writing out %s' % filename)
+            pylab.savefig(filename)
+
+
+def main(args):
+    """Program Entry Point
+
+    Take two benchmark output files and compare their timings.
+    """
+    if len(args) > 4 or len(args) < 3:
+        print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
+        sys.exit(os.EX_USAGE)
+
+    bench1 = bench.parse_bench(args[1], args[0])
+    bench2 = bench.parse_bench(args[2], args[0])
+    if len(args) == 4:
+        threshold = float(args[3])
+    else:
+        threshold = 10.0
+
+    if bench1['timing_type'] != bench2['timing_type']:
+        print('Cannot compare benchmark outputs: timing types are different')
+        return
+
+    plot_graphs(bench1, bench2)
+
+    bench.compress_timings(bench1)
+    bench.compress_timings(bench2)
+
+    compare_runs(bench1, bench2, threshold)
+
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
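A usage sketch (file names and threshold are illustrative): given two bench
output files produced by the same benchmark set, compare with a 5% threshold:

    $ python compare_bench.py benchout.schema.json bench-old.out bench-new.out 5

Per do_compare above, '+++' marks an aggregate that improved (the time in the
second file is lower) and '---' one that regressed; individual timing
differences are marked '+' and '-'.  One scatter plot per function variant is
also written to <func>-<variant>.png in the current directory.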
diff --git a/ap/libc/glibc/glibc-2.22/benchtests/scripts/import_bench.py b/ap/libc/glibc/glibc-2.22/benchtests/scripts/import_bench.py
new file mode 100644
index 0000000..d37ff62
--- /dev/null
+++ b/ap/libc/glibc/glibc-2.22/benchtests/scripts/import_bench.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# Copyright (C) 2015 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+"""Functions to import benchmark data and process it"""
+
+import json
+try:
+    import jsonschema as validator
+except ImportError:
+    print('Could not find jsonschema module.')
+    raise
+
+
+def mean(lst):
+    """Compute and return mean of numbers in a list
+
+    The numpy average function has horrible performance, so implement our
+    own mean function.
+
+    Args:
+        lst: The list of numbers to average.
+    Return:
+        The mean of members in the list.
+    """
+    return sum(lst) / len(lst)
+
+
+def split_list(bench, func, var):
+    """ Split the list into a smaller set of more distinct points
+
+    Group together points such that the difference between the smallest
+    point and the mean is less than 1/3rd of the mean.  This means that
+    the mean is at most 1.5x the smallest member of that group.
+
+    mean - xmin < mean / 3
+    i.e. 2 * mean / 3 < xmin
+    i.e. mean < 3 * xmin / 2
+
+    For an evenly distributed group, the largest member will be less than
+    twice the smallest member of the group.
+    Derivation:
+
+    An evenly distributed series would be xmin, xmin + d, xmin + 2d...
+
+    mean = (2 * n * xmin + n * (n - 1) * d) / (2 * n)
+    and max element is xmin + (n - 1) * d
+
+    Now, mean < 3 * xmin / 2
+
+    3 * xmin > 2 * mean
+    3 * xmin > (2 * n * xmin + n * (n - 1) * d) / n
+    3 * n * xmin > 2 * n * xmin + n * (n - 1) * d
+    n * xmin > n * (n - 1) * d
+    xmin > (n - 1) * d
+    2 * xmin > xmin + (n-1) * d
+    2 * xmin > xmax
+
+    Hence, proved.
+
+    Similarly, it is trivial to prove that for a similar aggregation by using
+    the maximum element, the maximum element in the group must be at most 4/3
+    times the mean.
+
+    Args:
+        bench: The benchmark object
+        func: The function name
+        var: The function variant name
+    """
+    means = []
+    lst = bench['functions'][func][var]['timings']
+    last = len(lst) - 1
+    while lst:
+        for i in range(last + 1):
+            avg = mean(lst[i:])
+            if avg > 0.75 * lst[last]:
+                means.insert(0, avg)
+                lst = lst[:i]
+                last = i - 1
+                break
+    bench['functions'][func][var]['timings'] = means
+
+
+def do_for_all_timings(bench, callback):
+    """Call a function for all timing objects for each function and its
+    variants.
+
+    Args:
+        bench: The benchmark object
+        callback: The callback function
+    """
+    for func in bench['functions'].keys():
+        for k in bench['functions'][func].keys():
+            if 'timings' not in bench['functions'][func][k].keys():
+                continue
+
+            callback(bench, func, k)
+
+
+def compress_timings(points):
+    """Club points with close enough values into a single mean value
+
+    See split_list for details on how the clubbing is done.
+
+    Args:
+        points: The set of points.
+    """
+    do_for_all_timings(points, split_list)
+
+
+def parse_bench(filename, schema_filename):
+    """Parse the input file
+
+    Parse and validate the json file containing the benchmark outputs.  Return
+    the resulting object.
+
+    Args:
+        filename: Name of the benchmark output file.
+        schema_filename: Name of the schema file to validate against.
+    Return:
+        The bench dictionary.
+    """
+    with open(schema_filename, 'r') as schemafile:
+        schema = json.load(schemafile)
+        with open(filename, 'r') as benchfile:
+            bench = json.load(benchfile)
+            validator.validate(bench, schema)
+            do_for_all_timings(bench, lambda b, f, v:
+                    b['functions'][f][v]['timings'].sort())
+            return bench
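The clustering that split_list performs is easiest to see on a small example.
A self-contained sketch of the same logic (the sample timings are made up):

    def mean(lst):
        """Arithmetic mean, as in import_bench.py."""
        return sum(lst) / float(len(lst))

    def cluster_means(timings):
        """Collapse sorted timings into per-cluster means, mirroring
        split_list: a cluster ends once the mean of the remaining tail
        exceeds 3/4 of the tail's largest element."""
        means = []
        lst = sorted(timings)
        last = len(lst) - 1
        while lst:
            for i in range(last + 1):
                avg = mean(lst[i:])
                if avg > 0.75 * lst[last]:
                    means.insert(0, avg)
                    lst = lst[:i]
                    last = i - 1
                    break
        return means

    print(cluster_means([10, 11, 12, 100, 110]))  # [11.0, 105.0]

The two widely separated groups collapse into one representative mean each,
which is what compress_timings applies across every function variant.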
diff --git a/ap/libc/glibc/glibc-2.22/benchtests/scripts/validate_benchout.py b/ap/libc/glibc/glibc-2.22/benchtests/scripts/validate_benchout.py
new file mode 100755
index 0000000..28fd023
--- /dev/null
+++ b/ap/libc/glibc/glibc-2.22/benchtests/scripts/validate_benchout.py
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+# Copyright (C) 2014-2015 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+"""Benchmark output validator
+
+Given a benchmark output file in json format and a benchmark schema file,
+validate the output against the schema.
+"""
+
+from __future__ import print_function
+import json
+import sys
+import os
+
+try:
+    import import_bench as bench
+except ImportError:
+    print('Import Error: Output will not be validated.')
+    # Return success because we don't want the bench target to fail just
+    # because the jsonschema module was not found.
+    sys.exit(os.EX_OK)
+
+
+def print_and_exit(message, exitcode):
+    """Prints message to stderr and returns the exit code.
+
+    Args:
+        message: The message to print
+        exitcode: The exit code to return
+
+    Returns:
+        The passed exit code
+    """
+    print(message, file=sys.stderr)
+    return exitcode
+
+
+def main(args):
+    """Main entry point
+
+    Args:
+        args: The command line arguments to the program
+
+    Returns:
+        0 on success or a non-zero failure code
+
+    Exceptions:
+        Exceptions thrown by validate_bench
+    """
+    if len(args) != 2:
+        return print_and_exit("Usage: %s <bench.out file> <bench.out schema>"
+                % sys.argv[0], os.EX_USAGE)
+
+    try:
+        bench.parse_bench(args[0], args[1])
+    except IOError as e:
+        return print_and_exit("IOError(%d): %s" % (e.errno, e.strerror),
+                os.EX_OSFILE)
+
+    except bench.validator.ValidationError as e:
+        return print_and_exit("Invalid benchmark output: %s" % e.message,
+            os.EX_DATAERR)
+
+    except bench.validator.SchemaError as e:
+        return print_and_exit("Invalid schema: %s" % e.message, os.EX_DATAERR)
+
+    print("Benchmark output in %s is valid." % args[0])
+    return os.EX_OK
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
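A usage sketch (file names are illustrative), assuming the schema file sits
alongside the output being checked:

    $ python validate_benchout.py bench.out benchout.schema.json
    Benchmark output in bench.out is valid.

On a schema violation the script instead prints 'Invalid benchmark output:'
with the validator's message to stderr and exits with os.EX_DATAERR.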