#!@Python3_EXECUTABLE@
# -*- coding: utf-8 -*-

import argparse
import datetime
import json
import os
import sys

from urllib import error as urlerror
from urllib import parse as urlparse
from urllib import request


# Make the in-tree MBR (MLIR benchmark runner) package importable.
mlir_source_root = "@MLIR_SOURCE_DIR@"
sys.path.insert(0, os.path.join(mlir_source_root, "utils", "mbr", "mbr"))

# This import resolves against the path inserted above, so it must stay below
# the sys.path manipulation.
from main import main
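
# main() discovers benchmarks under the given top-level path, runs them, and
# returns a list of per-test dicts that is embedded under the "tests" key of
# the LNT report assembled below.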


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--machine",
        required=True,
        help="A platform identifier on which the benchmarks are run, for "
        "example <hardware>-<arch>-<optimization level>-<branch-name>.",
    )
    parser.add_argument(
        "--revision",
        required=True,
        help="The key used to identify different runs. It can be anything as "
        "long as it can be sorted by Python's sort function.",
    )
    parser.add_argument(
        "--url",
        help="The LNT server URL to send the results to.",
        default="http://localhost:8000/db_default/v4/nts/submitRun",
    )
    parser.add_argument(
        "--result-stdout",
        help="Print benchmarking results to stdout instead of sending them "
        "to LNT.",
        default=False,
        action=argparse.BooleanOptionalAction,
    )
    parser.add_argument(
        "top_level_path",
        # nargs="?" lets the default below apply when the path is omitted;
        # without it, argparse always requires a positional argument.
        nargs="?",
        default=os.getcwd(),
        help="The top level path from which to search for benchmarks.",
    )
    parser.add_argument(
        "--stop_on_error",
        help="Stop the benchmark run on errors. Defaults to false.",
        default=False,
        # Parse as a real boolean (--stop_on_error / --no-stop_on_error),
        # mirroring --result-stdout.
        action=argparse.BooleanOptionalAction,
    )
    args = parser.parse_args()
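
    # Example invocation, assuming the configured script is installed as
    # mlir-mbr (all values below are illustrative only):
    #   mlir-mbr --machine myhost-x86_64-O3-main \
    #            --revision 20240101.1 \
    #            --url http://localhost:8000/db_default/v4/nts/submitRun \
    #            /path/to/benchmarks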

    complete_benchmark_start_time = datetime.datetime.utcnow().isoformat()
    benchmark_function_dicts = main(args.top_level_path, args.stop_on_error)
    complete_benchmark_end_time = datetime.datetime.utcnow().isoformat()
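
    # Each entry in benchmark_function_dicts becomes a test record in the
    # format-version-2 LNT report below. A hypothetical example (the metric
    # names depend on what each benchmark reports):
    #   {"name": "sparse_matvec", "execution_time": [1.23, 1.25]}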
    lnt_dict = {
        "format_version": "2",
        "machine": {"name": args.machine},
        "run": {
            "start_time": complete_benchmark_start_time,
            "end_time": complete_benchmark_end_time,
            "llvm_project_revision": args.revision,
        },
        "tests": benchmark_function_dicts,
        "name": "MLIR benchmark suite",
    }
    lnt_json = json.dumps(lnt_dict, indent=4)
    if args.result_stdout:
        print(lnt_json)
    else:
        # LNT's submitRun endpoint accepts the report as a form-encoded
        # "input_data" field.
        request_data = urlparse.urlencode(
            {"input_data": lnt_json}
        ).encode("ascii")
        req = request.Request(args.url, request_data)
        try:
            request.urlopen(req)
        except urlerror.HTTPError as e:
            print(e)
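
    # A saved report can also be submitted out of band with LNT's own CLI; a
    # sketch, assuming lnt_json has been written to report.json:
    #   lnt submit http://localhost:8000/db_default/v4/nts/submitRun report.json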