Merge pull request #560 from swiftwasm/master
[pull] swiftwasm from master
pull[bot] authored Apr 4, 2020
2 parents 1f7bfed + b78ed53 commit 854c775
Showing 146 changed files with 4,355 additions and 452 deletions.
3 changes: 3 additions & 0 deletions benchmark/CMakeLists.txt
@@ -227,6 +227,9 @@ endif()
set(SWIFT_BENCHMARK_EXTRA_FLAGS "" CACHE STRING
"Extra options to pass to swiftc when building the benchmarks")

set(SWIFT_BENCHMARK_UNOPTIMIZED_DRIVER NO CACHE BOOL
"Build the benchmark driver utilites without optimization (default: no)")

if (SWIFT_BENCHMARK_BUILT_STANDALONE)
# This option's value must match the value of the same option used when
# building the swift runtime.
32 changes: 25 additions & 7 deletions benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake
@@ -166,7 +166,7 @@ macro(configure_sdks)
endif()
endmacro()

function (add_swift_benchmark_library objfile_out sibfile_out)
function (add_swift_benchmark_library objfile_out sibfile_out swiftmodule_out)
cmake_parse_arguments(BENCHLIB "" "MODULE_PATH;SOURCE_DIR;OBJECT_DIR" "SOURCES;LIBRARY_FLAGS;DEPENDS" ${ARGN})

precondition(BENCHLIB_MODULE_PATH)
@@ -197,6 +197,7 @@ function (add_swift_benchmark_library objfile_out sibfile_out)
"-o" "${objfile}"
${sources})
set(${objfile_out} "${objfile}" PARENT_SCOPE)
set(${swiftmodule_out} "${swiftmodule}" PARENT_SCOPE)

if(SWIFT_BENCHMARK_EMIT_SIB)
precondition(sibfile_out)
@@ -378,10 +379,13 @@ function (swift_benchmark_compile_archopts)

set(common_swift4_options ${common_options} "-swift-version" "4")

# Always optimize the driver modules.
# Note that we compile the driver for Osize also with -Osize
# (and not with -O), because of <rdar://problem/19614516>.
string(REPLACE "Onone" "O" driver_opt "${optflag}")
# Always optimize the driver modules, unless we're building benchmarks for
# debugger testing.
if(NOT SWIFT_BENCHMARK_UNOPTIMIZED_DRIVER)
# Note that we compile the driver for Osize also with -Osize
# (and not with -O), because of <rdar://problem/19614516>.
string(REPLACE "Onone" "O" driver_opt "${optflag}")
endif()

set(common_options_driver
"-c"
@@ -401,19 +405,21 @@ function (swift_benchmark_compile_archopts)
endif()
set(bench_library_objects)
set(bench_library_sibfiles)
set(bench_library_swiftmodules)
set(opt_view_dirs)
# Build libraries used by the driver and benchmarks.
foreach(module_name_path ${BENCH_LIBRARY_MODULES})
set(sources "${srcdir}/${module_name_path}.swift")

add_swift_benchmark_library(objfile_out sibfile_out
add_swift_benchmark_library(objfile_out sibfile_out swiftmodule_out
MODULE_PATH "${module_name_path}"
SOURCE_DIR "${srcdir}"
OBJECT_DIR "${objdir}"
SOURCES ${sources}
LIBRARY_FLAGS ${common_swift4_options})
precondition(objfile_out)
list(APPEND bench_library_objects "${objfile_out}")
list(APPEND bench_library_swiftmodules "${swiftmodule_out}")
if (SWIFT_BENCHMARK_EMIT_SIB)
precondition(sibfile_out)
list(APPEND bench_library_sibfiles "${sibfile_out}")
@@ -433,7 +439,7 @@ function (swift_benchmark_compile_archopts)

set(objfile_out)
set(sibfile_out)
add_swift_benchmark_library(objfile_out sibfile_out
add_swift_benchmark_library(objfile_out sibfile_out swiftmodule_out
MODULE_PATH "${module_name_path}"
SOURCE_DIR "${srcdir}"
OBJECT_DIR "${objdir}"
Expand All @@ -442,6 +448,7 @@ function (swift_benchmark_compile_archopts)
DEPENDS ${bench_library_objects})
precondition(objfile_out)
list(APPEND bench_driver_objects "${objfile_out}")
list(APPEND bench_library_swiftmodules "${swiftmodule_out}")
if (SWIFT_BENCHMARK_EMIT_SIB)
precondition(sibfile_out)
list(APPEND bench_driver_sibfiles "${sibfile_out}")
@@ -464,6 +471,7 @@ function (swift_benchmark_compile_archopts)
set(swiftmodule "${objdir}/${module_name}.swiftmodule")
set(source "${srcdir}/${module_name_path}.swift")
list(APPEND SWIFT_BENCH_OBJFILES "${objfile}")
list(APPEND bench_library_swiftmodules "${swiftmodule}")

if ("${bench_flags}" MATCHES "-whole-module.*")
set(output_option "-o" "${objfile}")
@@ -622,6 +630,15 @@ function (swift_benchmark_compile_archopts)
else()
set(SWIFT_LINK_RPATH "${SWIFT_RPATH_BASE}/${BENCH_COMPILE_ARCHOPTS_PLATFORM}")
endif()

# On Darwin, we pass the *.swiftmodule paths transitively referenced by the
# driver executable to ld64. ld64 inserts N_AST references to these modules
# into the program, for later use by lldb.
set(ld64_add_ast_path_opts)
foreach(ast_path ${bench_library_swiftmodules})
list(APPEND ld64_add_ast_path_opts "-Wl,-add_ast_path,${ast_path}")
endforeach()

add_custom_command(
OUTPUT "${OUTPUT_EXEC}"
DEPENDS
@@ -647,6 +664,7 @@ function (swift_benchmark_compile_archopts)
"-Xlinker" "${SWIFT_LINK_RPATH}"
${bench_library_objects}
${bench_driver_objects}
${ld64_add_ast_path_opts}
${SWIFT_BENCH_OBJFILES}
${objcfile}
"-o" "${OUTPUT_EXEC}"
5 changes: 4 additions & 1 deletion benchmark/scripts/Benchmark_DTrace.in
@@ -103,6 +103,7 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
stdout=subprocess.PIPE,
stderr=open("/dev/null", "w"),
env=e,
universal_newlines=True,
)
results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
return [
@@ -136,7 +137,9 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
results.append(result_3)
results.append(single_iter)

return DTraceResult(test_name, int(not foundInstability), results)
return DTraceResult(
test_name, int(not foundInstability), results, self.csv_output
)


SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
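The `universal_newlines=True` argument added here (and in Benchmark_QuickCheck, Benchmark_RuntimeLeaksRunner, Benchmark_Driver, and perf_test_driver below) makes `subprocess` return decoded text instead of bytes under Python 3, so the existing `.split("\n")` string handling keeps working. A minimal sketch of the difference, assuming only a Unix-style `echo` on PATH (not part of the benchmark harness):

import subprocess

# Without text mode, Python 3 returns bytes, and str operations such as
# output.split("\n") raise TypeError; Python 2 returned str either way.
raw = subprocess.check_output(["echo", "hello"])  # b'hello\n' on Python 3

# With universal_newlines=True the output is decoded to str and newlines
# are normalized, on both Python 2 and Python 3.
text = subprocess.check_output(["echo", "hello"], universal_newlines=True)
lines = [x for x in text.split("\n") if x]  # ['hello']
print(type(raw), lines)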
67 changes: 39 additions & 28 deletions benchmark/scripts/Benchmark_Driver
@@ -26,6 +26,7 @@ class `BenchmarkDoctor` analyzes performance tests, implements `check` COMMAND.
"""

import argparse
import functools
import glob
import logging
import math
@@ -64,7 +65,9 @@ class BenchmarkDriver(object):
os.environ["SWIFT_DETERMINISTIC_HASHING"] = "1"

def _invoke(self, cmd):
return self._subprocess.check_output(cmd, stderr=self._subprocess.STDOUT)
return self._subprocess.check_output(
cmd, stderr=self._subprocess.STDOUT, universal_newlines=True
)

@property
def test_harness(self):
@@ -165,7 +168,7 @@ class BenchmarkDriver(object):
)
output = self._invoke(cmd)
results = self.parser.results_from_string(output)
return results.items()[0][1] if test else results
return list(results.items())[0][1] if test else results

def _cmd_run(
self,
@@ -207,7 +210,7 @@ class BenchmarkDriver(object):
a.merge(b)
return a

return reduce(
return functools.reduce(
merge_results,
[
self.run(test, measure_memory=True, num_iters=1, quantile=20)
@@ -249,19 +252,21 @@ class BenchmarkDriver(object):
print(format(values))

def result_values(r):
return map(
str,
[
r.test_num,
r.name,
r.num_samples,
r.min,
r.samples.q1,
r.median,
r.samples.q3,
r.max,
r.max_rss,
],
return list(
map(
str,
[
r.test_num,
r.name,
r.num_samples,
r.min,
r.samples.q1,
r.median,
r.samples.q3,
r.max,
r.max_rss,
],
)
)

header = [
@@ -370,7 +375,12 @@ class MarkdownReportHandler(logging.StreamHandler):
msg = self.format(record)
stream = self.stream
try:
if isinstance(msg, unicode) and getattr(stream, "encoding", None):
# In Python 2 Unicode strings have a special type
unicode_type = unicode
except NameError:
unicode_type = str
try:
if isinstance(msg, unicode_type) and getattr(stream, "encoding", None):
stream.write(msg.encode(stream.encoding))
else:
stream.write(msg)
@@ -487,16 +497,14 @@ class BenchmarkDoctor(object):
name = measurements["name"]
setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
setup = 0 if ratio < 0.05 else setup
runtime = min(
[
(result.samples.min - correction)
for i_series in [
BenchmarkDoctor._select(measurements, num_iters=i)
for correction in [(setup / i) for i in [1, 2]]
]
for result in i_series
]
)

runtimes = []
for i in range(1, 3):
correction = setup / i
i_series = BenchmarkDoctor._select(measurements, num_iters=i)
for result in i_series:
runtimes.append(result.samples.min - correction)
runtime = min(runtimes)

threshold = 1000
if threshold < runtime:
@@ -572,7 +580,9 @@ class BenchmarkDoctor(object):

@staticmethod
def _reasonable_setup_time(measurements):
setup = min([result.setup for result in BenchmarkDoctor._select(measurements)])
setup = min(
[result.setup or 0 for result in BenchmarkDoctor._select(measurements)]
)
if 200000 < setup: # 200 ms
BenchmarkDoctor.log_runtime.error(
"'%s' setup took at least %d μs.", measurements["name"], setup
@@ -857,6 +867,7 @@ def parse_args(args):
help="See COMMAND -h for additional arguments",
metavar="COMMAND",
)
subparsers.required = True

shared_benchmarks_parser = argparse.ArgumentParser(add_help=False)
benchmarks_group = shared_benchmarks_parser.add_mutually_exclusive_group()
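Several of the Benchmark_Driver changes in this file are straightforward Python 3 ports: `dict.items()` and `map()` now return views/iterators rather than lists, and `reduce` lives in `functools` instead of being a builtin, hence the added `list(...)` wrappers and `functools.reduce` calls. A small standalone sketch of why they are needed, using made-up benchmark names and numbers rather than the driver's real data:

import functools

results = {"Ackermann": 42, "AngryPhonebook": 7}

# Python 2: results.items()[0] worked because items() returned a list.
# Python 3: items() is a view, so it must be materialized first.
first_name, first_value = list(results.items())[0]

# map() is lazy in Python 3; wrap it in list() before indexing or printing.
columns = list(map(str, [1, "Ackermann", 20, 1010, 1020, 1030, 1040, 1050, None]))

# reduce() is no longer a builtin in Python 3.
total = functools.reduce(lambda acc, x: acc + x, results.values(), 0)
print(first_name, first_value, columns, total)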
3 changes: 2 additions & 1 deletion benchmark/scripts/Benchmark_QuickCheck.in
@@ -63,6 +63,7 @@ class QuickCheckBenchmarkDriver(perf_test_driver.BenchmarkDriver):
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
error_out = p.communicate()[1].split("\n")
result = p.returncode
@@ -76,7 +77,7 @@ class QuickCheckBenchmarkDriver(perf_test_driver.BenchmarkDriver):
try:
args = [data, num_iters]
perf_test_driver.run_with_timeout(self.run_test_inner, args)
except Exception, e:
except Exception as e:
sys.stderr.write(
"Child Process Failed! (%s,%s). Error: %s\n"
% (data["path"], data["test_name"], e)
3 changes: 2 additions & 1 deletion benchmark/scripts/Benchmark_RuntimeLeaksRunner.in
@@ -89,6 +89,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
error_out = p.communicate()[1].split("\n")
result = p.returncode
@@ -102,7 +103,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
try:
args = [data, num_iters]
result = perf_test_driver.run_with_timeout(self.run_test_inner, args)
except Exception, e:
except Exception as e:
sys.stderr.write(
"Child Process Failed! (%s,%s). Error: %s\n"
% (data["path"], data["test_name"], e)
23 changes: 15 additions & 8 deletions benchmark/scripts/compare_perf_tests.py
@@ -30,6 +30,7 @@ class `ReportFormatter` creates the test comparison report in specified format.
from __future__ import print_function

import argparse
import functools
import re
import sys
from bisect import bisect, bisect_left, bisect_right
@@ -142,7 +143,7 @@ def num_samples(self):
@property
def all_samples(self):
"""List of all samples in ascending order."""
return sorted(self.samples + self.outliers, key=lambda s: s.i)
return sorted(self.samples + self.outliers, key=lambda s: s.i or -1)

@property
def min(self):
@@ -189,13 +190,16 @@ def sd(self):
return 0 if self.count < 2 else sqrt(self.S_runtime / (self.count - 1))

@staticmethod
def running_mean_variance((k, M_, S_), x):
def running_mean_variance(stats, x):
"""Compute running variance, B. P. Welford's method.
See Knuth TAOCP vol 2, 3rd edition, page 232, or
https://www.johndcook.com/blog/standard_deviation/
M is mean, Standard Deviation is defined as sqrt(S/k-1)
"""

(k, M_, S_) = stats

k = float(k + 1)
M = M_ + (x - M_) / k
S = S_ + (x - M_) * (x - M)
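The signature change above removes Python 2 tuple-parameter unpacking (`def f((k, M_, S_), x)`), which is a syntax error in Python 3; the tuple is now unpacked inside the body. For reference, here is a standalone sketch of the Welford update the docstring describes, checked against Python's statistics module. The helper name `welford_update` and the sample values are illustrative only, not part of compare_perf_tests.py:

import math
import statistics

def welford_update(stats, x):
    # One Welford step: stats is (count, running mean, running sum of
    # squared deviations from the mean); returns the updated triple.
    k, m, s = stats
    k = float(k + 1)
    m_new = m + (x - m) / k
    s_new = s + (x - m) * (x - m_new)
    return (k, m_new, s_new)

samples = [1010, 1020, 1030, 1040, 1050]
k, mean, s = (0, 0.0, 0.0)
for x in samples:
    k, mean, s = welford_update((k, mean, s), x)

sd = math.sqrt(s / (k - 1))  # sample standard deviation, sqrt(S/(k-1))
assert abs(mean - statistics.mean(samples)) < 1e-9
assert abs(sd - statistics.stdev(samples)) < 1e-9
print(mean, sd)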
@@ -247,7 +251,7 @@ def __init__(self, csv_row, quantiles=False, memory=False, delta=False, meta=Fal
runtimes = csv_row[3:mem_index] if memory or meta else csv_row[3:]
if delta:
runtimes = [int(x) if x else 0 for x in runtimes]
runtimes = reduce(
runtimes = functools.reduce(
lambda l, x: l.append(l[-1] + x) or l if l else [x], # runnin
runtimes,
None,
@@ -315,7 +319,8 @@ def merge(self, r):
"""
# Statistics
if self.samples and r.samples:
map(self.samples.add, r.samples.samples)
for sample in r.samples.samples:
self.samples.add(sample)
sams = self.samples
self.num_samples = sams.num_samples
self.min, self.max, self.median, self.mean, self.sd = (
@@ -490,7 +495,7 @@ def add_or_merge(names, r):
names[r.name].merge(r)
return names

return reduce(add_or_merge, tests, dict())
return functools.reduce(add_or_merge, tests, dict())

@staticmethod
def results_from_string(log_contents):
@@ -544,10 +549,12 @@ def __init__(self, old_results, new_results, delta_threshold):
def compare(name):
return ResultComparison(old_results[name], new_results[name])

comparisons = map(compare, comparable_tests)
comparisons = list(map(compare, comparable_tests))

def partition(l, p):
return reduce(lambda x, y: x[not p(y)].append(y) or x, l, ([], []))
return functools.reduce(
lambda x, y: x[not p(y)].append(y) or x, l, ([], [])
)

decreased, not_decreased = partition(
comparisons, lambda c: c.ratio < (1 - delta_threshold)
@@ -668,7 +675,7 @@ def _column_widths(self):
def max_widths(maximum, widths):
return map(max, zip(maximum, widths))

return reduce(max_widths, widths, [0] * 5)
return list(functools.reduce(max_widths, widths, [0] * 5))

def _formatted_text(
self, label_formatter, COLUMN_SEPARATOR, DELIMITER_ROW, SEPARATOR, SECTION
3 changes: 2 additions & 1 deletion benchmark/scripts/perf_test_driver/perf_test_driver.py
@@ -111,7 +111,8 @@ def process_input(self, data):
def run_for_opt_level(self, binary, opt_level, test_filter):
print("testing driver at path: %s" % binary)
names = []
for l in subprocess.check_output([binary, "--list"]).split("\n")[1:]:
output = subprocess.check_output([binary, "--list"], universal_newlines=True)
for l in output.split("\n")[1:]:
m = BENCHMARK_OUTPUT_RE.match(l)
if m is None:
continue