126 changes: 126 additions & 0 deletions benchmarks/asv.conf.json
@@ -0,0 +1,126 @@
{
    // The version of the config file format. Do not change, unless
    // you know what you are doing.
    "version": 1,

    // The name of the project being benchmarked
    "project": "sparr",

    // The project's homepage
    "project_url": "http://project-homepage.org/",

    // The URL or local path of the source code repository for the
    // project being benchmarked
    "repo": "..",

    // List of branches to benchmark. If not provided, defaults to "master"
    // (for git) or "tip" (for mercurial).
    // "branches": ["master"], // for git
    // "branches": ["tip"],    // for mercurial

    // The DVCS being used. If not set, it will be automatically
    // determined from "repo" by looking at the protocol in the URL
    // (if remote), or by looking for special directories, such as
    // ".git" (if local).
    // "dvcs": "git",

    // The tool to use to create environments. May be "conda",
    // "virtualenv" or other value depending on the plugins in use.
    // If missing or the empty string, the tool will be automatically
    // determined by looking for tools on the PATH environment
    // variable.
    "environment_type": "virtualenv",

    // the base URL to show a commit for the project.
    // "show_commit_url": "http://github.com/owner/project/commit/",

    // The Pythons you'd like to test against. If not provided, defaults
    // to the current version of Python used to run `asv`.
    // "pythons": ["2.7", "3.3"],

    // The matrix of dependencies to test. Each key is the name of a
    // package (in PyPI) and the values are version numbers. An empty
    // list or empty string indicates to just test against the default
    // (latest) version. null indicates that the package is not to be
    // installed. If the package to be tested is only available from
    // PyPI, and the 'environment_type' is conda, then you can preface
    // the package name by 'pip+', and the package will be installed via
    // pip (with all the conda available packages installed first,
    // followed by the pip installed packages).
    //
    "matrix": {
        "numpy": ["1.10.4"],
        "Cython": ["0.23"],
        "scipy": ["0.17.0"]
    },

    // Combinations of libraries/python versions can be excluded/included
    // from the set to test. Each entry is a dictionary containing additional
    // key-value pairs to include/exclude.
    //
    // An exclude entry excludes entries where all values match. The
    // values are regexps that should match the whole string.
    //
    // An include entry adds an environment. Only the packages listed
    // are installed. The 'python' key is required. The exclude rules
    // do not apply to includes.
    //
    // In addition to package names, the following keys are available:
    //
    // - python
    //     Python version, as in the *pythons* variable above.
    // - environment_type
    //     Environment type, as above.
    // - sys_platform
    //     Platform, as in sys.platform. Possible values for the common
    //     cases: 'linux2', 'win32', 'cygwin', 'darwin'.
    //
    // "exclude": [
    //     {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
    //     {"environment_type": "conda", "six": null}, // don't run without six on conda
    // ],
    //
    // "include": [
    //     // additional env for python2.7
    //     {"python": "2.7", "numpy": "1.8"},
    //     // additional env if run on windows+conda
    //     {"sys_platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
    // ],

    // The directory (relative to the current directory) that benchmarks are
    // stored in. If not provided, defaults to "benchmarks"
    // "benchmark_dir": "benchmarks",

    // The directory (relative to the current directory) to cache the Python
    // environments in. If not provided, defaults to "env"
    // "env_dir": "env",

    // The directory (relative to the current directory) that raw benchmark
    // results are stored in. If not provided, defaults to "results".
    // "results_dir": "results",

    // The directory (relative to the current directory) that the html tree
    // should be written to. If not provided, defaults to "html".
    // "html_dir": "html",

    // The number of characters to retain in the commit hashes.
    // "hash_length": 8,

    // `asv` will cache wheels of the recent builds in each
    // environment, making them faster to install next time. This is
    // the number of builds to keep, per environment.
    "wheel_cache_size": 10

    // The commits after which the regression search in `asv publish`
    // should start looking for regressions. Dictionary whose keys are
    // regexps matching to benchmark names, and values corresponding to
    // the commit (exclusive) after which to start looking for
    // regressions. The default is to start from the first commit
    // with results. If the commit is `null`, regression detection is
    // skipped for the matching benchmark.
    //
    // "regressions_first_commits": {
    //     "some_benchmark": "352cdf",  // Consider regressions only after this commit
    //     "another_benchmark": null,   // Skip regression detection altogether
    // }
}
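
For context, a minimal sketch of the usual asv workflow against this configuration, assuming a standard `asv` installation and invocation from the `benchmarks/` directory (the `"repo": ".."` entry points one level up, to the project root):

asv run        # build the project in a fresh virtualenv and run the suite
asv publish    # render the collected results into the static HTML report
asv preview    # serve the generated report locally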
1 change: 1 addition & 0 deletions benchmarks/benchmarks/__init__.py
@@ -0,0 +1 @@

100 changes: 100 additions & 0 deletions benchmarks/benchmarks/benchmarks.py
@@ -0,0 +1,100 @@
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.

from sparr import MapArray as M
from scipy.sparse import dok_matrix

try:
    # https://github.com/scipy/scipy/pull/6004/
    from scipy.sparse import fast_lil_matrix
except ImportError:
    pass

try:
    from pysparse.sparse import spmatrix
except ImportError:
    pass

try:
    xrange
except NameError:
    # python 3
    xrange = range


def map_poisson2d(n, func_name):
    # Assemble the 5-point finite-difference Laplacian of the 2D Poisson
    # problem on an n-by-n grid, in the sparse matrix class named by
    # func_name.
    n2 = n*n
    if func_name == "map_array":
        L = M(shape=(n2, n2))
    elif func_name == "dok_matrix":
        L = dok_matrix((n2, n2))
    elif func_name == "ll_mat":
        L = spmatrix.ll_mat(n2, n2)
    elif func_name == "fast_lil":
        L = fast_lil_matrix((n2, n2))
    else:
        raise ValueError("unknown matrix class %r" % func_name)
    for i in xrange(n):
        for j in xrange(n):
            k = i + n*j
            L[k, k] = 4
            if i > 0:
                L[k, k-1] = -1
            if i < n-1:
                L[k, k+1] = -1
            if j > 0:
                L[k, k-n] = -1
            if j < n-1:
                L[k, k+n] = -1
    return L
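
As a quick sanity check of the assembly above, a hedged sketch that assumes only the scipy backend is importable: for n = 2 the function should produce the standard 5-point Laplacian on a 2x2 grid.

import numpy as np

L = map_poisson2d(2, "dok_matrix")
expected = np.array([[ 4, -1, -1,  0],
                     [-1,  4,  0, -1],
                     [-1,  0,  4, -1],
                     [ 0, -1, -1,  4]])
assert np.array_equal(L.toarray(), expected)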


class BenchPoisson2D(object):

    params = ([10, 100, 300], ['map_array', 'll_mat', 'fast_lil', 'dok_matrix'])
    param_names = ['n', 'class']
    timeout = 120.0

    def time_poisson2d(self, n, func_name):
        map_poisson2d(n, func_name)

    def peakmem_poisson2d(self, n, func_name):
        map_poisson2d(n, func_name)

    def mem_poisson2d(self, n, func_name):
        # asv mem_ benchmarks report the size of the returned object,
        # so the matrix must be returned rather than discarded.
        return map_poisson2d(n, func_name)
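
Note that asv expands a multi-entry `params` list as a cartesian product, so each method above runs once per (n, class) pair; a small illustration of the twelve combinations it will generate:

import itertools

params = ([10, 100, 300], ['map_array', 'll_mat', 'fast_lil', 'dok_matrix'])
for n, cls in itertools.product(*params):
    print(n, cls)  # 3 sizes x 4 classes = 12 runs per benchmark method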

#class TimeSuite:
#    """
#    An example benchmark that times the performance of various kinds
#    of iterating over dictionaries in Python.
#    """
#    def setup(self):
#        self.d = {}
#        for x in range(500):
#            self.d[x] = None

#    def time_keys(self):
#        for key in self.d.keys():
#            pass

#    def time_iterkeys(self):
#        for key in self.d.iterkeys():
#            pass

#    def time_range(self):
#        d = self.d
#        for key in range(500):
#            x = d[key]

#    def time_xrange(self):
#        d = self.d
#        for key in xrange(500):
#            x = d[key]


#class MemSuite:
#    def mem_list(self):
#        return [0] * 256