.gitignore (2 changes: 1 addition & 1 deletion)
@@ -10,7 +10,7 @@
# Created by setup.py sdist
build/
dist/
pyperformance.egg-info/
*.egg-info/

# Created by the pyperformance script
venv/
MANIFEST.in (1 change: 1 addition & 0 deletions)
@@ -15,5 +15,6 @@ include pyperformance/data-files/benchmarks/MANIFEST
include pyperformance/data-files/benchmarks/bm_*/*.toml
include pyperformance/data-files/benchmarks/bm_*/*.py
include pyperformance/data-files/benchmarks/bm_*/requirements.txt
include pyperformance/data-files/benchmarks/bm_*/*.c
recursive-include pyperformance/data-files/benchmarks/bm_*/data *
recursive-exclude pyperformance/tests *
doc/benchmarks.rst (10 changes: 10 additions & 0 deletions)
@@ -130,6 +130,16 @@ deepcopy
Benchmark the Python `copy.deepcopy` method. The `deepcopy` method is
performed on a nested dictionary and a dataclass.

ctypes
------

Benchmark the function call overhead of calling C functions using ``ctypes``.

The ``ctypes`` benchmark lets ``ctypes`` infer the argument types from the
passed-in values. The ``ctypes_argtypes`` benchmark `explicitly specifies the
argument types
<https://docs.python.org/3.10/library/ctypes.html?highlight=ctypes#specifying-the-required-argument-types-function-prototypes>`_,
which is slower than letting the argument types be inferred.
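
As a rough sketch (not part of the benchmark sources) of how the two modes
differ, assuming the compiled helper library is reachable at a placeholder
path:

    import ctypes

    lib = ctypes.CDLL("./libexample.so")  # placeholder path to the helper library

    # "ctypes" style: ctypes infers how to convert each argument on every call.
    lib.int_foo_int(1)

    # "ctypes_argtypes" style: declare the C prototype explicitly up front.
    lib.int_foo_int.argtypes = [ctypes.c_int]
    lib.int_foo_int.restype = ctypes.c_int
    lib.int_foo_int(1)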

deltablue
---------

doc/custom_benchmarks.rst (24 changes: 14 additions & 10 deletions)
@@ -324,16 +324,17 @@ All other PEP 621 fields are optional (e.g. ``requires-python = ">=3.8"``,
The ``[tool.pyperformance]`` Section
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

=============== ===== === === ===
field type R B F
=============== ===== === === ===
tool.name str X X
tool.tags [str] X
tool.extra_opts [str] X
tool.inherits file
tool.runscript file X
tool.datadir file X
=============== ===== === === ===
================== ===== === === ===
field type R B F
================== ===== === === ===
tool.name str X X
tool.tags [str] X
tool.extra_opts [str] X
tool.inherits file
tool.runscript file X
tool.datadir file X
tool.install_setup bool
================== ===== === === ===

"R": required
"B": inferred from the inherited metadata
@@ -342,3 +343,6 @@ tool.datadir file X
* tags: optional list of names to group benchmarks
* extra_opts: optional list of args to pass to ``tool.runscript``
* runscript: the benchmark script to use instead of run_benchmark.py.
* install_setup: when ``true``, run ``pip install -e .`` in the
benchmark directory to install it in the virtual environment. This has the
effect of running a ``setup.py`` file, if present.
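
As a purely hypothetical illustration of what ``install_setup`` enables, a
benchmark directory could ship a small ``setup.py`` that builds a C helper;
the names below are placeholders, not part of pyperformance:

    from setuptools import setup, Extension

    # Built and installed into the benchmark venv when install_setup = true.
    setup(
        name="pyperformance_bm_example",
        ext_modules=[Extension("bm_example.helper", sources=["helper.c"])],
        package_dir={"bm_example": "src"},
    )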
pyperformance/_benchmark.py (7 changes: 7 additions & 0 deletions)
@@ -164,6 +164,13 @@ def runscript(self):
    def extra_opts(self):
        return self._get_metadata_value('extra_opts', ())

    @property
    def setup_py(self):
        if not self._get_metadata_value('install_setup', False):
            return None
        filename = os.path.join(os.path.dirname(self.metafile), 'setup.py')
        return filename if os.path.exists(filename) else None

    # Other metadata keys:
    # * base
    # * python
pyperformance/_benchmark_metadata.py (4 changes: 4 additions & 0 deletions)
@@ -32,6 +32,7 @@
    'datadir': None,
    'runscript': None,
    'extra_opts': None,
    'install_setup': None,
}


@@ -228,6 +229,9 @@ def _resolve_value(field, value, rootdir):
        for opt in value:
            if not opt or not isinstance(opt, str):
                raise TypeError(f'extra_opts should be a list of strings, got {value!r}')
    elif field == 'install_setup':
        if not isinstance(value, bool):
            raise TypeError(f'install_setup should be a bool, got {value!r}')
    else:
        raise NotImplementedError(field)
    return value
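
A quick illustration of the new validation branch (hypothetical calls; the
``rootdir`` argument is not consulted for this field):

    _resolve_value('install_setup', True, '.')    # returns True
    _resolve_value('install_setup', 'yes', '.')   # raises TypeError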
pyperformance/_pip.py (14 changes: 10 additions & 4 deletions)
@@ -149,10 +149,16 @@ def install_requirements(reqs, *extra,
    args = []
    if upgrade:
        args.append('-U')  # --upgrade
    for reqs in [reqs, *extra]:
        if os.path.isfile(reqs) and reqs.endswith('.txt'):
            args.append('-r')  # --requirement
        args.append(reqs)
    for req in [reqs, *extra]:
        if os.path.isfile(req):
            name = os.path.basename(req)
            if name == "setup.py":
                req = os.path.dirname(req)

Review comment: The reason we use the dirname isn't obvious, so it may be worth adding a comment here indicating pip's limitations.

elif name == "requirements.txt":
args.append('-r') # --requirement
else:
raise ValueError(f"pip doesn't know how to install{req}")
args.append(req)
return run_pip('install', *args, **kwargs)


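To make the new dispatch concrete, an illustrative call (the paths are made
up) would be handled as follows; pip cannot install a ``setup.py`` file
directly, but it can install from the directory that contains it:

    from pyperformance._pip import install_requirements

    install_requirements(
        "/path/to/bm_ctypes/requirements.txt",  # requirements.txt -> pip install -r <path>
        "/path/to/bm_ctypes/setup.py",          # setup.py -> pip install /path/to/bm_ctypes
        "pyperf",                               # not a file -> passed through as a requirement spec
    )
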
pyperformance/data-files/benchmarks/MANIFEST (3 changes: 3 additions & 0 deletions)
@@ -12,6 +12,8 @@ generators <local>
chameleon <local>
chaos <local>
crypto_pyaes <local>
ctypes <local>
ctypes_argtypes <local:ctypes>
deepcopy <local>
deltablue <local>
django_template <local>
@@ -71,6 +73,7 @@ xml_etree <local>
#apps
#math
#template
#extension


[group default]
@@ -0,0 +1,11 @@
[project]
name = "pyperformance_bm_ctypes_argtypes"
requires-python = ">=3.7"
dependencies = ["pyperf"]
urls = {repository = "https://github.com/python/pyperformance"}
dynamic = ["version"]

[tool.pyperformance]
name = "ctypes_argtypes"
tags = "extension"
extra_opts = ["--argtypes"]
pyperformance/data-files/benchmarks/bm_ctypes/cmodule.c (54 changes: 54 additions & 0 deletions)
@@ -0,0 +1,54 @@
#include <Python.h>

#if defined(_WIN32) || defined(__CYGWIN__)
#define EXPORTED_SYMBOL __declspec(dllexport)
#else
#define EXPORTED_SYMBOL
#endif


EXPORTED_SYMBOL
void void_foo_void(void) {

}

EXPORTED_SYMBOL
int int_foo_int(int a) {
    return a + 1;
}

EXPORTED_SYMBOL
void void_foo_int(int a) {

}

EXPORTED_SYMBOL
void void_foo_int_int(int a, int b) {

}

EXPORTED_SYMBOL
void void_foo_int_int_int(int a, int b, int c) {

}

EXPORTED_SYMBOL
void void_foo_int_int_int_int(int a, int b, int c, int d) {

}

EXPORTED_SYMBOL
void void_foo_constchar(const char *str) {

}

PyMODINIT_FUNC
PyInit_cmodule(void) {
    // DELIBERATELY EMPTY

    // This isn't actually a Python extension module (it's used via ctypes), so
    // this entry point function will never be called. However, we are utilizing
    // setuptools to build it, and on Windows, setuptools explicitly passes the
    // flag /EXPORT:PyInit_cmodule, so it must be defined.
    return NULL;
}
pyperformance/data-files/benchmarks/bm_ctypes/pyproject.toml (11 changes: 11 additions & 0 deletions)
@@ -0,0 +1,11 @@
[project]
name = "pyperformance_bm_ctypes"
requires-python = ">=3.7"
dependencies = ["pyperf", "setuptools"]
urls = {repository = "https://github.com/python/pyperformance"}
dynamic = ["version"]

[tool.pyperformance]
name = "ctypes"
tags = "extension"
install_setup = true
@@ -0,0 +1 @@
setuptools==62.4.0
pyperformance/data-files/benchmarks/bm_ctypes/run_benchmark.py (95 changes: 95 additions & 0 deletions)
@@ -0,0 +1,95 @@
"""
Test the function call overhead of ctypes.
"""
import pyperf


import ctypes
import importlib.util


spec = importlib.util.find_spec("bm_ctypes.cmodule")
if spec is None:
    raise ImportError("Can't find bm_ctypes.cmodule shared object file")
ext = ctypes.cdll.LoadLibrary(spec.origin)


def benchmark_argtypes(loops):
    void_foo_void = ext.void_foo_void
    void_foo_void.argtypes = []
    void_foo_void.restype = None

    int_foo_int = ext.int_foo_int
    int_foo_int.argtypes = [ctypes.c_int]
    int_foo_int.restype = ctypes.c_int

    void_foo_int = ext.void_foo_int
    void_foo_int.argtypes = [ctypes.c_int]
    void_foo_int.restype = None

    void_foo_int_int = ext.void_foo_int_int
    void_foo_int_int.argtypes = [ctypes.c_int, ctypes.c_int]
    void_foo_int_int.restype = None

    void_foo_int_int_int = ext.void_foo_int_int_int
    void_foo_int_int_int.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
    void_foo_int_int_int.restype = None

    void_foo_int_int_int_int = ext.void_foo_int_int_int_int
    void_foo_int_int_int_int.argtypes = [
        ctypes.c_int,
        ctypes.c_int,
        ctypes.c_int,
        ctypes.c_int,
    ]
    void_foo_int_int_int_int.restype = None

    void_foo_constchar = ext.void_foo_constchar
    void_foo_constchar.argtypes = [ctypes.c_char_p]
    void_foo_constchar.restype = None

    return benchmark(loops)


def benchmark(loops):
    void_foo_void = ext.void_foo_void
    int_foo_int = ext.int_foo_int
    void_foo_int = ext.void_foo_int
    void_foo_int_int = ext.void_foo_int_int
    void_foo_int_int_int = ext.void_foo_int_int_int
    void_foo_int_int_int_int = ext.void_foo_int_int_int_int
    void_foo_constchar = ext.void_foo_constchar

    range_it = range(loops)

    # Test calling the functions using the implied arguments mechanism
    t0 = pyperf.perf_counter()

    for _ in range_it:
        void_foo_void()
        int_foo_int(1)
        void_foo_int(1)
        void_foo_int_int(1, 2)
        void_foo_int_int_int(1, 2, 3)
        void_foo_int_int_int_int(1, 2, 3, 4)
        void_foo_constchar(b"bytes")
Review comment on lines +69 to +75: The real benefit of micro-benchmarks is that it narrows down where performance regressions might be. With that in mind, would these different signatures have enough independent potential for regression that it would make sense to have a separate benchmark for each? Would it be worth bothering even if they did?


    return pyperf.perf_counter() - t0


def add_cmdline_args(cmd, args):
    if args.argtypes:
        cmd.append("--argtypes")


if __name__ == "__main__":
    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
    runner.metadata["description"] = "ctypes function call overhead benchmark"

    runner.argparser.add_argument("--argtypes", action="store_true")
    options = runner.parse_args()

    if options.argtypes:
        runner.bench_time_func("ctypes_argtypes", benchmark_argtypes)
    else:
        runner.bench_time_func("ctypes", benchmark)
pyperformance/data-files/benchmarks/bm_ctypes/setup.py (11 changes: 11 additions & 0 deletions)
@@ -0,0 +1,11 @@
from setuptools import setup, Extension

# Compile the C shared object containing functions to call through ctypes. It
# isn't technically a Python C extension, but this is the easiest way to build
# it in a cross-platform way.

setup(
name="pyperformance_bm_ctypes",
ext_modules=[Extension("bm_ctypes.cmodule", sources=["cmodule.c"])],
package_dir={"bm_ctypes": "src"},
)
Empty file.
pyperformance/venv.py (4 changes: 4 additions & 0 deletions)
@@ -24,6 +24,10 @@ def from_benchmarks(cls, benchmarks):
        for bench in benchmarks or ():
            filename = bench.requirements_lockfile
            self._add_from_file(filename)
            if bench.setup_py:
                # pip doesn't support installing a setup.py,
                # but it does support installing from the directory it is in.
Review comment on lines +28 to +29: This comment is what I was thinking of above. Consider moving it there.

                self._add(bench.setup_py)
        return self

    def __init__(self):