Skip to content

Commit a19dfa2

Browse files
committed
Move benchmarks tests into separate folder and cleanup redundancies
1 parent 08bf88c commit a19dfa2

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

44 files changed

+1844
-1594
lines changed

tests/benchmarks/__init__.py

Whitespace-only changes.

tests/benchmarks/test_blas.py

Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,127 @@
1+
import numpy as np
2+
import pytest
3+
4+
from pytensor import In, function
5+
from pytensor.tensor import dot, empty, matrix, outer, scalar, tensor, vector
6+
from pytensor.tensor.blas_c import CGemv
7+
8+
9+
@pytest.mark.parametrize("dtype", ("float64", "float32", "mixed"))
def test_mat_vec_dot_benchmark_numba(dtype, benchmark):
    """Benchmark a 512x512 matrix-vector dot in NUMBA mode, including a mixed-dtype case."""
    a_dtype = "float64" if dtype == "mixed" else dtype
    x_dtype = "float32" if dtype == "mixed" else dtype
    A = tensor("A", shape=(512, 512), dtype=a_dtype)
    x = tensor("x", shape=(512,), dtype=x_dtype)

    fn = function([A, x], dot(A, x), mode="NUMBA", trust_input=True)

    rng = np.random.default_rng(948)
    A_test = rng.standard_normal(size=A.type.shape).astype(A.type.dtype)
    x_test = rng.standard_normal(size=x.type.shape).astype(x.type.dtype)
    # Sanity-check against NumPy before timing (loose atol for the float32 cases).
    np.testing.assert_allclose(fn(A_test, x_test), np.dot(A_test, x_test), atol=1e-4)
    benchmark(fn, A_test, x_test)
22+
23+
24+
def _test_ger_benchmark(mode, n, inplace, benchmark):
    """Benchmark the rank-1 update (GER) ``alpha * outer(x, y) + A`` in the given mode."""
    alpha = scalar("alpha")
    x, y = vector("x"), vector("y")
    A = matrix("A")

    rank1_update = alpha * outer(x, y) + A

    # Optionally allow the compiled function to overwrite A in place.
    fn = function(
        [alpha, x, y, In(A, mutable=inplace)],
        rank1_update,
        mode=mode,
        trust_input=True,
    )

    # Seed depends on n so each problem size gets distinct (but reproducible) data.
    rng = np.random.default_rng([2274, n])
    alpha_test = rng.normal(size=())
    x_test = rng.normal(size=(n,))
    y_test = rng.normal(size=(n,))
    A_test = rng.normal(size=(n, n))

    benchmark(fn, alpha_test, x_test, y_test, A_test)
46+
47+
48+
@pytest.mark.parametrize("inplace", (True, False), ids=["inplace", "no_inplace"])
@pytest.mark.parametrize("n", [2**7, 2**9, 2**13])
def test_ger_benchmark_c(n, inplace, benchmark):
    """GER benchmark on the C (CVM) backend."""
    _test_ger_benchmark(mode="CVM", n=n, inplace=inplace, benchmark=benchmark)
52+
53+
54+
@pytest.mark.parametrize("inplace", (True, False), ids=["inplace", "no_inplace"])
@pytest.mark.parametrize("n", [2**7, 2**9, 2**13])
def test_ger_benchmark_numba(n, inplace, benchmark):
    """GER benchmark on the NUMBA backend."""
    _test_ger_benchmark(mode="NUMBA", n=n, inplace=inplace, benchmark=benchmark)
58+
59+
60+
def test_cgemv_vector_dot_benchmark(benchmark):
    """Benchmark a large vector-vector dot expressed as a single-row CGemv."""
    n = 400_000
    a = vector("A", shape=(n,))
    b = vector("x", shape=(n,))

    # a[None] turns a into a (1, n) matrix, so the dot product is one GEMV row
    # written into a length-1 output buffer.
    gemv_out = CGemv(inplace=True)(
        empty((1,)),
        1.0,
        a[None],
        b,
        0.0,
    )
    fn = function([a, b], gemv_out, accept_inplace=True, mode="CVM", trust_input=True)

    rng = np.random.default_rng(430)
    test_a = rng.normal(size=n)
    test_b = rng.normal(size=n)

    # Verify the CGemv formulation matches a plain NumPy dot before timing.
    np.testing.assert_allclose(fn(test_a, test_b), np.dot(test_a, test_b))

    benchmark(fn, test_a, test_b)
84+
85+
86+
@pytest.mark.parametrize(
    "neg_stride1", (True, False), ids=["neg_stride1", "pos_stride1"]
)
@pytest.mark.parametrize(
    "neg_stride0", (True, False), ids=["neg_stride0", "pos_stride0"]
)
@pytest.mark.parametrize("F_layout", (True, False), ids=["F_layout", "C_layout"])
def test_cgemv_negative_strides_benchmark(
    neg_stride0, neg_stride1, F_layout, benchmark
):
    """Benchmark CGemv on A inputs with negative and/or Fortran-ordered strides."""
    A = matrix("A", shape=(512, 512))
    x = vector("x", shape=(A.type.shape[-1],))
    y = vector("y", shape=(A.type.shape[0],))

    gemv_out = CGemv(inplace=False)(y, 1.0, A, x, 1.0)
    fn = function([A, x, y], gemv_out, trust_input=True, mode="CVM")

    rng = np.random.default_rng(430)
    A_val = rng.normal(size=A.type.shape)
    x_val = rng.normal(size=x.type.shape)
    y_val = rng.normal(size=y.type.shape)

    # Build the requested memory layout out of views (no copies) of A_val.
    if F_layout:
        A_val = A_val.T
    if neg_stride0:
        A_val = A_val[::-1]
    if neg_stride1:
        A_val = A_val[:, ::-1]
    assert (A_val.strides[0] < 0) == neg_stride0
    assert (A_val.strides[1] < 0) == neg_stride1

    # Check result is correct by using a copy of A with positive strides
    result = fn(A_val, x_val, y_val)
    np.testing.assert_allclose(result, fn(A_val.copy(), x_val, y_val))

    benchmark(fn, A_val, x_val, y_val)

tests/benchmarks/test_blockwise.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
import numpy as np
2+
import pytest
3+
4+
from pytensor import function
5+
from pytensor.tensor import diagonal, grad, log, tensor
6+
from pytensor.tensor.linalg import cholesky, solve_triangular
7+
8+
9+
def _test_blockwise_cholesky_benchmark(mode, benchmark):
    """Benchmark a batched (Blockwise) Cholesky factorization of a (5, 3, 3) stack."""
    from pytensor.tensor.blockwise import Blockwise

    mats = tensor(shape=(5, 3, 3))
    chol = cholesky(mats)
    # Guard that cholesky on a batched input really lowers to a Blockwise Op.
    assert isinstance(chol.owner.op, Blockwise)

    fn = function([mats], chol, mode=mode, trust_input=True)
    # Five SPD matrices: the identity scaled by 1..5.
    mats_test = np.eye(3) * np.arange(1, 6)[:, None, None]
    fn(mats_test)  # JIT compile
    benchmark(fn, mats_test)
20+
21+
22+
def test_blockwise_cholesky_benchmark_c(benchmark):
    """Blockwise Cholesky benchmark on the C (CVM) backend."""
    _test_blockwise_cholesky_benchmark(mode="CVM", benchmark=benchmark)
24+
25+
26+
def test_blockwise_cholesky_benchmark_numba(benchmark):
    """Blockwise Cholesky benchmark on the NUMBA backend."""
    _test_blockwise_cholesky_benchmark(mode="NUMBA", benchmark=benchmark)
28+
29+
30+
def _test_batched_mvnormal_logp_and_dlogp(
    mode, mu_batch_shape, cov_batch_shape, benchmark
):
    """Benchmark log-density and its gradients for a batched 10-d multivariate normal."""
    rng = np.random.default_rng(sum(map(ord, "batched_mvnormal")))

    # The value must broadcast against both mu and cov, so it takes the longer
    # batch shape of the two (mu wins ties, matching the original behavior).
    value_batch_shape = max(mu_batch_shape, cov_batch_shape, key=len)

    value = tensor("value", shape=(*value_batch_shape, 10))
    mu = tensor("mu", shape=(*mu_batch_shape, 10))
    cov = tensor("cov", shape=(*cov_batch_shape, 10, 10))

    test_values = [
        rng.normal(size=value.type.shape),
        rng.normal(size=mu.type.shape),
        # Diagonal with positive entries -> symmetric positive definite covariance.
        np.eye(cov.type.shape[-1]) * np.abs(rng.normal(size=cov.type.shape)),
    ]

    chol_cov = cholesky(cov, lower=True, on_error="raise")
    delta_trans = solve_triangular(chol_cov, value - mu, b_ndim=1)
    quaddist = (delta_trans**2).sum(axis=-1)
    # sum(log(diag(L))) is half the log-determinant of cov.
    logdet = log(diagonal(chol_cov, axis1=-2, axis2=-1)).sum(axis=-1)
    k = value.shape[-1]

    logp = -0.5 * k * (np.log(2 * np.pi)) - 0.5 * quaddist - logdet
    dlogp = grad(logp.sum(), wrt=[value, mu, cov])

    fn = function([value, mu, cov], [logp, *dlogp], mode=mode, trust_input=True)
    benchmark(fn, *test_values)
62+
63+
64+
@pytest.mark.parametrize(
    "mu_batch_shape", [(), (1000,), (4, 1000)], ids=lambda arg: f"mu:{arg}"
)
@pytest.mark.parametrize(
    "cov_batch_shape", [(), (1000,), (4, 1000)], ids=lambda arg: f"cov:{arg}"
)
def test_batched_mvnormal_logp_and_dlogp_c(mu_batch_shape, cov_batch_shape, benchmark):
    """Batched MvNormal logp/dlogp benchmark on the C (CVM) backend."""
    _test_batched_mvnormal_logp_and_dlogp(
        "CVM", mu_batch_shape, cov_batch_shape, benchmark
    )
77+
78+
79+
@pytest.mark.parametrize(
    "mu_batch_shape", [(), (1000,), (4, 1000)], ids=lambda arg: f"mu:{arg}"
)
@pytest.mark.parametrize(
    "cov_batch_shape", [(), (1000,), (4, 1000)], ids=lambda arg: f"cov:{arg}"
)
def test_batched_mvnormal_logp_and_dlogp_numba(
    mu_batch_shape, cov_batch_shape, benchmark
):
    """Batched MvNormal logp/dlogp benchmark on the NUMBA backend."""
    _test_batched_mvnormal_logp_and_dlogp(
        "NUMBA", mu_batch_shape, cov_batch_shape, benchmark
    )

tests/benchmarks/test_careduce.py

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import numpy as np
2+
import pytest
3+
4+
from pytensor import function, shared
5+
6+
7+
def _test_careduce_benchmark(axis, c_contiguous, mode, benchmark):
    """Benchmark a sum reduction over a 256**3 array, optionally via a transposed view."""
    N = 256
    x_test = np.random.uniform(size=(N, N, N))
    # A non-trivial transpose forces the reduction to walk non-contiguous memory.
    transpose_axis = (0, 1, 2) if c_contiguous else (2, 0, 1)

    x = shared(x_test, name="x", shape=x_test.shape)
    fn = function([], x.transpose(transpose_axis).sum(axis=axis), mode=mode)

    # Verify against NumPy before timing.
    np.testing.assert_allclose(
        fn(),
        x_test.transpose(transpose_axis).sum(axis=axis),
    )
    benchmark(fn)
21+
22+
23+
@pytest.mark.parametrize(
    "axis",
    (0, 1, 2, (0, 1), (0, 2), (1, 2), None),
    ids=lambda x: f"axis={x}",
)
@pytest.mark.parametrize(
    "c_contiguous",
    (True, False),
    ids=lambda x: f"c_contiguous={x}",
)
def test_careduce_benchmark_c(axis, c_contiguous, benchmark):
    """CAReduce (sum) benchmark on the C (CVM) backend."""
    _test_careduce_benchmark(axis, c_contiguous, "CVM", benchmark)
37+
38+
39+
@pytest.mark.parametrize(
    "axis",
    (0, 1, 2, (0, 1), (0, 2), (1, 2), None),
    ids=lambda x: f"axis={x}",
)
@pytest.mark.parametrize(
    "c_contiguous",
    (True, False),
    ids=lambda x: f"c_contiguous={x}",
)
def test_careduce_benchmark_numba(axis, c_contiguous, benchmark):
    """CAReduce (sum) benchmark on the NUMBA backend."""
    _test_careduce_benchmark(axis, c_contiguous, "NUMBA", benchmark)

0 commit comments

Comments
 (0)