move data reshaping out of batched_call
This enhances the consistency of data shapes across sequential calls to samplers.

Also improves the error message for unsupported numpy operations on CUDA tensors.
LarsKue committed Jun 13, 2024
1 parent b408352 commit c5b0f98
Showing 3 changed files with 50 additions and 18 deletions.
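
For orientation, the reshaping that previously happened inside batched_call now runs once at the end of SequentialSimulator.sample, so every value returned by a sequence of samplers ends up with at least a trailing feature axis. A minimal sketch of that rule in isolation (the dict keys and shapes below are illustrative, not taken from the commit):

# Minimal sketch (not part of the commit) of the reshaping rule now applied
# in SequentialSimulator.sample; keys and shapes are illustrative.
import keras

def normalize_shapes(data: dict) -> dict:
    # Expand rank-1 values to column vectors so every entry is at least rank 2.
    for key, value in data.items():
        if keras.ops.ndim(value) == 1:
            data[key] = keras.ops.expand_dims(value, -1)
    return data

batch = {
    "r": keras.random.normal((64,)),         # rank 1: one scalar per sample
    "theta": keras.random.uniform((64, 2)),  # already rank 2
}
batch = normalize_shapes(batch)
print(keras.ops.shape(batch["r"]), keras.ops.shape(batch["theta"]))  # (64, 1) (64, 2)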
17 changes: 16 additions & 1 deletion bayesflow/experimental/simulators/sequential_simulator.py
@@ -1,4 +1,6 @@

+import keras
+
 from typing import Sequence
 
 from bayesflow.experimental.types import Sampler, Shape, Tensor
@@ -35,7 +37,20 @@ def __init__(self, samplers: Sequence[Sampler]):

     def sample(self, shape: Shape) -> dict[str, Tensor]:
         data = {}
 
         for sampler in self.samplers:
-            data |= batched_call(sampler, shape[0], **data)
+            try:
+                data |= batched_call(sampler, shape[0], **data)
+            except TypeError as e:
+                if keras.backend.backend() == "torch" and "device" in str(e):
+                    raise RuntimeError(f"Encountered an unexpected device error when sampling. "
+                                       f"This can happen when you use numpy in conjunction with automatic "
+                                       f"vectorization for samplers with arguments. Note that the arguments passed "
+                                       f"to the samplers are always tensors, which may live on the GPU. "
+                                       f"Performing numpy operations on these is prohibited.") from e
+
+        for key, value in data.items():
+            if keras.ops.ndim(value) == 1:
+                data[key] = keras.ops.expand_dims(value, -1)
 
         return data
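
The new except branch targets samplers that apply numpy functions to their tensor arguments under the torch backend, where the resulting TypeError mentions the tensor's device. A hedged sketch of the sampler pattern that avoids the problem, using keras.ops on the tensor arguments instead of numpy (the sampler name and math are illustrative, not part of the commit):

# Illustrative only: a sampler whose argument is a backend tensor.
import keras

def observables(theta):
    # theta arrives as a backend tensor and may live on the GPU under torch,
    # so numpy calls such as np.abs(theta) can raise a device-related TypeError.
    # keras.ops operates on the tensor wherever it lives:
    return dict(x=keras.ops.sqrt(keras.ops.abs(theta)))

print(observables(keras.ops.convert_to_tensor([0.3, -0.4])))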
7 changes: 0 additions & 7 deletions bayesflow/experimental/utils/dictutils.py
@@ -50,20 +50,13 @@ def batched_call(f, batch_size, *args, **kwargs):
     try:
         data = f((batch_size,), *args, **kwargs)
         data = {key: keras.ops.convert_to_tensor(value) for key, value in data.items()}
-        for key, value in data.items():
-            if keras.ops.ndim(value) == 1:
-                data[key] = keras.ops.expand_dims(value, -1)
-
         return data
     except TypeError:
         pass
 
     def vectorized(elements):
         data = f(*elements[1:])
         data = {key: keras.ops.convert_to_tensor(value) for key, value in data.items()}
-        for key, value in data.items():
-            if keras.ops.ndim(value) == 0:
-                data[key] = keras.ops.expand_dims(value, 0)
         return data
 
     args = convert_args(f, *args, **kwargs)
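
After this change, batched_call only converts a sampler's outputs to tensors; it no longer expands their rank, leaving shape normalization to SequentialSimulator.sample. A rough sketch of the batched code path shown above, assuming batched_call can be imported from this module and using an illustrative sampler:

import keras
from bayesflow.experimental.utils.dictutils import batched_call  # assumed import path

def batched_contexts(batch_shape):
    # A sampler that accepts the batch shape directly, so the batched path is used.
    return dict(r=keras.random.normal(batch_shape + (1,), mean=0.1, stddev=0.01))

data = batched_call(batched_contexts, 64)
# Values come back as tensors with whatever rank the sampler produced;
# the caller (SequentialSimulator.sample) handles any expand_dims.
print(keras.ops.shape(data["r"]))  # (64, 1)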
44 changes: 34 additions & 10 deletions tests/test_two_moons/conftest.py
@@ -1,6 +1,7 @@
 import math
 
 import keras
+import numpy as np
 import pytest
 

@@ -11,20 +12,43 @@ def batch_size():

 @pytest.fixture()
 def simulator():
-    class Simulator:
-        def sample(self, batch_shape):
-            r = keras.random.normal(shape=batch_shape + (1,), mean=0.1, stddev=0.01)
-            alpha = keras.random.uniform(shape=batch_shape + (1,), minval=-0.5 * math.pi, maxval=0.5 * math.pi)
-            theta = keras.random.uniform(shape=batch_shape + (2,), minval=-1.0, maxval=1.0)
+    from bayesflow.experimental.simulators import SequentialSimulator
 
-            x1 = -keras.ops.abs(theta[..., :1] + theta[..., 1:]) / keras.ops.sqrt(2.0) + r * keras.ops.cos(alpha) + 0.25
-            x2 = (-theta[..., :1] + theta[..., 1:]) / keras.ops.sqrt(2.0) + r * keras.ops.sin(alpha)
+    def contexts():
+        r = np.random.normal(0.1, 0.01)
+        alpha = np.random.uniform(-0.5 * np.pi, 0.5 * np.pi)
 
-            x = keras.ops.concatenate([x1, x2], axis=-1)
+        return dict(r=r, alpha=alpha)
 
-            return dict(r=r, alpha=alpha, theta=theta, x=x)
+    def parameters():
+        theta = np.random.uniform(-1.0, 1.0, size=2)
 
-    return Simulator()
+        return dict(theta=theta)
+
+    def observables(r, alpha, theta):
+        x1 = -keras.ops.abs(theta[0] + theta[1]) / np.sqrt(2.0) + r * keras.ops.cos(alpha) + 0.25
+        x2 = (-theta[0] + theta[1]) / np.sqrt(2.0) + r * keras.ops.sin(alpha)
+
+        return dict(x=keras.ops.stack([x1, x2]))
+
+    simulator = SequentialSimulator([contexts, parameters, observables])
+
+    return simulator
+
+    # class Simulator:
+    #     def sample(self, batch_shape):
+    #         r = keras.random.normal(shape=batch_shape + (1,), mean=0.1, stddev=0.01)
+    #         alpha = keras.random.uniform(shape=batch_shape + (1,), minval=-0.5 * math.pi, maxval=0.5 * math.pi)
+    #         theta = keras.random.uniform(shape=batch_shape + (2,), minval=-1.0, maxval=1.0)
+    #
+    #         x1 = -keras.ops.abs(theta[..., :1] + theta[..., 1:]) / keras.ops.sqrt(2.0) + r * keras.ops.cos(alpha) + 0.25
+    #         x2 = (-theta[..., :1] + theta[..., 1:]) / keras.ops.sqrt(2.0) + r * keras.ops.sin(alpha)
+    #
+    #         x = keras.ops.concatenate([x1, x2], axis=-1)
+    #
+    #         return dict(r=r, alpha=alpha, theta=theta, x=x)
+    #
+    #     return Simulator()
 
 
 @pytest.fixture()
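
A rough end-to-end usage sketch of the rewritten fixture, showing the shapes one would expect after the expand_dims rule above (the batch size and printed shapes are assumptions, not output from the commit):

import keras
import numpy as np
from bayesflow.experimental.simulators import SequentialSimulator

def contexts():
    return dict(r=np.random.normal(0.1, 0.01), alpha=np.random.uniform(-0.5 * np.pi, 0.5 * np.pi))

def parameters():
    return dict(theta=np.random.uniform(-1.0, 1.0, size=2))

def observables(r, alpha, theta):
    x1 = -keras.ops.abs(theta[0] + theta[1]) / np.sqrt(2.0) + r * keras.ops.cos(alpha) + 0.25
    x2 = (-theta[0] + theta[1]) / np.sqrt(2.0) + r * keras.ops.sin(alpha)
    return dict(x=keras.ops.stack([x1, x2]))

simulator = SequentialSimulator([contexts, parameters, observables])
data = simulator.sample((64,))  # illustrative batch size
print({key: keras.ops.shape(value) for key, value in data.items()})
# roughly: {'r': (64, 1), 'alpha': (64, 1), 'theta': (64, 2), 'x': (64, 2)}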
