Ml processors for ann #90

Merged
merged 6 commits into from Oct 21, 2024

Changes from 4 commits
6 changes: 5 additions & 1 deletion src/dspeed/processing_chain.py
@@ -1255,7 +1255,11 @@ def _loadlh5(path_to_file, path_in_file: str) -> np.array:  # noqa: N805
"""

try:
-        loaded_data = sto.read(path_in_file, path_to_file)[0].nda
+        loaded_data = sto.read(path_in_file, path_to_file)[0]
+        if isinstance(loaded_data, lgdo.types.Scalar):
+            loaded_data = loaded_data.value
+        else:
+            loaded_data = loaded_data.nda
except ValueError:
raise ProcessingChainError(f"LH5 file not found: {path_to_file}")
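
For context, this branch is needed because sto.read can return either an array-like LGDO (data exposed through .nda) or an lgdo.types.Scalar (data exposed through .value). A minimal sketch of the two access patterns, assuming the legend-pydataobj lgdo package exposes Array and Scalar at the top level and using made-up toy values:

    import numpy as np
    import lgdo

    arr = lgdo.Array(np.array([1.0, 2.0, 3.0]))  # array-like LGDO, payload in .nda
    scl = lgdo.Scalar(42.0)                      # scalar LGDO, payload in .value

    # the same dispatch pattern used in _loadlh5 above
    for obj in (arr, scl):
        data = obj.value if isinstance(obj, lgdo.types.Scalar) else obj.nda
        print(type(obj).__name__, data)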

12 changes: 12 additions & 0 deletions src/dspeed/processors/__init__.py
@@ -78,6 +78,13 @@
from .linear_slope_fit import linear_slope_diff, linear_slope_fit
from .log_check import log_check
from .min_max import min_max, min_max_norm
from .ml import (
classification_layer_no_bias,
classification_layer_with_bias,
dense_layer_no_bias,
dense_layer_with_bias,
normalisation_layer,
)
from .moving_windows import (
avg_current,
moving_window_left,
@@ -179,4 +186,9 @@
"convolve_exp",
"convolve_damped_oscillator",
"inject_damped_oscillation",
"dense_layer_no_bias",
"dense_layer_with_bias",
"classification_layer_no_bias",
"classification_layer_with_bias",
"normalisation_layer",
]
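
Registering the new functions in __init__.py is what lets a processing-chain config with "module": "dspeed.processors" resolve them; it also makes them importable directly. A trivial check, assuming this branch is installed:

    from dspeed.processors import (
        classification_layer_no_bias,
        classification_layer_with_bias,
        dense_layer_no_bias,
        dense_layer_with_bias,
        normalisation_layer,
    )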
355 changes: 355 additions & 0 deletions src/dspeed/processors/ml.py
@@ -0,0 +1,355 @@
from __future__ import annotations

import numpy as np
from numba import guvectorize

from ..utils import numba_defaults_kwargs as nb_kwargs


@guvectorize(
["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
"(n)->(n)",
**nb_kwargs,
)
def relu(x_in: np.ndarray, x_out: np.ndarray) -> None:
"""
ReLU activation function: 0 if x_in < 0, else x_in.
"""
x_out[:] = x_in * (x_in > 0)


@guvectorize(
["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
"(n)->(n)",
**nb_kwargs,
)
def leaky_relu(x_in: np.ndarray, x_out: np.ndarray) -> None:
"""
Leaky ReLU activation function: 0.01 * x_in if x_in < 0, else x_in.
"""
x_out[:] = x_in * (x_in > 0) + 0.01 * x_in * (x_in < 0)


@guvectorize(
["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
"(n)->(n)",
**nb_kwargs,
)
def sigmoid(x_in: np.ndarray, x_out: np.ndarray) -> None:
"""
Sigmoid activation function, 1 / (1 + exp(-x_in)).
"""
x_out[:] = 1 / (1 + np.exp(-x_in))


@guvectorize(
["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
"(n)->(n)",
**nb_kwargs,
)
def softmax(x_in: np.ndarray, x_out: np.ndarray) -> None:
"""
softmax activation option (char 'm'); note that as written this computes log(1 + exp(x_in)) element-wise, i.e. the softplus function.
"""
x_out[:] = np.log(1 + np.exp(x_in))
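
These activation helpers are element-wise gufuncs, so they can be sanity-checked directly on small arrays. A minimal sketch with made-up inputs, assuming the branch is installed; note that the tanh option has no helper here and is applied inline via np.tanh in the layer functions below:

    import numpy as np
    from dspeed.processors.ml import leaky_relu, relu, sigmoid, softmax

    x = np.array([-2.0, 0.0, 3.0])
    print(relu(x))        # [ 0.    0.    3.  ]
    print(leaky_relu(x))  # [-0.02  0.    3.  ]
    print(sigmoid(x))     # 1 / (1 + exp(-x)), element-wise
    print(softmax(x))     # log(1 + exp(x)), element-wise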


@guvectorize(
[
"void(float32[::1], float32[:,::1], char, float32[:])",
"void(float64[::1], float64[:,::1], char, float64[:])",
],
"(n),(n,m),()->(m)",
**nb_kwargs,
)
def dense_layer_no_bias(
x_in: np.ndarray, kernel: np.ndarray, activation_func: np.int8, x_out: np.ndarray
) -> None:
"""
Basic dense neural network layer with no bias, f(x.W)

Parameters
----------
x_in
the input waveform, shape (n).
kernel
the matrix of weights, shape (n, m).
activation_func
the activation function to use, specified by a char:
s - sigmoid
r - relu
l - leaky relu
m - softmax
t - tanh
x_out
the output vector, shape (m).

JSON Configuration Example
--------------------------

.. code-block :: json

"layer_1": {
"function": "dense_layer_no_bias",
"module": "dspeed.processors",
"args": ["wf_blsub", "db.kernel", "'s'", "layer_1"],
"unit": "ADC"
}
"""

x_out[:] = np.nan

if np.isnan(x_in).any():
return

temp = np.dot(x_in, kernel)

if activation_func == ord("s"):
sigmoid(temp, x_out)
elif activation_func == ord("r"):
relu(temp, x_out)
elif activation_func == ord("l"):
leaky_relu(temp, x_out)
elif activation_func == ord("m"):
softmax(temp, x_out)
elif activation_func == ord("t"):
x_out[:] = np.tanh(temp)
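
The gufunc can also be called directly outside a processing chain, which is a convenient way to check a layer against plain NumPy. A minimal sketch with random toy shapes (n = 100 inputs, m = 16 outputs; the shapes and data are assumptions for illustration only):

    import numpy as np
    from dspeed.processors import dense_layer_no_bias

    rng = np.random.default_rng(0)
    x = rng.normal(size=100)        # one input "waveform", length n = 100
    w = rng.normal(size=(100, 16))  # kernel, shape (n, m)

    y = dense_layer_no_bias(x, w, ord("s"))  # 's' selects the sigmoid activation
    assert np.allclose(y, 1.0 / (1.0 + np.exp(-(x @ w))))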


@guvectorize(
[
"void(float32[::1], float32[:,::1], float32[:], char, float32[:])",
"void(float64[::1], float64[:,::1], float64[:], char, float64[:])",
],
"(n),(n,m),(m),()->(m)",
**nb_kwargs,
)
def dense_layer_with_bias(
x_in: np.ndarray,
kernel: np.ndarray,
bias: np.ndarray,
activation_func: np.int8,
x_out: np.ndarray,
) -> None:
"""
Basic dense neural network layer with bias added, f(x.W + b)

Parameters
----------
x_in
the input waveform, shape (n).
kernel
the matrix of weights, shape (n, m).
bias
the bias, shape (m).
activation_func
the activation function to use, specified by a char:
s - sigmoid
r - relu
l - leaky relu
m - softmax
t - tanh
x_out
the output vector, shape (m).

JSON Configuration Example
--------------------------

.. code-block :: json

"layer_1": {
"function": "dense_layer_with_bias",
"module": "dspeed.processors",
"args": ["wf_blsub", "db.kernel", "db.bias", "'s'", "layer_1"],
"unit": "ADC"
}
"""

x_out[:] = np.nan

if np.isnan(x_in).any():
return

temp = np.dot(x_in, kernel) + bias

if activation_func == ord("s"):
sigmoid(temp, x_out)
elif activation_func == ord("r"):
relu(temp, x_out)
elif activation_func == ord("l"):
leaky_relu(temp, x_out)
elif activation_func == ord("m"):
softmax(temp, x_out)
elif activation_func == ord("t"):
x_out[:] = np.tanh(temp)


@guvectorize(
[
"void(float32[::1], float32[::1], char, float32[:])",
"void(float64[::1], float64[::1], char, float64[:])",
],
"(n),(n),()->()",
**nb_kwargs,
)
def classification_layer_no_bias(
x_in: np.ndarray, kernel: np.ndarray, activation_func: np.int8, x_out: float
) -> None:
"""
This is the same as dense_layer_no_bias but the final output is a single number, f(x.W)

Parameters
----------
x_in
the input waveform, shape (n).
kernel
the vector of weights, shape (n).
activation_func
the activation function to use, specified by a char:
s - sigmoid
r - relu
l - leaky relu
m - softmax
t - tanh
x_out
the output value.

JSON Configuration Example
--------------------------

.. code-block :: json

"classifier": {
"function": "dense_layer_with_bias",
"module": "dspeed.processors",
"args": ["wf_blsub", "db.kernel", "'s'", "classifier"],
"unit": "ADC"
}
"""
x_out[0] = np.nan

if np.isnan(x_in).any():
return

temp = np.zeros(1, dtype=x_out.dtype)
temp[0] = np.dot(x_in, kernel)

if activation_func == ord("s"):
sigmoid(temp, x_out)
elif activation_func == ord("r"):
relu(temp, x_out)
elif activation_func == ord("l"):
leaky_relu(temp, x_out)
elif activation_func == ord("m"):
softmax(temp, x_out)
elif activation_func == ord("t"):
x_out[0] = np.tanh(temp[0])
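
Because the core signature is (n),(n),()->(), the classification layers reduce each waveform to a single value, and NumPy broadcasting extends this to a whole block of events in one call. A small sketch with made-up shapes, assuming the branch is installed:

    import numpy as np
    from dspeed.processors import classification_layer_no_bias

    rng = np.random.default_rng(1)
    w = rng.normal(size=16)               # weight vector, length n = 16
    x_one = rng.normal(size=16)           # a single length-16 input
    x_many = rng.normal(size=(1000, 16))  # 1000 events at once

    print(classification_layer_no_bias(x_one, w, ord("s")).shape)   # ()
    print(classification_layer_no_bias(x_many, w, ord("s")).shape)  # (1000,)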


@guvectorize(
[
"void(float32[::1], float32[::1], float32, char, float32[:])",
"void(float64[::1], float64[::1], float64, char, float64[:])",
],
"(n),(n),(),()->()",
**nb_kwargs,
)
def classification_layer_with_bias(
x_in: np.ndarray,
kernel: np.ndarray,
bias: float,
activation_func: np.int8,
x_out: float,
) -> None:
"""
This is the same as dense_layer_with_bias but the final output is a single number, f(x.W + b)

Parameters
----------
x_in
the input waveform, shape (n).
kernel
the vector of weights, shape (n).
bias
the bias, in this case a single value.
activation_func
the activation function to use, specified by a char:
s - sigmoid
r - relu
l - leaky relu
m - softmax
t - tanh
x_out
the output value.

JSON Configuration Example
--------------------------

.. code-block :: json

"classifier": {
"function": "dense_layer_with_bias",
"module": "dspeed.processors",
"args": ["wf_blsub", "db.kernel", "db.bias","'s'", "classifier"],
"unit": "ADC"
}
"""
x_out[0] = np.nan

if np.isnan(x_in).any():
return

temp = np.zeros_like(x_out)
temp[0] = np.dot(x_in, kernel) + bias

if activation_func == ord("s"):
sigmoid(temp, x_out)
elif activation_func == ord("r"):
relu(temp, x_out)
elif activation_func == ord("l"):
leaky_relu(temp, x_out)
elif activation_func == ord("m"):
softmax(temp, x_out)
elif activation_func == ord("t"):
x_out[0] = np.tanh(temp[0])


@guvectorize(
[
"void(float32[:], float32[:], float32[:], float32[:])",
"void(float64[:], float64[:], float64[:], float64[:])",
],
"(n),(n),(n)->(n)",
**nb_kwargs,
)
def normalisation_layer(
x_in: np.ndarray, means: np.ndarray, variances: np.ndarray, x_out: np.ndarray
) -> None:
"""
Normalisation layer, (x_in - mu) / sqrt(variance).
Note that these are variances, not standard deviations.

Parameters
----------
x_in
the input waveform, shape (n).
means
array of means for each input value, shape (n).
variances
array of variances for each input value, shape (n).
x_out
the output vector, shape (n).

JSON Configuration Example
--------------------------

.. code-block :: json

"wf_normed": {
Collaborator comment on this line: The doc generation is failing with "/opt/hostedtoolcache/Python/3.10.15/x64/lib/python3.10/site-packages/dspeed/processors/ml.py:docstring of dspeed.processors.ml.normalisation_layer:21: WARNING: Definition list ends without a blank line; unexpected unindent. [docutils]". I think this code block needs an extra indent.
"function": "normalisation_layer",
"module": "dspeed.processors",
"args": ["wf_blsub", "db.mean", "db.variance", "wf_normed"],
"unit": "ADC"
}
"""
x_out[:] = (x_in - means) / np.sqrt(variances)
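
Taken together, the new processors can be chained in a single DSP config, for example normalising the waveform, applying one hidden layer, and reducing it to a single classification value. This is a sketch only; the db.mean, db.variance, db.kernel_* and db.bias_* database entries are hypothetical, following the docstring examples above:

    "wf_normed": {
        "function": "normalisation_layer",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.mean", "db.variance", "wf_normed"],
        "unit": "ADC"
    },
    "layer_1": {
        "function": "dense_layer_with_bias",
        "module": "dspeed.processors",
        "args": ["wf_normed", "db.kernel_1", "db.bias_1", "'r'", "layer_1"],
        "unit": "ADC"
    },
    "classifier": {
        "function": "classification_layer_with_bias",
        "module": "dspeed.processors",
        "args": ["layer_1", "db.kernel_2", "db.bias_2", "'s'", "classifier"],
        "unit": "ADC"
    }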