diff --git a/src/dspeed/processing_chain.py b/src/dspeed/processing_chain.py
index b630dde..1995d6e 100644
--- a/src/dspeed/processing_chain.py
+++ b/src/dspeed/processing_chain.py
@@ -1255,7 +1255,11 @@ def _loadlh5(path_to_file, path_in_file: str) -> np.array:  # noqa: N805
     """
     try:
-        loaded_data = sto.read(path_in_file, path_to_file)[0].nda
+        loaded_data = sto.read(path_in_file, path_to_file)[0]
+        if isinstance(loaded_data, lgdo.types.Scalar):
+            loaded_data = loaded_data.value
+        else:
+            loaded_data = loaded_data.nda
     except ValueError:
         raise ProcessingChainError(f"LH5 file not found: {path_to_file}")
 
diff --git a/src/dspeed/processors/__init__.py b/src/dspeed/processors/__init__.py
index b636513..98f1203 100644
--- a/src/dspeed/processors/__init__.py
+++ b/src/dspeed/processors/__init__.py
@@ -78,6 +78,13 @@ from .linear_slope_fit import linear_slope_diff, linear_slope_fit
 from .log_check import log_check
 from .min_max import min_max, min_max_norm
+from .ml import (
+    classification_layer_no_bias,
+    classification_layer_with_bias,
+    dense_layer_no_bias,
+    dense_layer_with_bias,
+    normalisation_layer,
+)
 from .moving_windows import (
     avg_current,
     moving_window_left,
@@ -179,4 +186,9 @@
     "convolve_exp",
     "convolve_damped_oscillator",
     "inject_damped_oscillation",
+    "dense_layer_no_bias",
+    "dense_layer_with_bias",
+    "classification_layer_no_bias",
+    "classification_layer_with_bias",
+    "normalisation_layer",
 ]
diff --git a/src/dspeed/processors/ml.py b/src/dspeed/processors/ml.py
new file mode 100644
index 0000000..e0a2a2e
--- /dev/null
+++ b/src/dspeed/processors/ml.py
@@ -0,0 +1,384 @@
+"""
+Module containing machine learning (ML) processors. The DSP config can be used to
+combine these into a neural network; a simple example would be:
+
+.. code-block :: json
+
+    "layer_1": {
+        "function": "normalisation_layer",
+        "module": "dspeed.processors",
+        "args": ["wf_blsub", "db.mean", "db.variance", "layer_1"],
+        "unit": "ADC"
+    }
+
+    "layer_2": {
+        "function": "dense_layer_with_bias",
+        "module": "dspeed.processors",
+        "args": ["layer_1", "db.kernel", "db.bias", "'r'", "layer_2"],
+        "unit": "ADC"
+    }
+
+    "classifier": {
+        "function": "classification_layer_with_bias",
+        "module": "dspeed.processors",
+        "args": ["layer_2", "db.kernel", "db.bias", "'s'", "classifier"],
+        "unit": "ADC"
+    }
+
+"""
+
+from __future__ import annotations
+
+import numpy as np
+from numba import guvectorize
+
+from ..utils import numba_defaults_kwargs as nb_kwargs
+
+
+@guvectorize(
+    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
+    "(n)->(n)",
+    **nb_kwargs,
+)
+def relu(x_in: np.ndarray, x_out: np.ndarray) -> None:
+    """
+    ReLU activation function: 0 if x_in < 0 else x_in.
+    """
+    x_out[:] = x_in * (x_in > 0)
+
+
+@guvectorize(
+    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
+    "(n)->(n)",
+    **nb_kwargs,
+)
+def leaky_relu(x_in: np.ndarray, x_out: np.ndarray) -> None:
+    """
+    Leaky ReLU activation function: 0.01 * x_in if x_in < 0 else x_in.
+    """
+    x_out[:] = x_in * (x_in > 0) + 0.01 * x_in * (x_in < 0)
+
+
+@guvectorize(
+    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
+    "(n)->(n)",
+    **nb_kwargs,
+)
+def sigmoid(x_in: np.ndarray, x_out: np.ndarray) -> None:
+    """
+    Sigmoid activation function: 1 / (1 + exp(-x_in)).
+    """
+    x_out[:] = 1 / (1 + np.exp(-x_in))
+
+
+@guvectorize(
+    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
+    "(n)->(n)",
+    **nb_kwargs,
+)
+def softmax(x_in: np.ndarray, x_out: np.ndarray) -> None:
+    """
+    Softmax activation function: exp(x_in) / sum(exp(x_in)).
+    """
+    # shift by the maximum for numerical stability before exponentiating
+    x_out[:] = np.exp(x_in - np.max(x_in))
+    x_out[:] = x_out / np.sum(x_out)
+
+
+@guvectorize(
+    [
+        "void(float32[::1], float32[:,::1], char, float32[:])",
+        "void(float64[::1], float64[:,::1], char, float64[:])",
+    ],
+    "(n),(n,m),()->(m)",
+    **nb_kwargs,
+)
+def dense_layer_no_bias(
+    x_in: np.ndarray, kernel: np.ndarray, activation_func: np.int8, x_out: np.ndarray
+) -> None:
+    """
+    Basic dense neural network layer with no bias: f(x.W).
+
+    Parameters
+    ----------
+    x_in
+        the input waveform, shape n.
+    kernel
+        the matrix of weights, shape (n, m).
+    activation_func
+        the activation function to use, specified with a char:
+        s - sigmoid
+        r - relu
+        l - leaky relu
+        m - softmax
+        t - tanh
+    x_out
+        the output vector, shape m.
+
+    JSON Configuration Example
+    --------------------------
+
+    .. code-block :: json
+
+        "layer_1": {
+            "function": "dense_layer_no_bias",
+            "module": "dspeed.processors",
+            "args": ["wf_blsub", "db.kernel", "'s'", "layer_1"],
+            "unit": "ADC"
+        }
+    """
+
+    x_out[:] = np.nan
+
+    if np.isnan(x_in).any():
+        return
+
+    temp = np.dot(x_in, kernel)
+
+    if activation_func == ord("s"):
+        sigmoid(temp, x_out)
+    elif activation_func == ord("r"):
+        relu(temp, x_out)
+    elif activation_func == ord("l"):
+        leaky_relu(temp, x_out)
+    elif activation_func == ord("m"):
+        softmax(temp, x_out)
+    elif activation_func == ord("t"):
+        x_out[:] = np.tanh(temp)
+
+
+@guvectorize(
+    [
+        "void(float32[::1], float32[:,::1], float32[:], char, float32[:])",
+        "void(float64[::1], float64[:,::1], float64[:], char, float64[:])",
+    ],
+    "(n),(n,m),(m),()->(m)",
+    **nb_kwargs,
+)
+def dense_layer_with_bias(
+    x_in: np.ndarray,
+    kernel: np.ndarray,
+    bias: np.ndarray,
+    activation_func: np.int8,
+    x_out: np.ndarray,
+) -> None:
+    """
+    Basic dense neural network layer with bias: f(x.W + b).
+
+    Parameters
+    ----------
+    x_in
+        the input waveform, shape n.
+    kernel
+        the matrix of weights, shape (n, m).
+    bias
+        the bias vector, shape m.
+    activation_func
+        the activation function to use, specified with a char:
+        s - sigmoid
+        r - relu
+        l - leaky relu
+        m - softmax
+        t - tanh
+    x_out
+        the output vector, shape m.
+
+    JSON Configuration Example
+    --------------------------
+
+    .. code-block :: json
+
+        "layer_1": {
+            "function": "dense_layer_with_bias",
+            "module": "dspeed.processors",
+            "args": ["wf_blsub", "db.kernel", "db.bias", "'s'", "layer_1"],
+            "unit": "ADC"
+        }
+    """
+
+    x_out[:] = np.nan
+
+    if np.isnan(x_in).any():
+        return
+
+    temp = np.dot(x_in, kernel) + bias
+
+    if activation_func == ord("s"):
+        sigmoid(temp, x_out)
+    elif activation_func == ord("r"):
+        relu(temp, x_out)
+    elif activation_func == ord("l"):
+        leaky_relu(temp, x_out)
+    elif activation_func == ord("m"):
+        softmax(temp, x_out)
+    elif activation_func == ord("t"):
+        x_out[:] = np.tanh(temp)
+
+
+@guvectorize(
+    [
+        "void(float32[::1], float32[::1], char, float32[:])",
+        "void(float64[::1], float64[::1], char, float64[:])",
+    ],
+    "(n),(n),()->()",
+    **nb_kwargs,
+)
+def classification_layer_no_bias(
+    x_in: np.ndarray, kernel: np.ndarray, activation_func: np.int8, x_out: float
+) -> None:
+    """
+    The same as dense_layer_no_bias, but the final output is a single number: f(x.W).
+
+    Parameters
+    ----------
+    x_in
+        the input waveform, shape n.
+    kernel
+        the vector of weights, shape n.
+    activation_func
+        the activation function to use, specified with a char:
+        s - sigmoid
+        r - relu
+        l - leaky relu
+        m - softmax
+        t - tanh
+    x_out
+        the output value.
+
+    JSON Configuration Example
+    --------------------------
+
+    .. code-block :: json
+
+        "classifier": {
+            "function": "classification_layer_no_bias",
+            "module": "dspeed.processors",
+            "args": ["wf_blsub", "db.kernel", "'s'", "classifier"],
+            "unit": "ADC"
+        }
+    """
+    x_out[0] = np.nan
+
+    if np.isnan(x_in).any():
+        return
+
+    temp = np.zeros(1, dtype=x_out.dtype)
+    temp[0] = np.dot(x_in, kernel)
+
+    if activation_func == ord("s"):
+        sigmoid(temp, x_out)
+    elif activation_func == ord("r"):
+        relu(temp, x_out)
+    elif activation_func == ord("l"):
+        leaky_relu(temp, x_out)
+    elif activation_func == ord("m"):
+        softmax(temp, x_out)
+    elif activation_func == ord("t"):
+        x_out[0] = np.tanh(temp[0])
+
+
+@guvectorize(
+    [
+        "void(float32[::1], float32[::1], float32, char, float32[:])",
+        "void(float64[::1], float64[::1], float64, char, float64[:])",
+    ],
+    "(n),(n),(),()->()",
+    **nb_kwargs,
+)
+def classification_layer_with_bias(
+    x_in: np.ndarray,
+    kernel: np.ndarray,
+    bias: float,
+    activation_func: np.int8,
+    x_out: float,
+) -> None:
+    """
+    The same as dense_layer_with_bias, but the final output is a single number: f(x.W + b).
+
+    Parameters
+    ----------
+    x_in
+        the input waveform, shape n.
+    kernel
+        the vector of weights, shape n.
+    bias
+        the bias, in this case a single value.
+    activation_func
+        the activation function to use, specified with a char:
+        s - sigmoid
+        r - relu
+        l - leaky relu
+        m - softmax
+        t - tanh
+    x_out
+        the output value.
+
+    JSON Configuration Example
+    --------------------------
+
+    .. code-block :: json
+
+        "classifier": {
+            "function": "classification_layer_with_bias",
+            "module": "dspeed.processors",
+            "args": ["wf_blsub", "db.kernel", "db.bias", "'s'", "classifier"],
+            "unit": "ADC"
+        }
+    """
+    x_out[0] = np.nan
+
+    if np.isnan(x_in).any():
+        return
+
+    temp = np.zeros(1, dtype=x_out.dtype)
+    temp[0] = np.dot(x_in, kernel) + bias
+
+    if activation_func == ord("s"):
+        sigmoid(temp, x_out)
+    elif activation_func == ord("r"):
+        relu(temp, x_out)
+    elif activation_func == ord("l"):
+        leaky_relu(temp, x_out)
+    elif activation_func == ord("m"):
+        softmax(temp, x_out)
+    elif activation_func == ord("t"):
+        x_out[0] = np.tanh(temp[0])
+
+
+@guvectorize(
+    [
+        "void(float32[:], float32[:], float32[:], float32[:])",
+        "void(float64[:], float64[:], float64[:], float64[:])",
+    ],
+    "(n),(n),(n)->(n)",
+    **nb_kwargs,
+)
+def normalisation_layer(
+    x_in: np.ndarray, means: np.ndarray, variances: np.ndarray, x_out: np.ndarray
+) -> None:
+    """
+    Normalisation layer: (x_in - means) / sqrt(variances).
+    Note that this takes the variances, not the standard deviations.
+
+    Parameters
+    ----------
+    x_in
+        the input waveform, shape n.
+    means
+        array of means for each input value, shape n.
+    variances
+        array of variances for each input value, shape n.
+    x_out
+        the output vector, shape n.
+
+    JSON Configuration Example
+    --------------------------
+
+    .. code-block :: json
+
+        "wf_normed": {
+            "function": "normalisation_layer",
+            "module": "dspeed.processors",
+            "args": ["wf_blsub", "db.mean", "db.variance", "wf_normed"],
+            "unit": "ADC"
+        }
+    """
+    x_out[:] = (x_in - means) / np.sqrt(variances)
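
As a quick sanity check, the new processors can also be called directly on NumPy arrays outside of a processing chain, since they are plain guvectorized functions. The sketch below is illustrative only and is not part of the patch: the array sizes and weight values are invented stand-ins for the `db.*` entries that would normally come from the DSP parameter database, and it assumes a dspeed installation that includes this patch.

.. code-block :: python

    import numpy as np

    from dspeed.processors import (
        classification_layer_with_bias,
        dense_layer_with_bias,
        normalisation_layer,
    )

    rng = np.random.default_rng(1234)

    wf = rng.normal(size=100)              # stand-in for "wf_blsub"
    means = np.zeros(100)                  # stand-in for "db.mean"
    variances = np.ones(100)               # stand-in for "db.variance"
    kernel_1 = rng.normal(size=(100, 16))  # stand-in for "db.kernel" of layer_2
    bias_1 = np.zeros(16)                  # stand-in for "db.bias" of layer_2
    kernel_2 = rng.normal(size=16)         # stand-in for "db.kernel" of the classifier
    bias_2 = 0.0                           # stand-in for "db.bias" of the classifier

    # normalise, apply one hidden layer with a ReLU, then a sigmoid classifier
    layer_1 = normalisation_layer(wf, means, variances)
    layer_2 = dense_layer_with_bias(layer_1, kernel_1, bias_1, np.int8(ord("r")))
    score = classification_layer_with_bias(layer_2, kernel_2, bias_2, np.int8(ord("s")))

    print(score)  # a single sigmoid output in (0, 1)

This mirrors the three-layer JSON example in the module docstring, with the activation characters passed as ``np.int8(ord(...))`` rather than the ``"'r'"`` / ``"'s'"`` strings used in a DSP config.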