ML processors for ANN #90
Merged
Changes from 4 of 6 commits:

- 1179ca2 use value when loading lh5 scalars instead of nda (ggmarshall)
- 2dc5831 add ml processors (ggmarshall)
- 8e9bb40 docstrings and pc (ggmarshall)
- 278887d Merge branch 'main' into main (iguinn)
- a1490c4 update docstring to work (hopefully) (ggmarshall)
- 2b49f11 Merge branch 'main' of https://github.com/ggmarshall/dspeed (ggmarshall)
@@ -0,0 +1,355 @@
from __future__ import annotations

import numpy as np
from numba import guvectorize

from ..utils import numba_defaults_kwargs as nb_kwargs

@guvectorize(
    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
    "(n)->(n)",
    **nb_kwargs,
)
def relu(x_in: np.ndarray, x_out: np.ndarray) -> None:
    """
    ReLU activation function: ``x_in if x_in > 0 else 0``.
    """
    x_out[:] = x_in * (x_in > 0)

@guvectorize(
    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
    "(n)->(n)",
    **nb_kwargs,
)
def leaky_relu(x_in: np.ndarray, x_out: np.ndarray) -> None:
    """
    Leaky ReLU activation function: ``x_in if x_in > 0 else 0.01 * x_in``.
    """
    x_out[:] = x_in * (x_in > 0) + 0.01 * x_in * (x_in < 0)

@guvectorize(
    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
    "(n)->(n)",
    **nb_kwargs,
)
def sigmoid(x_in: np.ndarray, x_out: np.ndarray) -> None:
    """
    Sigmoid activation function: ``1 / (1 + exp(-x_in))``.
    """
    x_out[:] = 1 / (1 + np.exp(-x_in))

@guvectorize(
    ["void(float32[:], float32[:])", "void(float64[:], float64[:])"],
    "(n)->(n)",
    **nb_kwargs,
)
def softmax(x_in: np.ndarray, x_out: np.ndarray) -> None:
    """
    Softmax activation function. Note: as written this computes
    ``log(1 + exp(x_in))``, i.e. the softplus function, not a normalised
    softmax.
    """
    x_out[:] = np.log(1 + np.exp(x_in))

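# A minimal usage sketch (not part of the processing chain): since the
# activations above are compiled with guvectorize, the resulting ufuncs can
# either allocate their output or fill a preallocated array in place, e.g.
#
#     x = np.array([-1.0, 0.5, 2.0])
#     y = sigmoid(x)  # allocates the output: 1 / (1 + exp(-x)), elementwise
#     relu(x, y)      # or writes into a preallocated output array
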
@guvectorize(
    [
        "void(float32[::1], float32[:,::1], char, float32[:])",
        "void(float64[::1], float64[:,::1], char, float64[:])",
    ],
    "(n),(n,m),()->(m)",
    **nb_kwargs,
)
def dense_layer_no_bias(
    x_in: np.ndarray, kernel: np.ndarray, activation_func: np.int8, x_out: np.ndarray
) -> None:
    """
    Basic dense neural-network layer with no bias, f(x.W).

    Parameters
    ----------
    x_in
        the input waveform, shape (n,).
    kernel
        the matrix of weights, shape (n, m).
    activation_func
        the activation function to use, specified by a single character:
        s - sigmoid
        r - relu
        l - leaky relu
        m - softmax
        t - tanh
    x_out
        the output vector, shape (m,).

    JSON Configuration Example
    --------------------------

    .. code-block :: json

    "layer_1": {
        "function": "dense_layer_no_bias",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.kernel", "'s'", "layer_1"],
        "unit": "ADC"
    }
    """

    x_out[:] = np.nan

    if np.isnan(x_in).any():
        return

    temp = np.dot(x_in, kernel)

    if activation_func == ord("s"):
        sigmoid(temp, x_out)
    elif activation_func == ord("r"):
        relu(temp, x_out)
    elif activation_func == ord("l"):
        leaky_relu(temp, x_out)
    elif activation_func == ord("m"):
        softmax(temp, x_out)
    elif activation_func == ord("t"):
        x_out[:] = np.tanh(temp)

@guvectorize(
    [
        "void(float32[::1], float32[:,::1], float32[:], char, float32[:])",
        "void(float64[::1], float64[:,::1], float64[:], char, float64[:])",
    ],
    "(n),(n,m),(m),()->(m)",
    **nb_kwargs,
)
def dense_layer_with_bias(
    x_in: np.ndarray,
    kernel: np.ndarray,
    bias: np.ndarray,
    activation_func: np.int8,
    x_out: np.ndarray,
) -> None:
    """
    Basic dense neural-network layer with bias, f(x.W + b).

    Parameters
    ----------
    x_in
        the input waveform, shape (n,).
    kernel
        the matrix of weights, shape (n, m).
    bias
        the bias, shape (m,).
    activation_func
        the activation function to use, specified by a single character:
        s - sigmoid
        r - relu
        l - leaky relu
        m - softmax
        t - tanh
    x_out
        the output vector, shape (m,).

    JSON Configuration Example
    --------------------------

    .. code-block :: json

    "layer_1": {
        "function": "dense_layer_with_bias",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.kernel", "db.bias", "'s'", "layer_1"],
        "unit": "ADC"
    }
    """

    x_out[:] = np.nan

    if np.isnan(x_in).any():
        return

    temp = np.dot(x_in, kernel) + bias

    if activation_func == ord("s"):
        sigmoid(temp, x_out)
    elif activation_func == ord("r"):
        relu(temp, x_out)
    elif activation_func == ord("l"):
        leaky_relu(temp, x_out)
    elif activation_func == ord("m"):
        softmax(temp, x_out)
    elif activation_func == ord("t"):
        x_out[:] = np.tanh(temp)

@guvectorize(
    [
        "void(float32[::1], float32[::1], char, float32[:])",
        "void(float64[::1], float64[::1], char, float64[:])",
    ],
    "(n),(n),()->()",
    **nb_kwargs,
)
def classification_layer_no_bias(
    x_in: np.ndarray, kernel: np.ndarray, activation_func: np.int8, x_out: float
) -> None:
    """
    The same as dense_layer_no_bias, but the final output is a single value, f(x.W).

    Parameters
    ----------
    x_in
        the input waveform, shape (n,).
    kernel
        the vector of weights, shape (n,).
    activation_func
        the activation function to use, specified by a single character:
        s - sigmoid
        r - relu
        l - leaky relu
        m - softmax
        t - tanh
    x_out
        the output value.

    JSON Configuration Example
    --------------------------

    .. code-block :: json

    "classifier": {
        "function": "classification_layer_no_bias",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.kernel", "'s'", "classifier"],
        "unit": "ADC"
    }
    """
    x_out[0] = np.nan

    if np.isnan(x_in).any():
        return

    temp = np.zeros(1, dtype=x_out.dtype)
    temp[0] = np.dot(x_in, kernel)

    if activation_func == ord("s"):
        sigmoid(temp, x_out)
    elif activation_func == ord("r"):
        relu(temp, x_out)
    elif activation_func == ord("l"):
        leaky_relu(temp, x_out)
    elif activation_func == ord("m"):
        softmax(temp, x_out)
    elif activation_func == ord("t"):
        x_out[0] = np.tanh(temp[0])

@guvectorize(
    [
        "void(float32[::1], float32[::1], float32, char, float32[:])",
        "void(float64[::1], float64[::1], float64, char, float64[:])",
    ],
    "(n),(n),(),()->()",
    **nb_kwargs,
)
def classification_layer_with_bias(
    x_in: np.ndarray,
    kernel: np.ndarray,
    bias: float,
    activation_func: np.int8,
    x_out: float,
) -> None:
    """
    The same as dense_layer_with_bias, but the final output is a single value, f(x.W + b).

    Parameters
    ----------
    x_in
        the input waveform, shape (n,).
    kernel
        the vector of weights, shape (n,).
    bias
        the bias, in this case a single value.
    activation_func
        the activation function to use, specified by a single character:
        s - sigmoid
        r - relu
        l - leaky relu
        m - softmax
        t - tanh
    x_out
        the output value.

    JSON Configuration Example
    --------------------------

    .. code-block :: json

    "classifier": {
        "function": "classification_layer_with_bias",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.kernel", "db.bias", "'s'", "classifier"],
        "unit": "ADC"
    }
    """
    x_out[0] = np.nan

    if np.isnan(x_in).any():
        return

    temp = np.zeros_like(x_out)
    temp[0] = np.dot(x_in, kernel) + bias

    if activation_func == ord("s"):
        sigmoid(temp, x_out)
    elif activation_func == ord("r"):
        relu(temp, x_out)
    elif activation_func == ord("l"):
        leaky_relu(temp, x_out)
    elif activation_func == ord("m"):
        softmax(temp, x_out)
    elif activation_func == ord("t"):
        x_out[0] = np.tanh(temp[0])

@guvectorize(
    [
        "void(float32[:], float32[:], float32[:], float32[:])",
        "void(float64[:], float64[:], float64[:], float64[:])",
    ],
    "(n),(n),(n)->(n)",
    **nb_kwargs,
)
def normalisation_layer(
    x_in: np.ndarray, means: np.ndarray, variances: np.ndarray, x_out: np.ndarray
) -> None:
    """
    Normalisation layer: (x_in - means) / sqrt(variances).
    Note that these are variances, not standard deviations.

    Parameters
    ----------
    x_in
        the input waveform, shape (n,).
    means
        array of means, one per input value, shape (n,).
    variances
        array of variances, one per input value, shape (n,).
    x_out
        the output vector, shape (n,).

    JSON Configuration Example
    --------------------------

    .. code-block :: json

    "wf_normed": {
        "function": "normalisation_layer",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.mean", "db.variance", "wf_normed"],
        "unit": "ADC"
    }
    """
    x_out[:] = (x_in - means) / np.sqrt(variances)
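For orientation, here is a minimal sketch of chaining these processors by hand on a toy waveform, outside a DSP config. It assumes the functions are importable from dspeed.processors, as the JSON examples above suggest; the shapes, the seed, and all kernel, bias, mean, and variance values are placeholders invented for illustration.

import numpy as np

from dspeed.processors import (
    classification_layer_with_bias,
    dense_layer_with_bias,
    normalisation_layer,
)

rng = np.random.default_rng(0)
n, m = 8, 4

wf = rng.normal(size=n)           # toy input waveform, shape (n,)
means = np.zeros(n)               # per-sample means, shape (n,)
variances = np.ones(n)            # per-sample variances (not std. dev.)
kernel = rng.normal(size=(n, m))  # dense-layer weights, shape (n, m)
bias = rng.normal(size=m)         # dense-layer bias, shape (m,)
clf_w = rng.normal(size=m)        # classifier weights, shape (m,)
clf_b = 0.1                       # classifier bias, a single value

wf_normed = normalisation_layer(wf, means, variances)              # (x - mu) / sqrt(var)
layer_1 = dense_layer_with_bias(wf_normed, kernel, bias, ord("r"))  # relu(x.W + b)
score = classification_layer_with_bias(layer_1, clf_w, clf_b, ord("s"))
print(score)  # a single sigmoid-activated value in (0, 1)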
The doc generation is failing with " /opt/hostedtoolcache/Python/3.10.15/x64/lib/python3.10/site-packages/dspeed/processors/ml.py:docstring of dspeed.processors.ml.normalisation_layer:21: WARNING: Definition list ends without a blank line; unexpected unindent. [docutils]". I think this code block needs an extra indent
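For reference, a sketch of the fix being suggested: under standard reStructuredText rules, a directive body needs a blank line after the directive line and one extra level of indentation relative to it, so the docstring example would become:

JSON Configuration Example
--------------------------

.. code-block :: json

    "wf_normed": {
        "function": "normalisation_layer",
        "module": "dspeed.processors",
        "args": ["wf_blsub", "db.mean", "db.variance", "wf_normed"],
        "unit": "ADC"
    }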