Add ttest helper and benchmarks #16

Merged · 6 commits · Oct 30, 2024
Changes from all commits
4 changes: 4 additions & 0 deletions Cargo.toml
@@ -46,3 +46,7 @@ harness = false
 [[bench]]
 name = "dpa"
 harness = false
+
+[[bench]]
+name = "ttest"
+harness = false
58 changes: 29 additions & 29 deletions benches/cpa.rs
@@ -12,12 +12,12 @@ pub fn leakage_model(value: usize, guess: usize) -> usize {
     hw(sbox((value ^ guess) as u8) as usize)
 }
 
-fn cpa_sequential(leakages: &Array2<f64>, plaintexts: &Array2<u8>) -> Cpa {
-    let mut cpa = CpaProcessor::new(leakages.shape()[1], 256, 0, leakage_model);
+fn cpa_sequential(traces: &Array2<f64>, plaintexts: &Array2<u8>) -> Cpa {
+    let mut cpa = CpaProcessor::new(traces.shape()[1], 256, 0, leakage_model);
 
-    for i in 0..leakages.shape()[0] {
+    for i in 0..traces.shape()[0] {
         cpa.update(
-            leakages.row(i).map(|&x| x as usize).view(),
+            traces.row(i).map(|&x| x as usize).view(),
             plaintexts.row(i).map(|&y| y as usize).view(),
         );
     }
@@ -29,17 +29,17 @@ pub fn leakage_model_normal(value: ArrayView1<usize>, guess: usize) -> usize {
     hw(sbox((value[1] ^ guess) as u8) as usize)
 }
 
-fn cpa_normal_sequential(leakages: &Array2<f64>, plaintexts: &Array2<u8>) -> Cpa {
-    let chunk_size = 500;
+fn cpa_normal_sequential(traces: &Array2<f64>, plaintexts: &Array2<u8>) -> Cpa {
+    let batch_size = 500;
 
     let mut cpa =
-        cpa_normal::CpaProcessor::new(leakages.shape()[1], chunk_size, 256, leakage_model_normal);
+        cpa_normal::CpaProcessor::new(traces.shape()[1], batch_size, 256, leakage_model_normal);
 
-    for (leakages_chunk, plaintexts_chunk) in zip(
-        leakages.axis_chunks_iter(Axis(0), chunk_size),
-        plaintexts.axis_chunks_iter(Axis(0), chunk_size),
+    for (trace_batch, plaintext_batch) in zip(
+        traces.axis_chunks_iter(Axis(0), batch_size),
+        plaintexts.axis_chunks_iter(Axis(0), batch_size),
     ) {
-        cpa.update(leakages_chunk.map(|&x| x as f32).view(), plaintexts_chunk);
+        cpa.update(trace_batch.map(|&x| x as f32).view(), plaintext_batch);
     }
 
     cpa.finalize()
@@ -53,26 +53,26 @@ fn bench_cpa(c: &mut Criterion) {
 
     group.measurement_time(std::time::Duration::from_secs(60));
 
-    for nb_traces in [5000, 10000, 25000].into_iter() {
-        let leakages = Array2::random_using((nb_traces, 5000), Uniform::new(-2., 2.), &mut rng);
+    for num_traces in [5000, 10000, 25000].into_iter() {
+        let traces = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng);
         let plaintexts = Array2::random_using(
-            (nb_traces, 16),
+            (num_traces, 16),
             Uniform::new_inclusive(0u8, 255u8),
             &mut rng,
         );
 
         group.bench_with_input(
-            BenchmarkId::new("cpa_sequential", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| b.iter(|| cpa_sequential(leakages, plaintexts)),
+            BenchmarkId::new("cpa_sequential", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| cpa_sequential(traces, plaintexts)),
         );
         group.bench_with_input(
-            BenchmarkId::new("cpa_parallel", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| {
+            BenchmarkId::new("cpa_parallel", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| {
                 b.iter(|| {
                     cpa::cpa(
-                        leakages.map(|&x| x as usize).view(),
+                        traces.map(|&x| x as usize).view(),
                         plaintexts.map(|&x| x as usize).view(),
                         256,
                         0,
@@ -83,20 +83,20 @@ fn bench_cpa(c: &mut Criterion) {
             },
         );
         // For 25000 traces, 60s of measurement_time is too low
-        if nb_traces <= 10000 {
+        if num_traces <= 10000 {
             group.bench_with_input(
-                BenchmarkId::new("cpa_normal_sequential", nb_traces),
-                &(&leakages, &plaintexts),
-                |b, (leakages, plaintexts)| b.iter(|| cpa_normal_sequential(leakages, plaintexts)),
+                BenchmarkId::new("cpa_normal_sequential", num_traces),
+                &(&traces, &plaintexts),
+                |b, (traces, plaintexts)| b.iter(|| cpa_normal_sequential(traces, plaintexts)),
             );
         }
         group.bench_with_input(
-            BenchmarkId::new("cpa_normal_parallel", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| {
+            BenchmarkId::new("cpa_normal_parallel", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| {
                 b.iter(|| {
                     cpa_normal::cpa(
-                        leakages.map(|&x| x as f32).view(),
+                        traces.map(|&x| x as f32).view(),
                         plaintexts.view(),
                         256,
                         leakage_model_normal,
30 changes: 15 additions & 15 deletions benches/dpa.rs
@@ -10,19 +10,19 @@ fn selection_function(metadata: Array1<u8>, guess: usize) -> bool {
     usize::from(sbox(metadata[1] ^ guess as u8)) & 1 == 1
 }
 
-fn dpa_sequential(leakages: &Array2<f32>, plaintexts: &Array2<u8>) -> Dpa {
-    let mut dpa = DpaProcessor::new(leakages.shape()[1], 256, selection_function);
+fn dpa_sequential(traces: &Array2<f32>, plaintexts: &Array2<u8>) -> Dpa {
+    let mut dpa = DpaProcessor::new(traces.shape()[1], 256, selection_function);
 
-    for i in 0..leakages.shape()[0] {
-        dpa.update(leakages.row(i), plaintexts.row(i).to_owned());
+    for i in 0..traces.shape()[0] {
+        dpa.update(traces.row(i), plaintexts.row(i).to_owned());
     }
 
     dpa.finalize()
 }
 
-fn dpa_parallel(leakages: &Array2<f32>, plaintexts: &Array2<u8>) -> Dpa {
+fn dpa_parallel(traces: &Array2<f32>, plaintexts: &Array2<u8>) -> Dpa {
     dpa(
-        leakages.view(),
+        traces.view(),
         plaintexts
             .rows()
             .into_iter()
@@ -43,21 +43,21 @@ fn bench_dpa(c: &mut Criterion) {
 
     group.measurement_time(std::time::Duration::from_secs(60));
 
-    for nb_traces in [1000, 2000, 5000].into_iter() {
-        let leakages = Array2::random_using((nb_traces, 5000), Uniform::new(-2., 2.), &mut rng);
+    for num_traces in [1000, 2000, 5000].into_iter() {
+        let traces = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng);
         let plaintexts =
-            Array2::random_using((nb_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
+            Array2::random_using((num_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
 
         group.bench_with_input(
-            BenchmarkId::new("sequential", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| b.iter(|| dpa_sequential(leakages, plaintexts)),
+            BenchmarkId::new("sequential", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| dpa_sequential(traces, plaintexts)),
         );
 
         group.bench_with_input(
-            BenchmarkId::new("parallel", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| b.iter(|| dpa_parallel(leakages, plaintexts)),
+            BenchmarkId::new("parallel", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| dpa_parallel(traces, plaintexts)),
         );
     }
 
32 changes: 16 additions & 16 deletions benches/snr.rs
@@ -1,22 +1,22 @@
 use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
-use muscat::leakage_detection::{snr, Snr};
+use muscat::leakage_detection::{snr, SnrProcessor};
 use ndarray::{Array1, Array2};
 use ndarray_rand::rand::{rngs::StdRng, SeedableRng};
 use ndarray_rand::rand_distr::Uniform;
 use ndarray_rand::RandomExt;
 
-fn snr_sequential(leakages: &Array2<i64>, plaintexts: &Array2<u8>) -> Array1<f64> {
-    let mut snr = Snr::new(leakages.shape()[1], 256);
+fn snr_sequential(traces: &Array2<i64>, plaintexts: &Array2<u8>) -> Array1<f64> {
+    let mut snr = SnrProcessor::new(traces.shape()[1], 256);
 
-    for i in 0..leakages.shape()[0] {
-        snr.process(leakages.row(i), plaintexts.row(i)[0] as usize);
+    for i in 0..traces.shape()[0] {
+        snr.process(traces.row(i), plaintexts.row(i)[0] as usize);
     }
 
     snr.snr()
 }
 
-fn snr_parallel(leakages: &Array2<i64>, plaintexts: &Array2<u8>) -> Array1<f64> {
-    snr(leakages.view(), 256, |i| plaintexts.row(i)[0].into(), 500)
+fn snr_parallel(traces: &Array2<i64>, plaintexts: &Array2<u8>) -> Array1<f64> {
+    snr(traces.view(), 256, |i| plaintexts.row(i)[0].into(), 500)
 }
 
 fn bench_snr(c: &mut Criterion) {
@@ -27,21 +27,21 @@ fn bench_snr(c: &mut Criterion) {
 
     group.measurement_time(std::time::Duration::from_secs(60));
 
-    for nb_traces in [5000, 10000, 25000].into_iter() {
-        let leakages = Array2::random_using((nb_traces, 5000), Uniform::new(-200, 200), &mut rng);
+    for num_traces in [5000, 10000, 25000].into_iter() {
+        let traces = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng);
         let plaintexts =
-            Array2::random_using((nb_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
+            Array2::random_using((num_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
 
         group.bench_with_input(
-            BenchmarkId::new("sequential", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| b.iter(|| snr_sequential(leakages, plaintexts)),
+            BenchmarkId::new("sequential", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| snr_sequential(traces, plaintexts)),
        );
 
         group.bench_with_input(
-            BenchmarkId::new("parallel", nb_traces),
-            &(&leakages, &plaintexts),
-            |b, (leakages, plaintexts)| b.iter(|| snr_parallel(leakages, plaintexts)),
+            BenchmarkId::new("parallel", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| snr_parallel(traces, plaintexts)),
         );
     }
 
51 changes: 51 additions & 0 deletions benches/ttest.rs
@@ -0,0 +1,51 @@
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use muscat::leakage_detection::{ttest, TTestProcessor};
+use ndarray::{Array1, Array2};
+use ndarray_rand::rand::{rngs::StdRng, SeedableRng};
+use ndarray_rand::rand_distr::{Standard, Uniform};
+use ndarray_rand::RandomExt;
+
+fn ttest_sequential(traces: &Array2<i64>, trace_classes: &Array1<bool>) -> Array1<f64> {
+    let mut ttest = TTestProcessor::new(traces.shape()[1]);
+
+    for i in 0..traces.shape()[0] {
+        ttest.process(traces.row(i), trace_classes[i]);
+    }
+
+    ttest.ttest()
+}
+
+fn ttest_parallel(traces: &Array2<i64>, trace_classes: &Array1<bool>) -> Array1<f64> {
+    ttest(traces.view(), trace_classes.view(), 500)
+}
+
+fn bench_ttest(c: &mut Criterion) {
+    // Seed rng to get the same output each run
+    let mut rng = StdRng::seed_from_u64(0);
+
+    let mut group = c.benchmark_group("ttest");
+
+    group.measurement_time(std::time::Duration::from_secs(60));
+
+    for num_traces in [5000, 10000, 25000].into_iter() {
+        let traces = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng);
+        let plaintexts = Array1::random_using(num_traces, Standard, &mut rng);
+
+        group.bench_with_input(
+            BenchmarkId::new("sequential", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| ttest_sequential(traces, plaintexts)),
+        );
+
+        group.bench_with_input(
+            BenchmarkId::new("parallel", num_traces),
+            &(&traces, &plaintexts),
+            |b, (traces, plaintexts)| b.iter(|| ttest_parallel(traces, plaintexts)),
+        );
+    }
+
+    group.finish();
+}
+
+criterion_group!(benches, bench_ttest);
+criterion_main!(benches);
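
For reference, here is a minimal usage sketch of the t-test helper added by this PR, outside the Criterion harness. It reuses only the calls exercised in benches/ttest.rs (TTestProcessor::new, process, ttest, and the batched ttest function); the trace values, class labels, and batch size below are made up for illustration.

    use muscat::leakage_detection::{ttest, TTestProcessor};
    use ndarray::{array, Array1, Array2};

    fn main() {
        // Illustrative data: four tiny "traces" of three samples each, split into two classes.
        // Real traces would come from an acquisition campaign.
        let traces: Array2<i64> = array![[1, 2, 3], [2, 2, 4], [1, 3, 3], [2, 1, 4]];
        let trace_classes: Array1<bool> = array![true, false, true, false];

        // Sequential accumulation, one trace at a time, as in ttest_sequential above.
        let mut processor = TTestProcessor::new(traces.shape()[1]);
        for i in 0..traces.shape()[0] {
            processor.process(traces.row(i), trace_classes[i]);
        }
        let t_sequential = processor.ttest(); // one t statistic per sample point

        // Batched helper, as in ttest_parallel above (batch size is arbitrary here).
        let t_parallel = ttest(traces.view(), trace_classes.view(), 2);

        println!("{t_sequential:?}");
        println!("{t_parallel:?}");
    }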
12 changes: 6 additions & 6 deletions examples/cpa.rs
@@ -27,9 +27,9 @@ fn cpa() -> Result<()> {
     let folder = String::from("../../data/cw");
     let dir_l = format!("{folder}/leakages.npy");
     let dir_p = format!("{folder}/plaintexts.npy");
-    let leakages = read_array2_from_npy_file::<FormatTraces>(&dir_l)?;
+    let traces = read_array2_from_npy_file::<FormatTraces>(&dir_l)?;
     let plaintext = read_array2_from_npy_file::<FormatMetadata>(&dir_p)?;
-    let len_traces = leakages.shape()[0];
+    let len_traces = traces.shape()[0];
 
     let cpa_parallel = ((0..len_traces).step_by(batch))
         .progress_with(progress_bar(len_traces))
@@ -38,7 +38,7 @@ fn cpa() -> Result<()> {
             let mut cpa = CpaProcessor::new(size, batch, guess_range, leakage_model);
             let range_rows = row_number..row_number + batch;
             let range_samples = start_sample..end_sample;
-            let sample_traces = leakages
+            let sample_traces = traces
                 .slice(s![range_rows.clone(), range_samples])
                 .map(|l| *l as f32);
             let sample_metadata = plaintext.slice(s![range_rows, ..]).map(|p| *p as usize);
@@ -76,13 +76,13 @@ fn success() -> Result<()> {
     for i in (0..nfiles).progress() {
         let dir_l = format!("{folder}/l/{i}.npy");
         let dir_p = format!("{folder}/p/{i}.npy");
-        let leakages = read_array2_from_npy_file::<FormatTraces>(&dir_l)?;
+        let traces = read_array2_from_npy_file::<FormatTraces>(&dir_l)?;
         let plaintext = read_array2_from_npy_file::<FormatMetadata>(&dir_p)?;
-        for row in (0..leakages.shape()[0]).step_by(batch) {
+        for row in (0..traces.shape()[0]).step_by(batch) {
             let range_samples = start_sample..end_sample;
             let range_rows = row..row + batch;
             let range_metadata = 0..plaintext.shape()[1];
-            let sample_traces = leakages
+            let sample_traces = traces
                 .slice(s![range_rows.clone(), range_samples])
                 .map(|l| *l as f32);
             let sample_metadata = plaintext.slice(s![range_rows, range_metadata]);
9 changes: 4 additions & 5 deletions examples/cpa_partioned.rs
@@ -19,7 +19,7 @@ fn cpa() -> Result<()> {
     let size = 5000; // Number of samples
     let guess_range = 256; // 2**(key length)
     let target_byte = 1;
-    let folder = String::from("../../data"); // Directory of leakages and metadata
+    let folder = String::from("../../data"); // Directory of traces and metadata
     let nfiles = 5; // Number of files in the directory. TBD: Automating this value
 
     /* Parallel operation using multi-threading on batches */
@@ -28,15 +28,14 @@ fn cpa() -> Result<()> {
         .map(|n| {
             let dir_l = format!("{folder}/l{n}.npy");
             let dir_p = format!("{folder}/p{n}.npy");
-            let leakages = read_array2_from_npy_file::<FormatTraces>(&dir_l).unwrap();
+            let traces = read_array2_from_npy_file::<FormatTraces>(&dir_l).unwrap();
             let plaintext = read_array2_from_npy_file::<FormatMetadata>(&dir_p).unwrap();
-            (leakages, plaintext)
+            (traces, plaintext)
         })
         .par_bridge()
         .map(|batch| {
             let mut c = CpaProcessor::new(size, guess_range, target_byte, leakage_model);
-            let len_leakage = batch.0.shape()[0];
-            for i in 0..len_leakage {
+            for i in 0..batch.0.shape()[0] {
                 c.update(
                     batch.0.row(i).map(|x| *x as usize).view(),
                     batch.1.row(i).map(|y| *y as usize).view(),