Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adds more tests #4

Merged
merged 2 commits into from
Oct 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
123 changes: 72 additions & 51 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@ use std::sync::Arc;
mod chunk_structs;
mod errors;

mod sample;
pub use sample::Sample;

mod streams;
mod util;

Expand All @@ -54,7 +57,7 @@ mod parsers;
use crate::parsers::xdf_file::xdf_file_parser;

type StreamID = u32;
type SampleIter = std::vec::IntoIter<Sample>;
type SampleIter = std::vec::IntoIter<sample::Sample>;

/// XDF file struct
/// The main struct representing an XDF file.
Expand Down Expand Up @@ -101,30 +104,6 @@ pub enum Values {
String(String),
}

/// A single sample in a stream. Samples may have a timestamp and one or more values.
#[derive(Debug, PartialEq, Clone)]
pub struct Sample {
/**
The timestamp of the sample.
This is optional and may be None if the stream has an irregular sampling rate, as is often the case for marker streams.

It is worth mentioning that
* clock offsets are already applied to the timestamps, should they exist
* most of the timestamps are not actually in the recording but rather calculated using the provided nominal sampling rate.
Internally, streams are recorded in "chunks". The first sample in a chunk generally includes a timestamp while the rest are calculated.
*/
pub timestamp: Option<f64>,

/// The values of the sample.
pub values: Values,
}

impl PartialOrd for Sample {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.timestamp.partial_cmp(&other.timestamp)
}
}

struct GroupedChunks {
stream_header_chunks: Vec<StreamHeaderChunk>,
stream_footer_chunks: Vec<StreamFooterChunk>,
Expand Down Expand Up @@ -292,7 +271,7 @@ fn process_streams(mut grouped_chunks: GroupedChunks) -> Vec<Stream> {
.remove(&stream_header.stream_id)
.unwrap_or_default();

let samples_vec: Vec<Sample> = process_samples(
let samples_vec: Vec<sample::Sample> = process_samples(
grouped_chunks.sample_map.remove(&stream_id).unwrap_or_default(),
&stream_offsets,
stream_header.info.nominal_srate,
Expand Down Expand Up @@ -348,7 +327,7 @@ fn process_samples(
sample_iterators: Vec<SampleIter>,
stream_offsets: &[ClockOffsetChunk],
nominal_srate: Option<f64>,
) -> Vec<Sample> {
) -> Vec<sample::Sample> {
let mut offset_index: usize = 0;

let mut most_recent_timestamp = None;
Expand All @@ -357,7 +336,7 @@ fn process_samples(
.into_iter()
.flatten()
.enumerate()
.map(|(i, s)| -> Sample {
.map(|(i, s)| -> sample::Sample {
if let Some(srate) = nominal_srate {
let timestamp = if let Some(timestamp) = s.timestamp {
// if the sample has its own timestamp, use that and update the most recent timestamp
Expand All @@ -377,7 +356,7 @@ fn process_samples(

let timestamp = timestamp.map(|ts| interpolate_and_add_offsets(ts, stream_offsets, &mut offset_index));

Sample {
sample::Sample {
timestamp,
values: s.values,
}
Expand Down Expand Up @@ -451,39 +430,62 @@ fn interpolate_and_add_offsets(ts: f64, stream_offsets: &[ClockOffsetChunk], off

#[cfg(test)]
mod tests {


use super::*;

const EPSILON: f64 = 1E-15;
const EPSILON: f64 = 1E-14;

// test the interpolation function for timestamps *inside* the range of offsets
#[test]
fn test_interpolation_inside() {
let offsets = vec![
ClockOffsetChunk {
collection_time: 0.0,
offset_value: -1.0,
stream_id: 0,
},
ClockOffsetChunk {
collection_time: 1.0,
offset_value: 1.0,
stream_id: 0,
},
const TEST_VALUES: [((f64, f64), (f64, f64)); 4] = [
((0.0, -1.0), (1.0, 1.0)),
((0.0, 0.0), (1.0, 1.0)),
((0.0, -1.0), (1.0, 5.0)),
((4.0, -1.0), (5.0, 2.0)),
];

// test at multiple steps
for i in 0..=10 {
let timestamp = f64::from(i) / 10.0;
for ((s1_t, s1_v), (s2_t, s2_v)) in TEST_VALUES {
let offsets = vec![
ClockOffsetChunk {
collection_time: s1_t,
offset_value: s1_v,
stream_id: 0,
},
ClockOffsetChunk {
collection_time: s2_t,
offset_value: s2_v,
stream_id: 0,
},
];

let incline = (offsets[1].offset_value - offsets[0].offset_value)
/ (offsets[1].collection_time - offsets[0].collection_time);

let first_pos = (
offsets.first().unwrap().collection_time,
offsets.first().unwrap().offset_value,
);

let mut offset_index = 0;
let interpolated = interpolate_and_add_offsets(timestamp, &offsets, &mut offset_index);
let linspace = |start: f64, end: f64, n: usize| {
(0..n)
.map(|i| start + (end - start) * (i as f64) / (n as f64))
.collect::<Vec<f64>>()
};

let expected = timestamp + (timestamp * 2.0 - 1.0); // original timestamp + interpolated offset
// test at multiple steps
for timestamp in linspace(s1_t, s2_t, 100) {
let mut offset_index = 0;
let interpolated = interpolate_and_add_offsets(timestamp, &offsets, &mut offset_index);

assert!(
(interpolated - expected).abs() < EPSILON,
"expected {interpolated} to be within {EPSILON} of {expected}"
);
let expected: f64 = timestamp + ((timestamp - first_pos.0) * incline + first_pos.1); // original timestamp + interpolated offset

assert!(
(interpolated - expected).abs() < EPSILON,
"expected {interpolated} to be within {EPSILON} of {expected}"
);
}
}
}

Expand All @@ -501,6 +503,11 @@ mod tests {
offset_value: 1.0,
stream_id: 0,
},
ClockOffsetChunk {
collection_time: 3.0,
offset_value: 2.0,
stream_id: 0,
},
];
// after the range we expect for the last offset to be used
let last_offset = offsets.last().unwrap();
Expand Down Expand Up @@ -567,6 +574,20 @@ mod tests {
// should panic
interpolate_and_add_offsets(timestamp, &offsets, &mut offset_index);
}
#[test]
#[allow(clippy::float_cmp)]
fn test_no_offsets() {
    // When a stream carries no clock offsets, interpolation must be a no-op:
    // every timestamp passes through completely unchanged.
    let offsets: Vec<ClockOffsetChunk> = Vec::new();
    let mut offset_index = 0;

    // Sweep timestamps from -2.0 to 2.0 in steps of 0.1, including negatives and zero.
    for tenths in -20..=20 {
        let timestamp = f64::from(tenths) / 10.0;
        let result = interpolate_and_add_offsets(timestamp, &offsets, &mut offset_index);

        // Exact equality is intended here (hence the float_cmp allow): the value
        // must be returned untouched, not merely approximately equal.
        assert_eq!(timestamp, result);
    }
}

#[test]
const fn test_is_sync() {
Expand Down
45 changes: 45 additions & 0 deletions src/sample.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
use super::Values;

/// A single sample in a stream. Samples may have a timestamp and one or more values.
#[derive(Debug, PartialEq, Clone)]
pub struct Sample {
    /**
    The timestamp of the sample.

    This is optional and may be `None` if the stream has an irregular sampling rate,
    as is often the case for marker streams.

    It is worth mentioning that
    * clock offsets are already applied to the timestamps, should they exist
    * most of the timestamps are not actually in the recording but rather calculated
      using the provided nominal sampling rate. Internally, streams are recorded in
      "chunks". The first sample in a chunk generally includes a timestamp while the
      rest are calculated.
    */
    pub timestamp: Option<f64>,

    /// The values of the sample.
    pub values: Values,
}

/// Orders samples by `timestamp` alone; `values` play no part in the ordering.
///
/// `None` timestamps sort before any `Some` timestamp (standard `Option` ordering),
/// and comparing against a NaN timestamp yields `None`.
///
/// NOTE(review): this is inconsistent with the derived `PartialEq`, which also
/// compares `values` — two samples with equal timestamps but different values
/// yield `Some(Equal)` here while `==` reports them unequal. The `PartialOrd`
/// contract requires `a == b` iff `partial_cmp(a, b) == Some(Equal)`; consider
/// either ordering by `(timestamp, values)` or documenting this as deliberate.
impl PartialOrd for Sample {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.timestamp.partial_cmp(&other.timestamp)
    }
}

/// Verifies that `Sample` ordering is determined purely by `timestamp`.
///
/// Improvement over the original test: also covers samples with a `None`
/// timestamp (which sort before any timestamped sample, per `Option` ordering)
/// and the equal-timestamp / different-values case, which the manual
/// `PartialOrd` reports as `Some(Equal)` even though `PartialEq` disagrees.
#[test]
fn test_sample_partialord() {
    // Small helper to cut down on construction boilerplate.
    let sample = |timestamp: Option<f64>, values: Vec<i8>| Sample {
        timestamp,
        values: Values::Int8(values),
    };

    let sample1 = sample(Some(1.0), vec![1, 2, 3]);
    let sample2 = sample(Some(2.0), vec![4, 5, 6]);
    let sample3 = sample(Some(3.0), vec![7, 8, 9]);

    // Strictly increasing timestamps order the samples accordingly.
    assert!(sample1 < sample2);
    assert!(sample2 < sample3);
    assert!(sample1 < sample3);

    // A sample without a timestamp sorts before any timestamped sample.
    let untimed = sample(None, vec![0]);
    assert!(untimed < sample1);

    // Equal timestamps compare as Equal even when the values differ,
    // because ordering looks at the timestamp only.
    let sample1_other_values = sample(Some(1.0), vec![9]);
    assert_eq!(
        sample1.partial_cmp(&sample1_other_values),
        Some(std::cmp::Ordering::Equal)
    );
}
Loading