diff --git a/nokhwa-core/src/buffer.rs b/nokhwa-core/src/buffer.rs
index fdc8486..37b694a 100644
--- a/nokhwa-core/src/buffer.rs
+++ b/nokhwa-core/src/buffer.rs
@@ -19,7 +19,7 @@ use crate::{
     pixel_format::FormatDecoder,
     types::{FrameFormat, Resolution},
 };
-use bytes::Bytes;
+use bytes::{Buf, Bytes};
 use image::ImageBuffer;
 
 /// A buffer returned by a camera to accommodate custom decoding.
@@ -102,6 +102,7 @@ impl Buffer {
             buffer,
         )
     }
+
     /// Decodes a image with allocation using the provided [`FormatDecoder`] into a [`Mat`](https://docs.rs/opencv/latest/opencv/core/struct.Mat.html).
     ///
     /// Note that this does a clone when creating the buffer, to decouple the lifetime of the internal data to the temporary Buffer. If you want to avoid this, please see [`decode_opencv_mat`](Self::decode_opencv_mat).
@@ -152,4 +153,92 @@ impl Buffer {
             Ok(mat1)
         }
     }
+
+    /// Decodes an image using the provided [`FormatDecoder`] into an existing [`Mat`](https://docs.rs/opencv/latest/opencv/core/struct.Mat.html), allocating a new matrix only if `dst` is empty.
+    ///
+    /// # Errors
+    /// Will error when the decoding fails, or `OpenCV` failed to create/copy the [`Mat`](https://docs.rs/opencv/latest/opencv/core/struct.Mat.html).
+    #[cfg(feature = "opencv-mat")]
+    #[cfg_attr(feature = "docs-features", doc(cfg(feature = "opencv-mat")))]
+    #[allow(clippy::cast_possible_wrap)]
+    pub fn decode_into_opencv_mat<F: FormatDecoder>(
+        &mut self,
+        dst: &mut opencv::core::Mat,
+    ) -> Result<(), NokhwaError> {
+        use image::Pixel;
+        use opencv::core::{
+            Mat, MatTraitConst, MatTraitManual, Scalar, CV_8UC1, CV_8UC2, CV_8UC3, CV_8UC4,
+        };
+
+        let array_type = match F::Output::CHANNEL_COUNT {
+            1 => CV_8UC1,
+            2 => CV_8UC2,
+            3 => CV_8UC3,
+            4 => CV_8UC4,
+            _ => {
+                return Err(NokhwaError::ProcessFrameError {
+                    src: FrameFormat::RAWRGB,
+                    destination: "OpenCV Mat".to_string(),
+                    error: "Invalid Decoder FormatDecoder Channel Count".to_string(),
+                })
+            }
+        };
+
+        // If destination does not exist, create a new matrix.
+        if dst.empty() {
+            *dst = Mat::new_rows_cols_with_default(
+                self.resolution.height_y as i32,
+                self.resolution.width_x as i32,
+                array_type,
+                Scalar::default(),
+            )
+            .map_err(|why| NokhwaError::ProcessFrameError {
+                src: FrameFormat::RAWRGB,
+                destination: "OpenCV Mat".to_string(),
+                error: why.to_string(),
+            })?;
+        } else {
+            if dst.typ() != array_type {
+                return Err(NokhwaError::ProcessFrameError {
+                    src: FrameFormat::RAWRGB,
+                    destination: "OpenCV Mat".to_string(),
+                    error: "Invalid Matrix Channel Count".to_string(),
+                });
+            }
+
+            if dst.rows() != self.resolution.height_y as _
+                || dst.cols() != self.resolution.width_x as _
+            {
+                return Err(NokhwaError::ProcessFrameError {
+                    src: FrameFormat::RAWRGB,
+                    destination: "OpenCV Mat".to_string(),
+                    error: "Invalid Matrix Dimensions".to_string(),
+                });
+            }
+        }
+
+        let mut bytes = match dst.data_bytes_mut() {
+            Ok(bytes) => bytes,
+            Err(_e) => {
+                return Err(NokhwaError::ProcessFrameError {
+                    src: FrameFormat::RAWRGB,
+                    destination: "OpenCV Mat".to_string(),
+                    error: "Matrix Must Be Continuous".to_string(),
+                })
+            }
+        };
+
+        let mut buffer = self.buffer.as_ref();
+        if bytes.len() != buffer.len() {
+            return Err(NokhwaError::ProcessFrameError {
+                src: FrameFormat::RAWRGB,
+                destination: "OpenCV Mat".to_string(),
+                error: "Matrix Buffer Size Mismatch".to_string(),
+            });
+        }
+
+        buffer.copy_to_slice(&mut bytes);
+
+        Ok(())
+    }
 }
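
For reviewers, a minimal caller-side sketch of how the new `decode_into_opencv_mat` is meant to be used: a single `Mat` is reused across frames, so it is allocated on the first (empty) call and only validated and written into on later calls. The camera setup below follows the top-level `nokhwa` crate's documented API and is an illustrative assumption, not part of this diff.

```rust
use nokhwa::pixel_format::RgbFormat;
use nokhwa::utils::{CameraIndex, RequestedFormat, RequestedFormatType};
use nokhwa::Camera;
use opencv::core::Mat;

fn grab_frames() -> Result<(), Box<dyn std::error::Error>> {
    // Illustrative setup; the index and format choices are assumptions for this sketch.
    let format = RequestedFormat::new::<RgbFormat>(RequestedFormatType::AbsoluteHighestFrameRate);
    let mut camera = Camera::new(CameraIndex::Index(0), format)?;
    camera.open_stream()?;

    // Left empty so the first decode_into_opencv_mat call allocates it once;
    // subsequent calls only check the type/dimensions and copy the pixels in place.
    let mut mat = Mat::default();

    for _ in 0..10 {
        let mut frame = camera.frame()?;
        frame.decode_into_opencv_mat::<RgbFormat>(&mut mat)?;
        // ... hand `mat` to OpenCV processing here ...
    }
    Ok(())
}
```

Compared with `decode_opencv_mat`, which produces a fresh `Mat` per call, the in-place variant reuses the destination's storage across frames, which is what motivates the `Buf`-based `copy_to_slice` into the existing matrix data.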