Rustfmt

gwihlidal committed Jan 13, 2019
1 parent cf08ebe · commit d91e0c9
Showing 9 changed files with 48 additions and 38 deletions.
src/clusterize.rs (8 changes: 4 additions & 4 deletions)

@@ -6,13 +6,13 @@ pub type Meshlet = ffi::meshopt_Meshlet;
 
 /// Splits the mesh into a set of meshlets where each meshlet has a micro index buffer
 /// indexing into meshlet vertices that refer to the original vertex buffer.
-/// 
+///
 /// The resulting data can be used to render meshes using NVidia programmable mesh shading
 /// pipeline, or in other cluster-based renderers.
-/// 
+///
 /// For maximum efficiency the index buffer being converted has to be optimized for vertex
 /// cache first.
-/// 
+///
 /// Note: `max_vertices` must be <= 64 and `max_triangles` must be <= 126
 pub fn build_meshlets(
     indices: &[u32],
@@ -47,7 +47,7 @@ pub fn build_meshlets(
 ///
 /// Alternatively, you can use the formula that doesn't need cone apex and uses bounding sphere instead:
 /// `dot(normalize(center - camera_position), cone_axis) >= cone_cutoff + radius / length(center - camera_position)`
-/// 
+///
 /// or an equivalent formula that doesn't have a singularity at center = camera_position:
 /// `dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius`
 ///
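For context, the singularity-free cone rejection test quoted in the doc comment above maps directly onto code. A minimal sketch in plain Rust; the flat parameter list is illustrative, since the crate's actual bounds struct is not shown in this diff:

    // Sketch of the documented bounding-sphere cone test (plain math, no crate calls).
    // A meshlet is rejected (backfacing) when the inequality holds.
    fn meshlet_is_backfacing(
        center: [f32; 3],    // bounding sphere center
        radius: f32,         // bounding sphere radius
        cone_axis: [f32; 3], // normalized cone axis
        cone_cutoff: f32,    // cutoff value from the meshlet bounds
        camera_position: [f32; 3],
    ) -> bool {
        let d = [
            center[0] - camera_position[0],
            center[1] - camera_position[1],
            center[2] - camera_position[2],
        ];
        let dist = (d[0] * d[0] + d[1] * d[1] + d[2] * d[2]).sqrt();
        let dot = d[0] * cone_axis[0] + d[1] * cone_axis[1] + d[2] * cone_axis[2];
        // dot(center - camera, axis) >= cutoff * length(center - camera) + radius
        dot >= cone_cutoff * dist + radius
    }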
src/encoding.rs (4 changes: 2 additions & 2 deletions)

@@ -5,7 +5,7 @@ use std::mem;
 
 /// Encodes index data into an array of bytes that is generally much smaller (<1.5 bytes/triangle)
 /// and compresses better (<1 bytes/triangle) compared to original.
-/// 
+///
 /// For maximum efficiency the index buffer being encoded has to be optimized for vertex cache and
 /// vertex fetch first.
 pub fn encode_index_buffer(indices: &[u32], vertex_count: usize) -> Result<Vec<u8>> {
@@ -54,7 +54,7 @@ pub fn decode_index_buffer<T: Clone + Default>(
 
 /// Encodes vertex data into an array of bytes that is generally smaller and compresses better
 /// compared to original.
-/// 
+///
 /// This function works for a single vertex stream; for multiple vertex streams,
 /// call `encode_vertex_buffer` for each stream.
 pub fn encode_vertex_buffer<T>(vertices: &[T]) -> Result<Vec<u8>> {
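For context, a round-trip sketch of the encode/decode pair documented above. `decode_index_buffer`'s full parameter list is cut off in this diff, so the `(encoded_bytes, index_count)` form below is an assumption:

    use meshopt::{decode_index_buffer, encode_index_buffer};

    fn roundtrip() -> meshopt::Result<()> {
        // Two triangles sharing an edge; 4 unique vertices.
        let indices: Vec<u32> = vec![0, 1, 2, 2, 1, 3];
        let encoded = encode_index_buffer(&indices, 4)?;
        // Assumed signature: encoded bytes plus the expected index count.
        let decoded: Vec<u32> = decode_index_buffer(&encoded, indices.len())?;
        assert_eq!(indices, decoded);
        Ok(())
    }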
src/lib.rs (4 changes: 2 additions & 2 deletions)

@@ -27,11 +27,11 @@ pub use crate::stripify::*;
 pub use crate::utilities::*;
 
 /// Vertex attribute stream, similar to glVertexPointer
-/// 
+///
 /// Each element takes size bytes, with stride controlling
 /// the spacing between successive elements.
 #[derive(Debug, Copy, Clone)]
 pub struct VertexStream<'a> {
     pub data: &'a [u8],
     pub stride: usize,
-}
+}
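For context, a sketch of building a `VertexStream` over a raw position buffer, using the two fields shown above. The unsafe byte view is illustrative only; real code might prefer a crate like bytemuck:

    fn position_stream_demo() {
        // 128 vertices, 3 f32s (x, y, z) each.
        let positions: Vec<f32> = vec![0.0; 3 * 128];
        let bytes: &[u8] = unsafe {
            std::slice::from_raw_parts(
                positions.as_ptr() as *const u8,
                positions.len() * std::mem::size_of::<f32>(),
            )
        };
        let stream = meshopt::VertexStream {
            data: bytes,
            // 12 bytes between successive elements, per the doc comment.
            stride: 3 * std::mem::size_of::<f32>(),
        };
        // e.g. pass `&[stream]` to generate_vertex_remap_multi below.
        let _ = stream;
    }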
src/optimize.rs (20 changes: 10 additions & 10 deletions)

@@ -3,7 +3,7 @@ use crate::DecodePosition;
 use std::mem;
 
 /// Reorders indices to reduce the number of GPU vertex shader invocations.
-/// 
+///
 /// If index buffer contains multiple ranges for multiple draw calls,
 /// this function needs to be called on each range individually.
 pub fn optimize_vertex_cache(indices: &[u32], vertex_count: usize) -> Vec<u32> {
@@ -20,7 +20,7 @@ pub fn optimize_vertex_cache(indices: &[u32], vertex_count: usize) -> Vec<u32> {
 }
 
 /// Reorders indices to reduce the number of GPU vertex shader invocations.
-/// 
+///
 /// If index buffer contains multiple ranges for multiple draw calls,
 /// this function needs to be called on each range individually.
 pub fn optimize_vertex_cache_in_place(indices: &mut [u32], vertex_count: usize) {
@@ -37,10 +37,10 @@ pub fn optimize_vertex_cache_in_place(indices: &mut [u32], vertex_count: usize)
 /// Vertex transform cache optimizer for FIFO caches.
 ///
 /// Reorders indices to reduce the number of GPU vertex shader invocations.
-/// 
+///
 /// Generally takes ~3x less time to optimize meshes but produces inferior
 /// results compared to `optimize_vertex_cache`.
-/// 
+///
 /// If index buffer contains multiple ranges for multiple draw calls,
 /// this function needs to be called on each range individually.
 pub fn optimize_vertex_cache_fifo(
@@ -64,10 +64,10 @@ pub fn optimize_vertex_cache_fifo(
 /// Vertex transform cache optimizer for FIFO caches (in place).
 ///
 /// Reorders indices to reduce the number of GPU vertex shader invocations.
-/// 
+///
 /// Generally takes ~3x less time to optimize meshes but produces inferior
 /// results compared to `optimize_vertex_cache_fifo_in_place`.
-/// 
+///
 /// If index buffer contains multiple ranges for multiple draw calls,
 /// this function needs to be called on each range individually.
 pub fn optimize_vertex_cache_fifo_in_place(
@@ -88,7 +88,7 @@ pub fn optimize_vertex_cache_fifo_in_place(
 
 /// Reorders vertices and changes indices to reduce the amount of GPU
 /// memory fetches during vertex processing.
-/// 
+///
 /// This functions works for a single vertex stream; for multiple vertex streams,
 /// use `optimize_vertex_fetch_remap` + `remap_vertex_buffer` for each stream.
 ///
@@ -112,7 +112,7 @@ pub fn optimize_vertex_fetch<T: Clone + Default>(indices: &mut [u32], vertices:
 /// Vertex fetch cache optimizer (modifies in place)
 /// Reorders vertices and changes indices to reduce the amount of GPU
 /// memory fetches during vertex processing.
-/// 
+///
 /// This functions works for a single vertex stream; for multiple vertex streams,
 /// use `optimize_vertex_fetch_remap` + `remap_vertex_buffer` for each stream.
 ///
@@ -132,7 +132,7 @@ pub fn optimize_vertex_fetch_in_place<T>(indices: &mut [u32], vertices: &mut [T]
 
 /// Generates vertex remap to reduce the amount of GPU memory fetches during
 /// vertex processing.
-/// 
+///
 /// The resulting remap table should be used to reorder vertex/index buffers
 /// using `optimize_remap_vertex_buffer`/`optimize_remap_index_buffer`.
 pub fn optimize_vertex_fetch_remap(indices: &[u32], vertex_count: usize) -> Vec<u32> {
@@ -154,7 +154,7 @@ pub fn optimize_vertex_fetch_remap(indices: &[u32], vertex_count: usize) -> Vec<
 ///
 /// `indices` must contain index data that is the result of `optimize_vertex_cache`
 /// (*not* the original mesh indices!)
-/// 
+///
 /// `threshold` indicates how much the overdraw optimizer can degrade vertex cache
 /// efficiency (1.05 = up to 5%) to reduce overdraw more efficiently.
 pub fn optimize_overdraw_in_place<T: DecodePosition>(
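For context, the doc comments above imply a pipeline order: vertex cache first, then overdraw, then vertex fetch last. A hedged sketch; the parameters of `optimize_overdraw_in_place` and the return type of `optimize_vertex_fetch` are partially cut off in this diff, so both are assumptions:

    fn optimize_mesh<V>(indices: &[u32], vertices: &[V]) -> (Vec<u32>, Vec<V>)
    where
        V: meshopt::DecodePosition + Clone + Default,
    {
        // 1. Reorder indices for the post-transform vertex cache.
        let mut indices = meshopt::optimize_vertex_cache(indices, vertices.len());
        // 2. Reduce overdraw, allowing up to 5% cache efficiency loss (1.05).
        //    Assumed parameter order: (indices, vertices, threshold).
        meshopt::optimize_overdraw_in_place(&mut indices, vertices, 1.05);
        // 3. Reorder vertices for fetch locality; assumed to return the
        //    compacted vertex buffer while rewriting `indices` in place.
        let vertices = meshopt::optimize_vertex_fetch(&mut indices, vertices);
        (indices, vertices)
    }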
src/remap.rs (19 changes: 12 additions & 7 deletions)

@@ -3,7 +3,7 @@ use crate::VertexStream;
 use std::mem;
 
 /// Generates a vertex remap table from the vertex buffer and an optional index buffer and returns number of unique vertices.
-/// 
+///
 /// As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence.
 /// Resulting remap table maps old vertices to new vertices and can be used in `remap_vertex_buffer`/`remap_index_buffer`.
 ///
@@ -34,21 +34,26 @@ pub fn generate_vertex_remap<T>(vertices: &[T], indices: Option<&[u32]>) -> (usi
 }
 
 /// Generates a vertex remap table from multiple vertex streams and an optional index buffer and returns number of unique vertices.
-/// 
+///
 /// As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence.
 /// Resulting remap table maps old vertices to new vertices and can be used in `remap_vertex_buffer`/`remap_index_buffer`.
 ///
 /// To remap vertex buffers, you will need to call `remap_vertex_buffer` for each vertex stream.
 ///
 /// The `indices` can be `None` if the input is unindexed.
-pub fn generate_vertex_remap_multi<T>(vertex_count: usize, streams: &[VertexStream], indices: Option<&[u32]>) -> (usize, Vec<u32>) {
-    let streams: Vec<ffi::meshopt_Stream> = streams.iter().map(|stream| {
-        ffi::meshopt_Stream {
+pub fn generate_vertex_remap_multi<T>(
+    vertex_count: usize,
+    streams: &[VertexStream],
+    indices: Option<&[u32]>,
+) -> (usize, Vec<u32>) {
+    let streams: Vec<ffi::meshopt_Stream> = streams
+        .iter()
+        .map(|stream| ffi::meshopt_Stream {
             data: stream.data.as_ptr() as *const ::std::ffi::c_void,
             size: stream.data.len(),
             stride: stream.stride,
-        }
-    }).collect();
+        })
+        .collect();
     let remap: Vec<u32> = vec![0; vertex_count];
     let vertex_count = unsafe {
         match indices {
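For context, a sketch of the indexing workflow the doc comment describes: deduplicate an unindexed triangle soup, then rebuild both buffers. The `remap_vertex_buffer`/`remap_index_buffer` signatures below are assumptions; only their names appear in this diff:

    fn index_mesh<V: Clone + Default>(soup: &[V]) -> (Vec<V>, Vec<u32>) {
        // No index buffer yet, so pass None; every 3 vertices form a triangle.
        let (unique_count, remap) = meshopt::generate_vertex_remap(soup, None);
        // Assumed signatures: (vertices, unique_vertex_count, remap) and
        // (indices, index_count, remap).
        let vertices = meshopt::remap_vertex_buffer(soup, unique_count, &remap);
        let indices = meshopt::remap_index_buffer(None, soup.len(), &remap);
        (vertices, indices)
    }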
src/shadow.rs (19 changes: 12 additions & 7 deletions)

@@ -5,7 +5,7 @@ use crate::VertexStream;
 /// Generate index buffer that can be used for more efficient rendering when only a subset of the vertex
 /// attributes is necessary. All vertices that are binary equivalent (wrt first vertex_size bytes) map to
 /// the first vertex in the original vertex buffer.
-/// 
+///
 /// This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using
 /// the original index buffer for regular rendering.
 pub fn generate_shadow_indices<T: DecodePosition>(indices: &[u32], vertices: &[T]) -> Vec<u32> {
@@ -32,17 +32,22 @@ pub fn generate_shadow_indices<T: DecodePosition>(indices: &[u32], vertices: &[T
 /// Generate index buffer that can be used for more efficient rendering when only a subset of the vertex
 /// attributes is necessary. All vertices that are binary equivalent (wrt specified streams) map to the
 /// first vertex in the original vertex buffer.
-/// 
+///
 /// This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using
 /// the original index buffer for regular rendering.
-pub fn generate_shadow_indices_multi(indices: &[u32], vertex_count: usize, streams: &[VertexStream]) -> Vec<u32> {
-    let streams: Vec<ffi::meshopt_Stream> = streams.iter().map(|stream| {
-        ffi::meshopt_Stream {
+pub fn generate_shadow_indices_multi(
+    indices: &[u32],
+    vertex_count: usize,
+    streams: &[VertexStream],
+) -> Vec<u32> {
+    let streams: Vec<ffi::meshopt_Stream> = streams
+        .iter()
+        .map(|stream| ffi::meshopt_Stream {
             data: stream.data.as_ptr() as *const ::std::ffi::c_void,
             size: stream.data.len(),
             stride: stream.stride,
-        }
-    }).collect();
+        })
+        .collect();
     let mut shadow_indices: Vec<u32> = vec![0; indices.len()];
     unsafe {
         ffi::meshopt_generateShadowIndexBufferMulti(
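For context, a short sketch of the two-index-buffer setup described above; the render-loop usage is described in comments, not crate API:

    fn build_shadow_indices<V: meshopt::DecodePosition>(
        indices: &[u32],
        vertices: &[V],
    ) -> Vec<u32> {
        // Positions-only equivalence: vertices that share a position collapse
        // onto one representative index.
        // Draw depth pre-passes and shadow maps with the returned indices;
        // keep drawing regular passes with the original `indices`.
        // Both share the same vertex buffer, so no extra vertex data is needed.
        meshopt::generate_shadow_indices(indices, vertices)
    }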
src/simplify.rs (2 changes: 1 addition & 1 deletion)

@@ -5,7 +5,7 @@ use std::mem;
 /// Reduces the number of triangles in the mesh, attempting to preserve mesh
 /// appearance as much as possible. The resulting index buffer references vertices
 /// from the original vertex buffer.
-/// 
+///
 /// If the original vertex data isn't required, creating a compact vertex buffer
 /// using `optimize_vertex_fetch` is recommended.
 pub fn simplify<T: DecodePosition>(
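For context, a hedged usage sketch. `simplify`'s trailing parameters are cut off in this diff, so the `(indices, vertices, target_index_count, target_error)` form is an assumption:

    fn build_lod<V>(indices: &[u32], vertices: &[V]) -> (Vec<u32>, Vec<V>)
    where
        V: meshopt::DecodePosition + Clone + Default,
    {
        // Aim for roughly half the triangles; keep the count a multiple of 3.
        let target_index_count = (indices.len() / 2) / 3 * 3;
        // Assumed signature: (indices, vertices, target_index_count, target_error).
        let mut lod = meshopt::simplify(indices, vertices, target_index_count, 0.01);
        // Per the doc comment, compact the vertex buffer afterwards if the
        // original vertex data is no longer required.
        let lod_vertices = meshopt::optimize_vertex_fetch(&mut lod, vertices);
        (lod, lod_vertices)
    }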
src/stripify.rs (2 changes: 1 addition & 1 deletion)

@@ -2,7 +2,7 @@ use crate::{ffi, Error, Result};
 
 /// Converts a previously vertex cache optimized triangle list to triangle
 /// strip, stitching strips using restart index.
-/// 
+///
 /// For maximum efficiency the index buffer being converted has to be
 /// optimized for vertex cache first.
 pub fn stripify(indices: &[u32], vertex_count: usize) -> Result<Vec<u32>> {
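For context, a usage sketch that follows the doc comment's advice to cache-optimize first:

    fn to_strip(indices: &[u32], vertex_count: usize) -> meshopt::Result<Vec<u32>> {
        // Stripification quality depends on a cache-optimized input list.
        let optimized = meshopt::optimize_vertex_cache(indices, vertex_count);
        // Sub-strips are stitched with a restart index, so draw the result
        // with primitive restart enabled.
        meshopt::stripify(&optimized, vertex_count)
    }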
src/utilities.rs (8 changes: 4 additions & 4 deletions)

@@ -39,10 +39,10 @@ pub fn convert_indices_16_to_32(indices: &[u16]) -> Result<Vec<u32>> {
 }
 
 /// Quantize a float in [0..1] range into an N-bit fixed point unorm value.
-/// 
+///
 /// Assumes reconstruction function (q / (2^N-1)), which is the case for
 /// fixed-function normalized fixed point conversion.
-/// 
+///
 /// Maximum reconstruction error: 1/2^(N+1).
 #[inline(always)]
 pub fn quantize_unorm(v: f32, n: i32) -> i32 {
@@ -53,10 +53,10 @@ pub fn quantize_unorm(v: f32, n: i32) -> i32 {
 }
 
 /// Quantize a float in [-1..1] range into an N-bit fixed point snorm value.
-/// 
+///
 /// Assumes reconstruction function (q / (2^(N-1)-1)), which is the case for
 /// fixed-function normalized fixed point conversion (except early OpenGL versions).
-/// 
+///
 /// Maximum reconstruction error: 1/2^N.
 #[inline(always)]
 pub fn quantize_snorm(v: f32, n: u32) -> i32 {
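For context, a worked example of the reconstruction functions and error bounds documented above, for N = 10:

    fn quantization_demo() {
        // unorm: reconstruct with q / (2^N - 1).
        let q = meshopt::quantize_unorm(0.3, 10);
        let r = q as f32 / ((1 << 10) - 1) as f32;
        // Documented max error is 1/2^(N+1) = 1/2048; allow slack for f32 rounding.
        assert!((r - 0.3).abs() < 1.0 / 2048.0 + f32::EPSILON);

        // snorm: reconstruct with q / (2^(N-1) - 1).
        let s = meshopt::quantize_snorm(-0.25, 10);
        let r = s as f32 / ((1 << 9) - 1) as f32;
        // Documented max error is 1/2^N = 1/1024.
        assert!((r + 0.25).abs() < 1.0 / 1024.0 + f32::EPSILON);
    }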
