Skip to content
This repository has been archived by the owner on Jun 18, 2021. It is now read-only.

Commit

Permalink
Use the new map-async
Browse files Browse the repository at this point in the history
  • Loading branch information
kvark committed Jun 2, 2020
1 parent ac6f79c commit 383c7ab
Show file tree
Hide file tree
Showing 8 changed files with 210 additions and 420 deletions.
4 changes: 2 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,14 @@ vulkan = ["wgc/gfx-backend-vulkan"]
package = "wgpu-core"
version = "0.5"
git = "https://github.com/gfx-rs/wgpu"
rev = "fbc2c87de61b0e7bab2583ddf305742e3cbf85e8"
rev = "9120f0399ce8d8cc48b0b87c26d58c71b2e925be"
features = ["raw-window-handle"]

[dependencies.wgt]
package = "wgpu-types"
version = "0.5"
git = "https://github.com/gfx-rs/wgpu"
rev = "fbc2c87de61b0e7bab2583ddf305742e3cbf85e8"
rev = "9120f0399ce8d8cc48b0b87c26d58c71b2e925be"

[dependencies]
arrayvec = "0.5"
Expand Down
11 changes: 7 additions & 4 deletions examples/capture/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,10 @@ async fn run() {

// The output buffer lets us retrieve the data as an array
let output_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: (size * size) as u64 * size_of::<u32>() as u64,
usage: wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::COPY_DST,
label: None,
mapped_at_creation: false,
});

let texture_extent = wgpu::Extent3d {
Expand Down Expand Up @@ -95,7 +96,7 @@ async fn run() {
queue.submit(Some(command_buffer));

// Note that we're not calling `.await` here.
let buffer_future = output_buffer.map_read(0, wgt::BufferSize::WHOLE);
let buffer_future = output_buffer.map_async(wgpu::MapMode::Read, 0, wgt::BufferSize::WHOLE);

// Poll the device in a blocking manner so that our future resolves.
// In an actual application, `device.poll(...)` should
Expand All @@ -108,15 +109,17 @@ async fn run() {
return;
}

if let Ok(mapping) = buffer_future.await {
if let Ok(()) = buffer_future.await {
let data = output_buffer.get_mapped_range(0, wgt::BufferSize::WHOLE);
let mut png_encoder = png::Encoder::new(File::create("red.png").unwrap(), size, size);
png_encoder.set_depth(png::BitDepth::Eight);
png_encoder.set_color(png::ColorType::RGBA);
png_encoder
.write_header()
.unwrap()
.write_image_data(mapping.as_slice())
.write_image_data(data)
.unwrap();
output_buffer.unmap();
}
}

Expand Down
36 changes: 18 additions & 18 deletions examples/hello-compute/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,20 +51,20 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
let cs_module =
device.create_shader_module(&wgpu::read_spirv(std::io::Cursor::new(&cs[..])).unwrap());

let staging_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&numbers),
wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::COPY_SRC,
);

let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor {
size,
usage: wgpu::BufferUsage::STORAGE
| wgpu::BufferUsage::COPY_DST
| wgpu::BufferUsage::COPY_SRC,
let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size,
usage: wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::COPY_DST,
mapped_at_creation: false,
});

let storage_buffer = device.create_buffer_with_data(
bytemuck::cast_slice(&numbers),
wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::COPY_SRC,
);

let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
bindings: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::COMPUTE,
Expand All @@ -73,16 +73,15 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
readonly: false,
},
}],
label: None,
});

let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &bind_group_layout,
bindings: &[wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::Buffer(storage_buffer.slice(..)),
}],
label: None,
});

let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
Expand All @@ -99,7 +98,6 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {

let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
encoder.copy_buffer_to_buffer(&staging_buffer, 0, &storage_buffer, 0, size);
{
let mut cpass = encoder.begin_compute_pass();
cpass.set_pipeline(&compute_pipeline);
Expand All @@ -111,19 +109,21 @@ async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
queue.submit(Some(encoder.finish()));

// Note that we're not calling `.await` here.
let buffer_future = staging_buffer.map_read(0, wgt::BufferSize::WHOLE);
let buffer_future = staging_buffer.map_async(wgpu::MapMode::Read, 0, wgt::BufferSize::WHOLE);

// Poll the device in a blocking manner so that our future resolves.
// In an actual application, `device.poll(...)` should
// be called in an event loop or on another thread.
device.poll(wgpu::Maintain::Wait);

if let Ok(mapping) = buffer_future.await {
mapping
.as_slice()
if let Ok(()) = buffer_future.await {
let data = staging_buffer.get_mapped_range(0, wgt::BufferSize::WHOLE);
let result = data
.chunks_exact(4)
.map(|b| u32::from_ne_bytes(b.try_into().unwrap()))
.collect()
.collect();
staging_buffer.unmap();
result
} else {
panic!("failed to run compute on gpu!")
}
Expand Down
12 changes: 8 additions & 4 deletions examples/shadow/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -236,9 +236,10 @@ impl framework::Example for Example {

let entity_uniform_size = mem::size_of::<EntityUniforms>() as wgpu::BufferAddress;
let plane_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: entity_uniform_size,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
label: None,
mapped_at_creation: false,
});

let local_bind_group_layout =
Expand Down Expand Up @@ -316,9 +317,10 @@ impl framework::Example for Example {
scale: cube.scale,
};
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: entity_uniform_size,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
label: None,
mapped_at_creation: false,
});
entities.push(Entity {
mx_world: cgmath::Matrix4::from(transform),
Expand Down Expand Up @@ -408,11 +410,12 @@ impl framework::Example for Example {
let light_uniform_size =
(Self::MAX_LIGHTS * mem::size_of::<LightRaw>()) as wgpu::BufferAddress;
let light_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: light_uniform_size,
usage: wgpu::BufferUsage::UNIFORM
| wgpu::BufferUsage::COPY_SRC
| wgpu::BufferUsage::COPY_DST,
label: None,
mapped_at_creation: false,
});

let vb_desc = wgpu::VertexBufferDescriptor {
Expand All @@ -438,9 +441,10 @@ impl framework::Example for Example {

let uniform_size = mem::size_of::<ShadowUniforms>() as wgpu::BufferAddress;
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: uniform_size,
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
label: None,
mapped_at_creation: false,
});

// Create bind group
Expand Down
Loading

0 comments on commit 383c7ab

Please sign in to comment.