@@ -117,32 +117,25 @@ impl framework::Example for Example {
         let (vertex_data, index_data) = create_vertices();
         let vertex_buffer_length = vertex_data.len() * vertex_size;
         let index_buffer_length = index_data.len() * mem::size_of::<u16>();
-        let vertex_buf = device.create_buffer(&wgpu::BufferDescriptor {
-            size: vertex_buffer_length as u32,
-            usage: wgpu::BufferUsageFlags::VERTEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
-        });
-
-        //vertex_buf.set_sub_data(0, framework::cast_slice(&vertex_data));
-        vertex_buf.map_write_async(0, vertex_buffer_length as u32, |result: wgpu::BufferMapAsyncResult<&mut [Vertex]>| {
-            if let wgpu::BufferMapAsyncResult::Success(data) = result {
-                data.copy_from_slice(&vertex_data);
-            }
-
+        let vertex_buf = {
+            let (vertex_buf, vertex_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
+                size: vertex_buffer_length as u32,
+                usage: wgpu::BufferUsageFlags::VERTEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
+            });
+            vertex_buf_data.copy_from_slice(&vertex_data);
             vertex_buf.unmap();
-        });
-
-        let index_buf = device.create_buffer(&wgpu::BufferDescriptor {
-            size: index_buffer_length as u32,
-            usage: wgpu::BufferUsageFlags::INDEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
-        });
-        // index_buf.set_sub_data(0, framework::cast_slice(&index_data));
-        index_buf.map_write_async(0, index_buffer_length as u32, |result: wgpu::BufferMapAsyncResult<&mut [u16]>| {
-            if let wgpu::BufferMapAsyncResult::Success(data) = result {
-                data.copy_from_slice(&index_data);
-            }
+            vertex_buf
+        };
 
+        let index_buf = {
+            let (index_buf, index_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
+                size: index_buffer_length as u32,
+                usage: wgpu::BufferUsageFlags::INDEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
+            });
+            index_buf_data.copy_from_slice(&index_data);
             index_buf.unmap();
-        });
+            index_buf
+        };
 
         // Create pipeline layout
         let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
@@ -184,18 +177,15 @@ impl framework::Example for Example {
             usage: wgpu::TextureUsageFlags::SAMPLED | wgpu::TextureUsageFlags::TRANSFER_DST,
         });
         let texture_view = texture.create_default_view();
-        let temp_buf = device.create_buffer(&wgpu::BufferDescriptor {
-            size: texels.len() as u32,
-            usage: wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
-        });
-        // temp_buf.set_sub_data(0, &texels);
-        temp_buf.map_write_async(0, texels.len() as u32, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
-            if let wgpu::BufferMapAsyncResult::Success(data) = result {
-                data.copy_from_slice(&texels);
-            }
-
+        let temp_buf = {
+            let (temp_buf, temp_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
+                size: texels.len() as u32,
+                usage: wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
+            });
+            temp_buf_data.copy_from_slice(&texels);
             temp_buf.unmap();
-        });
+            temp_buf
+        };
         init_encoder.copy_buffer_to_texture(
             wgpu::BufferCopyView {
                 buffer: &temp_buf,
@@ -230,20 +220,17 @@ impl framework::Example for Example {
             compare_function: wgpu::CompareFunction::Always,
             border_color: wgpu::BorderColor::TransparentBlack,
         });
-        let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
-            size: 64,
-            usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
-        });
         let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
         let mx_ref: &[f32; 16] = mx_total.as_ref();
-        // uniform_buf.set_sub_data(0, framework::cast_slice(&mx_ref[..]));
-        uniform_buf.map_write_async(0, 64, |result: wgpu::BufferMapAsyncResult<&mut [f32]>| {
-            if let wgpu::BufferMapAsyncResult::Success(data) = result {
-                data.copy_from_slice(mx_ref);
-            }
-
+        let uniform_buf = {
+            let (uniform_buf, uniform_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
+                size: 64,
+                usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
+            });
+            uniform_buf_data.copy_from_slice(mx_ref);
             uniform_buf.unmap();
-        });
+            uniform_buf
+        };
 
         // Create bind group
         let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
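All three hunks apply the same change: drop the create_buffer + map_write_async callback and instead create the buffer already mapped, copy the source data into the returned slice, and unmap before first use. As a standalone reference, here is a minimal sketch of that pattern, assuming the same wgpu API revision as the diff; `device`, `vertex_data`, `Vertex`, and `vertex_size` stand in for the example's own locals and types:

// Minimal sketch of the pattern introduced above (assumed identifiers:
// `device: wgpu::Device`, `vertex_data: Vec<Vertex>`, `vertex_size: usize`).
let vertex_buf = {
    // create_buffer_mapped hands back the buffer together with a mapped,
    // writable slice covering its contents.
    let (buf, data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
        size: (vertex_data.len() * vertex_size) as u32,
        usage: wgpu::BufferUsageFlags::VERTEX
            | wgpu::BufferUsageFlags::TRANSFER_DST
            | wgpu::BufferUsageFlags::MAP_WRITE,
    });
    // Fill the mapped memory synchronously; no async map callback is needed.
    data.copy_from_slice(&vertex_data);
    // Unmap so the buffer is usable by the GPU.
    buf.unmap();
    buf
};

The block expression keeps the mapped slice scoped to initialization, so only the finished buffer escapes.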