Skip to content

Commit 34a3316

Browse files
Revert "fixup! fix(core): use BufferMapState::Active for any BufferUsages::MAP_* flags"
This reverts commit 1c0837b.
1 parent: 1c0837b · commit: 34a3316

File tree

3 files changed

+91
-3
lines changed

3 files changed

+91
-3
lines changed

wgpu-core/src/device/global.rs

Lines changed: 12 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2346,6 +2346,18 @@ impl Global {
23462346
}
23472347
let map_state = &*buffer.map_state.lock();
23482348
match *map_state {
2349+
resource::BufferMapState::Init { ref staging_buffer } => {
2350+
// offset (u64) can not be < 0, so no need to validate the lower bound
2351+
if offset + range_size > buffer.size {
2352+
return Err(BufferAccessError::OutOfBoundsOverrun {
2353+
index: offset + range_size - 1,
2354+
max: buffer.size,
2355+
});
2356+
}
2357+
let ptr = unsafe { staging_buffer.ptr() };
2358+
let ptr = unsafe { NonNull::new_unchecked(ptr.as_ptr().offset(offset as isize)) };
2359+
Ok((ptr, range_size))
2360+
}
23492361
resource::BufferMapState::Active {
23502362
ref mapping,
23512363
ref range,

wgpu-core/src/device/resource.rs

Lines changed: 16 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -988,7 +988,12 @@ impl Device {
988988

989989
let buffer = Arc::new(buffer);
990990

991-
let buffer_use = if desc.mapped_at_creation {
991+
let buffer_use = if !desc.mapped_at_creation {
992+
wgt::BufferUses::empty()
993+
} else if desc
994+
.usage
995+
.intersects(wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::MAP_READ)
996+
{
992997
// buffer is mappable, so we are just doing that at start
993998
let map_size = buffer.size;
994999
let mapping = if map_size == 0 {
@@ -1007,7 +1012,16 @@ impl Device {
10071012
};
10081013
wgt::BufferUses::MAP_WRITE
10091014
} else {
1010-
wgt::BufferUses::empty()
1015+
let mut staging_buffer =
1016+
StagingBuffer::new(self, wgt::BufferSize::new(aligned_size).unwrap())?;
1017+
1018+
// Zero initialize memory and then mark the buffer as initialized
1019+
// (it's guaranteed that this is the case by the time the buffer is usable)
1020+
staging_buffer.write_zeros();
1021+
buffer.initialization_status.write().drain(0..aligned_size);
1022+
1023+
*buffer.map_state.lock() = resource::BufferMapState::Init { staging_buffer };
1024+
wgt::BufferUses::COPY_DST
10111025
};
10121026

10131027
self.trackers

wgpu-core/src/resource.rs

Lines changed: 63 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -216,6 +216,8 @@ macro_rules! impl_trackable {
216216

217217
#[derive(Debug)]
218218
pub(crate) enum BufferMapState {
219+
/// Mapped at creation.
220+
Init { staging_buffer: StagingBuffer },
219221
/// Waiting for GPU to be done before mapping
220222
Waiting(BufferPendingMapping),
221223
/// Mapped
@@ -639,7 +641,7 @@ impl Buffer {
639641
{
640642
let map_state = &mut *self.map_state.lock();
641643
*map_state = match *map_state {
642-
BufferMapState::Active { .. } => {
644+
BufferMapState::Init { .. } | BufferMapState::Active { .. } => {
643645
return Err((op, BufferAccessError::AlreadyMapped));
644646
}
645647
BufferMapState::Waiting(_) => {
@@ -692,6 +694,7 @@ impl Buffer {
692694
*self.map_state.lock() = mapping;
693695
return None;
694696
}
697+
_ => panic!("No pending mapping."),
695698
};
696699
let status = if pending_mapping.range.start != pending_mapping.range.end {
697700
let host = pending_mapping.op.host;
@@ -752,6 +755,56 @@ impl Buffer {
752755
let snatch_guard = device.snatchable_lock.read();
753756
let raw_buf = self.try_raw(&snatch_guard)?;
754757
match mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle) {
758+
BufferMapState::Init { staging_buffer } => {
759+
#[cfg(feature = "trace")]
760+
if let Some(ref mut trace) = *device.trace.lock() {
761+
let data = trace.make_binary("bin", staging_buffer.get_data());
762+
trace.add(trace::Action::WriteBuffer {
763+
id: buffer_id,
764+
data,
765+
range: 0..self.size,
766+
queued: true,
767+
});
768+
}
769+
770+
let staging_buffer = staging_buffer.flush();
771+
772+
if let Some(queue) = device.get_queue() {
773+
let region = wgt::BufferSize::new(self.size).map(|size| hal::BufferCopy {
774+
src_offset: 0,
775+
dst_offset: 0,
776+
size,
777+
});
778+
let transition_src = hal::BufferBarrier {
779+
buffer: staging_buffer.raw(),
780+
usage: hal::StateTransition {
781+
from: wgt::BufferUses::MAP_WRITE,
782+
to: wgt::BufferUses::COPY_SRC,
783+
},
784+
};
785+
let transition_dst = hal::BufferBarrier::<dyn hal::DynBuffer> {
786+
buffer: raw_buf,
787+
usage: hal::StateTransition {
788+
from: wgt::BufferUses::empty(),
789+
to: wgt::BufferUses::COPY_DST,
790+
},
791+
};
792+
let mut pending_writes = queue.pending_writes.lock();
793+
let encoder = pending_writes.activate();
794+
unsafe {
795+
encoder.transition_buffers(&[transition_src, transition_dst]);
796+
if self.size > 0 {
797+
encoder.copy_buffer_to_buffer(
798+
staging_buffer.raw(),
799+
raw_buf,
800+
region.as_slice(),
801+
);
802+
}
803+
}
804+
pending_writes.consume(staging_buffer);
805+
pending_writes.insert_buffer(self);
806+
}
807+
}
755808
BufferMapState::Idle => {
756809
return Err(BufferAccessError::NotMapped);
757810
}
@@ -996,6 +1049,15 @@ impl StagingBuffer {
9961049
self.ptr
9971050
}
9981051

1052+
#[cfg(feature = "trace")]
1053+
pub(crate) fn get_data(&self) -> &[u8] {
1054+
unsafe { core::slice::from_raw_parts(self.ptr.as_ptr(), self.size.get() as usize) }
1055+
}
1056+
1057+
pub(crate) fn write_zeros(&mut self) {
1058+
unsafe { core::ptr::write_bytes(self.ptr.as_ptr(), 0, self.size.get() as usize) };
1059+
}
1060+
9991061
pub(crate) fn write(&mut self, data: &[u8]) {
10001062
assert!(data.len() >= self.size.get() as usize);
10011063
// SAFETY: With the assert above, all of `copy_nonoverlapping`'s

0 commit comments

Comments (0)