ctx: Option<&PipeContext>,
mut offset: usize,
size: usize,
+ rw: RWFlags,
) -> CLResult<PipeTransfer> {
let b = self.to_parent(&mut offset);
let r = b.get_res()?.get(&q.device).unwrap();
Ok(if let Some(ctx) = ctx {
ctx.buffer_map(
r,
offset.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?,
size.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?,
true,
+ rw,
)
} else {
q.device.helper_ctx().buffer_map_async(
ctx: &'a PipeContext,
offset: usize,
size: usize,
+ rw: RWFlags,
) -> CLResult<GuardedPipeTransfer<'a>> {
- Ok(self.tx_raw(q, Some(ctx), offset, size)?.with_ctx(ctx))
+ Ok(self.tx_raw(q, Some(ctx), offset, size, rw)?.with_ctx(ctx))
}
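
The tx_raw/tx split above repeats for images below: the raw helper returns a bare PipeTransfer, while the wrapper binds it to the context through with_ctx so the mapping is released when the guard goes out of scope. A minimal, hypothetical sketch of that guard shape (Context, Transfer, and GuardedTransfer are stand-ins, not the actual types):

struct Context;

struct Transfer(u32);

struct GuardedTransfer<'a> {
    tx: Transfer,
    ctx: &'a Context,
}

impl Context {
    fn unmap(&self, tx: &Transfer) {
        println!("unmapping transfer {}", tx.0);
    }
}

impl Transfer {
    // Mirrors with_ctx: consume the raw transfer and borrow the context
    // that has to outlive the mapping.
    fn with_ctx(self, ctx: &Context) -> GuardedTransfer<'_> {
        GuardedTransfer { tx: self, ctx }
    }
}

impl Drop for GuardedTransfer<'_> {
    fn drop(&mut self) {
        // Unmap against the same context the transfer was created on.
        self.ctx.unmap(&self.tx);
    }
}

fn main() {
    let ctx = Context;
    let _tx = Transfer(7).with_ctx(&ctx); // unmapped when _tx drops
}
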
fn tx_image_raw(
q: &Arc<Queue>,
ctx: Option<&PipeContext>,
bx: &pipe_box,
+ rw: RWFlags,
) -> CLResult<PipeTransfer> {
assert!(!self.is_buffer());
let r = self.get_res()?.get(&q.device).unwrap();
Ok(if let Some(ctx) = ctx {
- ctx.texture_map(r, bx, true)
+ ctx.texture_map(r, bx, true, rw)
} else {
q.device.helper_ctx().texture_map_async(r, bx)
})
q: &Arc<Queue>,
ctx: &'a PipeContext,
bx: &pipe_box,
+ rw: RWFlags,
) -> CLResult<GuardedPipeTransfer<'a>> {
- Ok(self.tx_image_raw(q, Some(ctx), bx)?.with_ctx(ctx))
+ Ok(self.tx_image_raw(q, Some(ctx), bx, rw)?.with_ctx(ctx))
}
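
The call-site hunks below all follow one rule: request the narrowest access the operation needs. Copy sources and device-to-host reads map with RWFlags::RD, copy destinations and host-to-device writes with RWFlags::WR. Here is a self-contained sketch of that pairing, with stand-in Access and Mapping types rather than the real transfer machinery:

#[derive(Clone, Copy)]
enum Access {
    Rd,
    Wr,
    Rw,
}

struct Mapping<'a> {
    data: &'a mut [u8],
    access: Access,
}

// Stand-in for a driver map call: a write-only mapping lets the driver
// skip reading back the current contents before handing out a pointer.
fn map(buf: &mut [u8], access: Access) -> Mapping<'_> {
    Mapping { data: buf, access }
}

fn copy(src: &Mapping, dst: &mut Mapping) {
    // The copy only reads the source and only writes the destination.
    assert!(matches!(src.access, Access::Rd | Access::Rw));
    assert!(matches!(dst.access, Access::Wr | Access::Rw));
    dst.data.copy_from_slice(src.data);
}

fn main() {
    let mut a = vec![1u8, 2, 3, 4];
    let mut b = vec![0u8; 4];
    let src = map(&mut a, Access::Rd); // copy source: read-only
    let mut dst = map(&mut b, Access::Wr); // copy destination: write-only
    copy(&src, &mut dst);
    assert_eq!(b, [1, 2, 3, 4]);
}
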
pub fn has_same_parent(&self, other: &Self) -> bool {
) -> CLResult<()> {
assert!(self.is_buffer());
- let tx = self.tx(q, ctx, offset, size)?;
+ let tx = self.tx(q, ctx, offset, size, RWFlags::RD)?;
unsafe {
ptr::copy_nonoverlapping(tx.ptr(), ptr, size);
if self.is_buffer() {
let bpp = dst.image_format.pixel_size().unwrap() as usize;
- tx_src = self.tx(q, ctx, src_origin[0], region.pixels() * bpp)?;
- tx_dst = dst.tx_image(q, ctx, &create_box(&dst_origin, region, dst.mem_type)?)?;
+ tx_src = self.tx(q, ctx, src_origin[0], region.pixels() * bpp, RWFlags::RD)?;
+ tx_dst = dst.tx_image(
+ q,
+ ctx,
+ &create_box(&dst_origin, region, dst.mem_type)?,
+ RWFlags::WR,
+ )?;
sw_copy(
tx_src.ptr(),
)
} else {
let bpp = self.image_format.pixel_size().unwrap() as usize;
- tx_src = self.tx_image(q, ctx, &create_box(&src_origin, region, self.mem_type)?)?;
- tx_dst = dst.tx(q, ctx, dst_origin[0], region.pixels() * bpp)?;
+ tx_src = self.tx_image(
+ q,
+ ctx,
+ &create_box(&src_origin, region, self.mem_type)?,
+ RWFlags::RD,
+ )?;
+ tx_dst = dst.tx(q, ctx, dst_origin[0], region.pixels() * bpp, RWFlags::WR)?;
sw_copy(
tx_src.ptr(),
if self.is_buffer() {
let (offset, size) =
buffer_offset_size(dst_origin, region, dst_row_pitch, dst_slice_pitch);
- let tx = self.tx(q, ctx, offset, size)?;
+ let tx = self.tx(q, ctx, offset, size, RWFlags::WR)?;
sw_copy(
src,
if self.is_buffer() {
let (offset, size) =
buffer_offset_size(src_origin, region, src_row_pitch, src_slice_pitch);
- tx = self.tx(q, ctx, offset, size)?;
+ tx = self.tx(q, ctx, offset, size, RWFlags::RD)?;
pixel_size = 1;
} else {
assert!(dst_origin == &CLVec::default());
let bx = create_box(src_origin, region, self.mem_type)?;
- tx = self.tx_image(q, ctx, &bx)?;
+ tx = self.tx_image(q, ctx, &bx, RWFlags::RD)?;
src_row_pitch = tx.row_pitch() as usize;
src_slice_pitch = tx.slice_pitch() as usize;
assert!(dst.is_buffer());
let (offset, size) = buffer_offset_size(src_origin, region, src_row_pitch, src_slice_pitch);
- let tx_src = self.tx(q, ctx, offset, size)?;
+ let tx_src = self.tx(q, ctx, offset, size, RWFlags::RD)?;
let (offset, size) = buffer_offset_size(dst_origin, region, dst_row_pitch, dst_slice_pitch);
- let tx_dst = dst.tx(q, ctx, offset, size)?;
+ let tx_dst = dst.tx(q, ctx, offset, size, RWFlags::WR)?;
// TODO check to use hw accelerated paths (e.g. resource_copy_region or blits)
sw_copy(
q: &Arc<Queue>,
ctx: Option<&PipeContext>,
lock: &'a mut MutexGuard<Mappings>,
+ rw: RWFlags,
) -> CLResult<&'a PipeTransfer> {
if !lock.tx.contains_key(&q.device) {
let tx = if self.is_buffer() {
- self.tx_raw(q, ctx, 0, self.size)?
+ self.tx_raw(q, ctx, 0, self.size, rw)?
} else {
let bx = self.image_desc.bx()?;
- self.tx_image_raw(q, ctx, &bx)?
+ self.tx_image_raw(q, ctx, &bx, rw)?
};
lock.tx.insert(q.device.clone(), (tx, 0));
self.host_ptr
} else {
- let tx = self.map(q, ctx, &mut lock)?;
+ let tx = self.map(q, ctx, &mut lock, RWFlags::RW)?;
tx.ptr()
};
self.host_ptr
} else {
- let tx = self.map(q, ctx, &mut lock)?;
+ let tx = self.map(q, ctx, &mut lock, RWFlags::RW)?;
if self.image_desc.dims() > 1 {
*row_pitch = tx.row_pitch() as usize;
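
Note that the two map() call sites above keep RWFlags::RW: the transfer is cached per device in lock.tx and reused by later map operations, which may read or write, so the broadest access is the safe choice there. A hypothetical sketch of that per-device cache (the (tx, 0) tuple presumably pairs the transfer with an outstanding-map count):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DeviceId(u32);

struct Transfer {
    access_rw: bool,
}

#[derive(Default)]
struct Mappings {
    // hypothetical stand-in for lock.tx: transfer plus outstanding-map count
    tx: HashMap<DeviceId, (Transfer, u32)>,
}

impl Mappings {
    fn map(&mut self, dev: DeviceId) -> &Transfer {
        let entry = self
            .tx
            .entry(dev)
            // created once with read/write access, then shared by every
            // later map request on this device
            .or_insert((Transfer { access_rw: true }, 0));
        entry.1 += 1;
        &entry.0
    }
}

fn main() {
    let mut m = Mappings::default();
    m.map(DeviceId(0)); // first map creates the RW transfer
    let t = m.map(DeviceId(0)); // second map reuses it
    assert!(t.access_rw);
}
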
unsafe impl Send for PipeContext {}
unsafe impl Sync for PipeContext {}
+#[repr(u32)]
+pub enum RWFlags {
+ RD = pipe_map_flags::PIPE_MAP_READ.0,
+ WR = pipe_map_flags::PIPE_MAP_WRITE.0,
+ RW = pipe_map_flags::PIPE_MAP_READ_WRITE.0,
+}
+
+impl From<RWFlags> for pipe_map_flags {
+ fn from(rw: RWFlags) -> Self {
+ pipe_map_flags(rw as u32)
+ }
+}
+
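
The new RWFlags enum mirrors the gallium access bits: each variant's discriminant is the matching pipe_map_flags bit pattern, so the From impl is a plain integer cast rather than a match. Below is a standalone sketch of the same pattern with a stand-in PipeMapFlags newtype in place of the bindgen-generated type (the bit values are illustrative, not the real gallium constants):

use std::ops::BitOr;

// Stand-in for the bindgen newtype: a transparent u32 bitmask.
#[derive(Clone, Copy, PartialEq, Debug)]
struct PipeMapFlags(u32);

impl PipeMapFlags {
    const READ: Self = PipeMapFlags(1 << 0);
    const WRITE: Self = PipeMapFlags(1 << 1);
    const READ_WRITE: Self = PipeMapFlags(Self::READ.0 | Self::WRITE.0);
    const UNSYNCHRONIZED: Self = PipeMapFlags(1 << 2);
}

impl BitOr for PipeMapFlags {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self {
        PipeMapFlags(self.0 | rhs.0)
    }
}

// Same shape as RWFlags above: each variant carries the matching bit
// pattern, so the conversion is a plain cast.
#[repr(u32)]
#[derive(Clone, Copy)]
enum RwFlags {
    Rd = PipeMapFlags::READ.0,
    Wr = PipeMapFlags::WRITE.0,
    Rw = PipeMapFlags::READ_WRITE.0,
}

impl From<RwFlags> for PipeMapFlags {
    fn from(rw: RwFlags) -> Self {
        PipeMapFlags(rw as u32)
    }
}

fn main() {
    // Mirrors the flags computation in buffer_map/texture_map below.
    let block = false;
    let flags = match block {
        false => PipeMapFlags::UNSYNCHRONIZED,
        true => PipeMapFlags(0),
    } | RwFlags::Rd.into();
    assert_eq!(flags, PipeMapFlags(0b101));

    // The read/write variant is just both access bits set.
    let wr: PipeMapFlags = RwFlags::Wr.into();
    let rw: PipeMapFlags = RwFlags::Rw.into();
    assert_eq!(rw, PipeMapFlags(wr.0 | PipeMapFlags::READ.0));
}
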
impl PipeContext {
pub(super) fn new(context: *mut pipe_context, screen: &Arc<PipeScreen>) -> Option<Self> {
let s = Self {
offset: i32,
size: i32,
block: bool,
+ rw: RWFlags,
) -> PipeTransfer {
let mut b = pipe_box::default();
let mut out: *mut pipe_transfer = ptr::null_mut();
let flags = match block {
false => pipe_map_flags::PIPE_MAP_UNSYNCHRONIZED,
true => pipe_map_flags(0),
- } | pipe_map_flags::PIPE_MAP_READ_WRITE;
+ } | rw.into();
let ptr = unsafe {
self.pipe.as_ref().buffer_map.unwrap()(
unsafe { self.pipe.as_ref().buffer_unmap.unwrap()(self.pipe.as_ptr(), tx) };
}
- pub fn texture_map(&self, res: &PipeResource, bx: &pipe_box, block: bool) -> PipeTransfer {
+ pub fn texture_map(
+ &self,
+ res: &PipeResource,
+ bx: &pipe_box,
+ block: bool,
+ rw: RWFlags,
+ ) -> PipeTransfer {
let mut out: *mut pipe_transfer = ptr::null_mut();
let flags = match block {
false => pipe_map_flags::PIPE_MAP_UNSYNCHRONIZED,
true => pipe_map_flags(0),
- } | pipe_map_flags::PIPE_MAP_READ_WRITE;
+ } | rw.into();
let ptr = unsafe {
self.pipe.as_ref().texture_map.unwrap()(