1 #![allow(non_upper_case_globals)]
3 use crate::api::event::create_and_queue;
4 use crate::api::icd::*;
5 use crate::api::types::*;
6 use crate::api::util::*;
7 use crate::core::context::Context;
8 use crate::core::device::*;
9 use crate::core::format::*;
10 use crate::core::memory::*;
13 use mesa_rust_util::properties::Properties;
14 use mesa_rust_util::ptr::*;
15 use rusticl_opencl_gen::*;
16 use rusticl_proc_macros::cl_entrypoint;
17 use rusticl_proc_macros::cl_info_entrypoint;
20 use std::alloc::Layout;
21 use std::cmp::Ordering;
22 use std::mem::{self, MaybeUninit};
23 use std::os::raw::c_void;
/// Validates a `cl_mem_flags` bitfield against the OpenCL Memory Flags table.
///
/// Returns `CL_INVALID_VALUE` if an unknown bit is set or if mutually
/// exclusive flags are combined. `images` presumably restricts the valid set
/// for image objects — the branch using it is elided here; TODO confirm.
fn validate_mem_flags(flags: cl_mem_flags, images: bool) -> CLResult<()> {
    // Device-side access qualifiers are always accepted.
    let mut valid_flags = cl_bitfield::from(
        CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE,
    // Host-pointer and host-access qualifiers.
    valid_flags |= cl_bitfield::from(
        | CL_MEM_ALLOC_HOST_PTR
        | CL_MEM_COPY_HOST_PTR
        | CL_MEM_HOST_WRITE_ONLY
        | CL_MEM_HOST_READ_ONLY
        | CL_MEM_HOST_NO_ACCESS,
    // Within each of the following groups at most one flag may be set.
    let read_write_group =
        cl_bitfield::from(CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY);
    let alloc_host_group = cl_bitfield::from(CL_MEM_ALLOC_HOST_PTR | CL_MEM_USE_HOST_PTR);
    let copy_host_group = cl_bitfield::from(CL_MEM_COPY_HOST_PTR | CL_MEM_USE_HOST_PTR);
    let host_read_write_group =
        cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);
    // Reject unknown bits and conflicting combinations.
    if (flags & !valid_flags != 0)
        || (flags & read_write_group).count_ones() > 1
        || (flags & alloc_host_group).count_ones() > 1
        || (flags & copy_host_group).count_ones() > 1
        || (flags & host_read_write_group).count_ones() > 1
        return Err(CL_INVALID_VALUE);
/// Validates a `cl_map_flags` value, common to buffer and image mapping.
fn validate_map_flags_common(map_flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_VALUE ... if values specified in map_flags are not valid.
        cl_bitfield::from(CL_MAP_READ | CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION);
    let read_write_group = cl_bitfield::from(CL_MAP_READ | CL_MAP_WRITE);
    let invalidate_group = cl_bitfield::from(CL_MAP_WRITE_INVALIDATE_REGION);
    // Unknown bits, or READ/WRITE combined with WRITE_INVALIDATE_REGION, are invalid.
    if (map_flags & !valid_flags != 0)
        || ((map_flags & read_write_group != 0) && (map_flags & invalidate_group != 0))
        return Err(CL_INVALID_VALUE);
/// Validates `map_flags` against the host-access flags `m` was created with.
///
/// Returns `CL_INVALID_OPERATION` when the requested mapping contradicts the
/// memory object's host-access restrictions.
fn validate_map_flags(m: &Mem, map_flags: cl_mem_flags) -> CLResult<()> {
    // First apply the flag-combination checks shared with all map operations.
    validate_map_flags_common(map_flags)?;
    // CL_INVALID_OPERATION if buffer has been created with CL_MEM_HOST_WRITE_ONLY or
    // CL_MEM_HOST_NO_ACCESS and CL_MAP_READ is set in map_flags
    if bit_check(m.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) &&
        bit_check(map_flags, CL_MAP_READ) ||
        // or if buffer has been created with CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS and
        // CL_MAP_WRITE or CL_MAP_WRITE_INVALIDATE_REGION is set in map_flags.
        bit_check(m.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) &&
        bit_check(map_flags, CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION)
        return Err(CL_INVALID_OPERATION);
/// Masks `flags` down to the device-side access-qualifier bits only,
/// dropping host-pointer and host-access bits.
fn filter_image_access_flags(flags: cl_mem_flags) -> cl_mem_flags {
        & (CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE)
/// Merges flags inherited from a parent memory object into `flags`, per the
/// clCreateImage rules for images created from another mem object.
fn inherit_mem_flags(mut flags: cl_mem_flags, mem: &Mem) -> cl_mem_flags {
    // Device-side access qualifiers; only inherited when the caller set none.
    let read_write_mask = cl_bitfield::from(
        // not in spec, but...
        CL_MEM_KERNEL_READ_AND_WRITE,
    // Host-pointer flags: always taken from the parent, never from the caller.
        cl_bitfield::from(CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR);
    // Host-access qualifiers; only inherited when the caller set none.
        cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);
    // For CL_MEM_OBJECT_IMAGE1D_BUFFER image type, or an image created from another memory object
    // (image or buffer)...
    // ... if the CL_MEM_READ_WRITE, CL_MEM_READ_ONLY or CL_MEM_WRITE_ONLY values are not
    // specified in flags, they are inherited from the corresponding memory access qualifiers
    // associated with mem_object. ...
    if flags & read_write_mask == 0 {
        flags |= mem.flags & read_write_mask;
    // ... The CL_MEM_USE_HOST_PTR, CL_MEM_ALLOC_HOST_PTR and CL_MEM_COPY_HOST_PTR values cannot
    // be specified in flags but are inherited from the corresponding memory access qualifiers
    // associated with mem_object. ...
    flags &= !host_ptr_mask;
    flags |= mem.flags & host_ptr_mask;
    // ... If the CL_MEM_HOST_WRITE_ONLY, CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS values
    // are not specified in flags, they are inherited from the corresponding memory access
    // qualifiers associated with mem_object.
    if flags & host_mask == 0 {
        flags |= mem.flags & host_mask;
/// Returns `true` if `image_type` is one of the supported image object types
/// listed in `CL_IMAGE_TYPES`.
fn image_type_valid(image_type: cl_mem_object_type) -> bool {
    CL_IMAGE_TYPES.contains(&image_type)
/// Returns `CL_INVALID_VALUE` for any `cl_addressing_mode` not defined by the
/// spec; the accepted variants are matched explicitly below.
fn validate_addressing_mode(addressing_mode: cl_addressing_mode) -> CLResult<()> {
    match addressing_mode {
        | CL_ADDRESS_CLAMP_TO_EDGE
        | CL_ADDRESS_MIRRORED_REPEAT => Ok(()),
        _ => Err(CL_INVALID_VALUE),
/// Returns `CL_INVALID_VALUE` for any `cl_filter_mode` other than
/// `CL_FILTER_NEAREST` or `CL_FILTER_LINEAR`.
fn validate_filter_mode(filter_mode: cl_filter_mode) -> CLResult<()> {
        CL_FILTER_NEAREST | CL_FILTER_LINEAR => Ok(()),
        _ => Err(CL_INVALID_VALUE),
/// Checks that `host_ptr` being NULL/non-NULL is consistent with the
/// CL_MEM_USE_HOST_PTR / CL_MEM_COPY_HOST_PTR bits in `flags`.
fn validate_host_ptr(host_ptr: *mut ::std::os::raw::c_void, flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_HOST_PTR if host_ptr is NULL and CL_MEM_USE_HOST_PTR or CL_MEM_COPY_HOST_PTR are
    if host_ptr.is_null()
        && flags & (cl_mem_flags::from(CL_MEM_USE_HOST_PTR | CL_MEM_COPY_HOST_PTR)) != 0
        return Err(CL_INVALID_HOST_PTR);
    // or if host_ptr is not NULL but CL_MEM_COPY_HOST_PTR or CL_MEM_USE_HOST_PTR are not set in
    if !host_ptr.is_null()
        && flags & (cl_mem_flags::from(CL_MEM_USE_HOST_PTR | CL_MEM_COPY_HOST_PTR)) == 0
        return Err(CL_INVALID_HOST_PTR);
/// Validates `flags` for a child object (sub-buffer or image) being created
/// from an existing memory object `mem`, per the clCreateImage /
/// clCreateSubBuffer error tables.
fn validate_matching_buffer_flags(mem: &Mem, flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_VALUE if an image is being created from another memory object (buffer or image)
    // under one of the following circumstances:
    // 1) mem_object was created with CL_MEM_WRITE_ONLY and
    //    flags specifies CL_MEM_READ_WRITE or CL_MEM_READ_ONLY,
    if bit_check(mem.flags, CL_MEM_WRITE_ONLY) && bit_check(flags, CL_MEM_READ_WRITE | CL_MEM_READ_ONLY) ||
        // 2) mem_object was created with CL_MEM_READ_ONLY and
        //    flags specifies CL_MEM_READ_WRITE or CL_MEM_WRITE_ONLY,
        bit_check(mem.flags, CL_MEM_READ_ONLY) && bit_check(flags, CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY) ||
        // 3) flags specifies CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR or CL_MEM_COPY_HOST_PTR.
        bit_check(flags, CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR) ||
        // CL_INVALID_VALUE if an image is being created from another memory object (buffer or image)
        // and mem_object was created with CL_MEM_HOST_WRITE_ONLY and flags specifies CL_MEM_HOST_READ_ONLY
        bit_check(mem.flags, CL_MEM_HOST_WRITE_ONLY) && bit_check(flags, CL_MEM_HOST_READ_ONLY) ||
        // or if mem_object was created with CL_MEM_HOST_READ_ONLY and flags specifies CL_MEM_HOST_WRITE_ONLY
        bit_check(mem.flags, CL_MEM_HOST_READ_ONLY) && bit_check(flags, CL_MEM_HOST_WRITE_ONLY) ||
        // or if mem_object was created with CL_MEM_HOST_NO_ACCESS and flags specifies
        // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_WRITE_ONLY.
        bit_check(mem.flags, CL_MEM_HOST_NO_ACCESS) && bit_check(flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_WRITE_ONLY)
        return Err(CL_INVALID_VALUE);
// clGetMemObjectInfo: maps each cl_mem_info query to the corresponding field
// of the Mem object. The proc macro generates the actual entry point.
#[cl_info_entrypoint(cl_get_mem_object_info)]
impl CLInfo<cl_mem_info> for cl_mem {
    fn query(&self, q: cl_mem_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let mem = self.get_ref()?;
            CL_MEM_ASSOCIATED_MEMOBJECT => {
                let ptr = match mem.parent.as_ref() {
                    // Note we use as_ptr here which doesn't increase the reference count.
                    Some(parent) => Arc::as_ptr(parent),
                cl_prop::<cl_mem>(cl_mem::from_ptr(ptr))
                // Note we use as_ptr here which doesn't increase the reference count.
                let ptr = Arc::as_ptr(&mem.context);
                cl_prop::<cl_context>(cl_context::from_ptr(ptr))
            CL_MEM_FLAGS => cl_prop::<cl_mem_flags>(mem.flags),
            // TODO debugging feature: map count is not tracked, always report 0.
            CL_MEM_MAP_COUNT => cl_prop::<cl_uint>(0),
            CL_MEM_HOST_PTR => cl_prop::<*mut c_void>(mem.host_ptr),
            CL_MEM_OFFSET => cl_prop::<usize>(mem.offset),
            CL_MEM_PROPERTIES => cl_prop::<&Vec<cl_mem_properties>>(&mem.props),
            CL_MEM_REFERENCE_COUNT => cl_prop::<cl_uint>(self.refcnt()?),
            CL_MEM_SIZE => cl_prop::<usize>(mem.size),
            CL_MEM_TYPE => cl_prop::<cl_mem_object_type>(mem.mem_type),
            CL_MEM_USES_SVM_POINTER | CL_MEM_USES_SVM_POINTER_ARM => {
                cl_prop::<cl_bool>(mem.is_svm().into())
            // CL_INVALID_VALUE if param_name is not one of the supported values.
            _ => return Err(CL_INVALID_VALUE),
/// clCreateBufferWithProperties: validates flags, size, host_ptr and
/// properties, then allocates a new buffer in context `c`.
fn create_buffer_with_properties(
    properties: *const cl_mem_properties,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let c = context.get_arc()?;
    // CL_INVALID_VALUE if values specified in flags are not valid as defined in the Memory Flags table.
    validate_mem_flags(flags, false)?;
    // CL_INVALID_BUFFER_SIZE if size is 0
        return Err(CL_INVALID_BUFFER_SIZE);
    // ... or if size is greater than CL_DEVICE_MAX_MEM_ALLOC_SIZE for all devices in context.
    if checked_compare(size, Ordering::Greater, c.max_mem_alloc()) {
        return Err(CL_INVALID_BUFFER_SIZE);
    // host_ptr must be consistent with the USE/COPY_HOST_PTR bits.
    validate_host_ptr(host_ptr, flags)?;
    let props = Properties::from_ptr_raw(properties);
    // CL_INVALID_PROPERTY if a property name in properties is not a supported property name, if
    // the value specified for a supported property name is not valid, or if the same property name
    // is specified more than once.
    // we don't support any properties besides the 0 property
    return Err(CL_INVALID_PROPERTY);
    Ok(cl_mem::from_arc(Mem::new_buffer(
        c, flags, size, host_ptr, props,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    // clCreateBuffer is clCreateBufferWithProperties with a NULL properties list.
    create_buffer_with_properties(context, ptr::null(), flags, size, host_ptr)
/// clCreateSubBuffer: creates a sub-buffer aliasing a region of `buffer`,
/// inheriting unspecified flags from the parent.
fn create_sub_buffer(
    mut flags: cl_mem_flags,
    buffer_create_type: cl_buffer_create_type,
    buffer_create_info: *const ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let b = buffer.get_arc()?;
    // CL_INVALID_MEM_OBJECT if buffer ... is a sub-buffer object.
    if b.parent.is_some() {
        return Err(CL_INVALID_MEM_OBJECT);
    // Child flags must not contradict the parent's access restrictions.
    validate_matching_buffer_flags(&b, flags)?;
    // Unspecified access qualifiers are inherited from the parent buffer.
    flags = inherit_mem_flags(flags, &b);
    validate_mem_flags(flags, false)?;
    let (offset, size) = match buffer_create_type {
        CL_BUFFER_CREATE_TYPE_REGION => {
            // buffer_create_info is a pointer to a cl_buffer_region structure specifying a region of
            // CL_INVALID_VALUE if value(s) specified in buffer_create_info (for a given
            // buffer_create_type) is not valid or if buffer_create_info is NULL.
            let region = unsafe { buffer_create_info.cast::<cl_buffer_region>().as_ref() }
                .ok_or(CL_INVALID_VALUE)?;
            // CL_INVALID_BUFFER_SIZE if the size field of the cl_buffer_region structure passed in
            // buffer_create_info is 0.
            if region.size == 0 {
                return Err(CL_INVALID_BUFFER_SIZE);
            // CL_INVALID_VALUE if the region specified by the cl_buffer_region structure passed in
            // buffer_create_info is out of bounds in buffer.
            // NOTE(review): `origin + size` can wrap on overflow; consider
            // `region.origin.checked_add(region.size)` — confirm upstream behavior.
            if region.origin + region.size > b.size {
                return Err(CL_INVALID_VALUE);
            (region.origin, region.size)
        // CL_INVALID_VALUE if the value specified in buffer_create_type is not valid.
        _ => return Err(CL_INVALID_VALUE),
    Ok(cl_mem::from_arc(Mem::new_sub_buffer(
        b, flags, offset, size,
// CL_MISALIGNED_SUB_BUFFER_OFFSET if there are no devices in context associated with buffer for which the origin field of the cl_buffer_region structure passed in buffer_create_info is aligned to the CL_DEVICE_MEM_BASE_ADDR_ALIGN value.
/// clSetMemObjectDestructorCallback: registers `pfn_notify` to be invoked
/// (with `user_data`) when the memory object is destroyed.
fn set_mem_object_destructor_callback(
    pfn_notify: Option<MemCB>,
    user_data: *mut ::std::os::raw::c_void,
    let m = memobj.get_ref()?;
    // CL_INVALID_VALUE if pfn_notify is NULL.
    if pfn_notify.is_none() {
        return Err(CL_INVALID_VALUE);
    // Queue the callback; callbacks are run on destruction.
        .push(cl_closure!(|m| pfn_notify(m, user_data)));
/// Validates a `cl_image_format` pointer and returns the format together with
/// its pixel size in bytes.
///
/// Returns `CL_INVALID_IMAGE_FORMAT_DESCRIPTOR` on a NULL pointer, an unknown
/// channel order/data-type combination, or a packed data type paired with an
/// incompatible channel order.
fn validate_image_format<'a>(
    image_format: *const cl_image_format,
) -> CLResult<(&'a cl_image_format, u8)> {
    // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR ... if image_format is NULL.
    let format = unsafe { image_format.as_ref() }.ok_or(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)?;
    let pixel_size = format
        .ok_or(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)?;
    // special validation: packed data types only pair with specific orders.
    let valid_combination = match format.image_channel_data_type {
        CL_UNORM_SHORT_565 | CL_UNORM_SHORT_555 | CL_UNORM_INT_101010 => {
            [CL_RGB, CL_RGBx].contains(&format.image_channel_order)
        CL_UNORM_INT_101010_2 => format.image_channel_order == CL_RGBA,
    if !valid_combination {
        return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
    Ok((format, pixel_size))
/// Validates a `cl_image_desc` for image creation and normalizes it.
///
/// Checks the image type, dimensions against the device limits in `devs`,
/// mip/sample counts, and the optional parent `mem_object`; fills in default
/// row/slice pitches where the spec allows them to be 0. Returns the
/// (possibly updated) descriptor plus the parent memory object, if any.
fn validate_image_desc(
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<(cl_image_desc, Option<Arc<Mem>>)> {
    // CL_INVALID_IMAGE_DESCRIPTOR if values specified in image_desc are not valid
    const err: cl_int = CL_INVALID_IMAGE_DESCRIPTOR;
    // CL_INVALID_IMAGE_DESCRIPTOR ... if image_desc is NULL.
    // Copy the descriptor so defaulted pitches can be written back below.
    let mut desc = *unsafe { image_desc.as_ref() }.ok_or(err)?;
    // image_type describes the image type and must be either CL_MEM_OBJECT_IMAGE1D,
    // CL_MEM_OBJECT_IMAGE1D_BUFFER, CL_MEM_OBJECT_IMAGE1D_ARRAY, CL_MEM_OBJECT_IMAGE2D,
    // CL_MEM_OBJECT_IMAGE2D_ARRAY, or CL_MEM_OBJECT_IMAGE3D.
    if !CL_IMAGE_TYPES.contains(&desc.image_type) {
    // dims = dimensionality of the type, array = whether it is an array type.
    let (dims, array) = desc.type_info();
    // image_width is the width of the image in pixels. For a 2D image and image array, the image
    // width must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_WIDTH. For a 3D image, the image width
    // must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_WIDTH. For a 1D image buffer, the image width
    // must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE_MAX_BUFFER_SIZE. For a 1D image and 1D image array,
    // the image width must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_WIDTH.
    //
    // image_height is the height of the image in pixels. This is only used if the image is a 2D or
    // 3D image, or a 2D image array. For a 2D image or image array, the image height must be a
    // value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_HEIGHT. For a 3D image, the image height must be a
    // value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_HEIGHT.
    //
    // image_depth is the depth of the image in pixels. This is only used if the image is a 3D image
    // and must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_DEPTH.
    if desc.image_width < 1
        || desc.image_height < 1 && dims >= 2
        || desc.image_depth < 1 && dims >= 3
        || desc.image_array_size < 1 && array
    // Pick the dimension limit appropriate for the image type.
    let max_size = if dims == 3 {
        devs.iter().map(|d| d.image_3d_size()).min()
    } else if desc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER {
        devs.iter().map(|d| d.image_buffer_size()).min()
        devs.iter().map(|d| d.image_2d_size()).min()
    let max_array = devs.iter().map(|d| d.image_array_size()).min().unwrap();
    // CL_INVALID_IMAGE_SIZE if image dimensions specified in image_desc exceed the maximum image
    // dimensions described in the Device Queries table for all devices in context.
    if desc.image_width > max_size
        || desc.image_height > max_size && dims >= 2
        || desc.image_depth > max_size && dims >= 3
        || desc.image_array_size > max_array && array
        return Err(CL_INVALID_IMAGE_SIZE);
    // num_mip_levels and num_samples must be 0.
    if desc.num_mip_levels != 0 || desc.num_samples != 0 {
    // mem_object may refer to a valid buffer or image memory object. mem_object can be a buffer
    // memory object if image_type is CL_MEM_OBJECT_IMAGE1D_BUFFER or CL_MEM_OBJECT_IMAGE2D.
    // mem_object can be an image object if image_type is CL_MEM_OBJECT_IMAGE2D. Otherwise it must
    // TODO: cl_khr_image2d_from_buffer is an optional feature
    let p = unsafe { &desc.anon_1.mem_object };
    let parent = if !p.is_null() {
        let p = p.get_arc()?;
        if !match desc.image_type {
            CL_MEM_OBJECT_IMAGE1D_BUFFER => p.is_buffer(),
            CL_MEM_OBJECT_IMAGE2D => {
                (p.is_buffer() && devs.iter().any(|d| d.image2d_from_buffer_supported()))
                    || p.mem_type == CL_MEM_OBJECT_IMAGE2D
            return Err(CL_INVALID_OPERATION);
    // image_row_pitch is the scan-line pitch in bytes. This must be 0 if host_ptr is NULL and can
    // be either 0 or ≥ image_width × size of element in bytes if host_ptr is not NULL. If host_ptr
    // is not NULL and image_row_pitch = 0, image_row_pitch is calculated as image_width × size of
    // element in bytes. If image_row_pitch is not 0, it must be a multiple of the image element
    // size in bytes. For a 2D image created from a buffer, the pitch specified (or computed if
    // pitch specified is 0) must be a multiple of the maximum of the
    // CL_DEVICE_IMAGE_PITCH_ALIGNMENT value for all devices in the context associated with the
    // buffer specified by mem_object that support images.
    //
    // image_slice_pitch is the size in bytes of each 2D slice in the 3D image or the size in bytes
    // of each image in a 1D or 2D image array. This must be 0 if host_ptr is NULL. If host_ptr is
    // not NULL, image_slice_pitch can be either 0 or ≥ image_row_pitch × image_height for a 2D
    // image array or 3D image and can be either 0 or ≥ image_row_pitch for a 1D image array. If
    // host_ptr is not NULL and image_slice_pitch = 0, image_slice_pitch is calculated as
    // image_row_pitch × image_height for a 2D image array or 3D image and image_row_pitch for a 1D
    // image array. If image_slice_pitch is not 0, it must be a multiple of the image_row_pitch.
    let has_buf_parent = parent.as_ref().map_or(false, |p| p.is_buffer());
    if host_ptr.is_null() {
        if (desc.image_row_pitch != 0 || desc.image_slice_pitch != 0) && !has_buf_parent {
        // Fill in the default pitches computed from width/height.
        if desc.image_row_pitch == 0 {
            desc.image_row_pitch = desc.image_width * elem_size;
        if desc.image_slice_pitch == 0 {
            desc.image_slice_pitch = desc.image_row_pitch * desc.image_height;
        if has_buf_parent && desc.image_type != CL_MEM_OBJECT_IMAGE1D_BUFFER {
            let pitch_alignment = devs
                .map(|d| d.image_pitch_alignment())
            if desc.image_row_pitch % (pitch_alignment * elem_size) != 0 {
        // host_ptr is not NULL: validate/compute pitches from the user values.
        if desc.image_row_pitch == 0 {
            desc.image_row_pitch = desc.image_width * elem_size;
        } else if desc.image_row_pitch % elem_size != 0 {
        if dims == 3 || array {
            let valid_slice_pitch =
                desc.image_row_pitch * if dims == 1 { 1 } else { desc.image_height };
            if desc.image_slice_pitch == 0 {
                desc.image_slice_pitch = valid_slice_pitch;
            } else if desc.image_slice_pitch < valid_slice_pitch
                || desc.image_slice_pitch % desc.image_row_pitch != 0
/// Checks that the (origin, region) box lies inside image `i` and that the
/// unused coordinates follow the per-dimensionality rules of the spec.
fn validate_image_bounds(i: &Mem, origin: CLVec<usize>, region: CLVec<usize>) -> CLResult<()> {
    let dims = i.image_desc.dims_with_array();
    let bound = region + origin;
    if bound > i.image_desc.size() {
        return Err(CL_INVALID_VALUE);
    // If image is a 2D image object, origin[2] must be 0. If image is a 1D image or 1D image buffer
    // object, origin[1] and origin[2] must be 0. If image is a 1D image array object, origin[2]
    if dims < 3 && origin[2] != 0 || dims < 2 && origin[1] != 0 {
        return Err(CL_INVALID_VALUE);
    // If image is a 2D image object, region[2] must be 1. If image is a 1D image or 1D image buffer
    // object, region[1] and region[2] must be 1. If image is a 1D image array object, region[2]
    // must be 1. The values in region cannot be 0.
    if dims < 3 && region[2] != 1 || dims < 2 && region[1] != 1 || region.contains(&0) {
        return Err(CL_INVALID_VALUE);
/// Compares two image descriptors field by field, ignoring the
/// buffer/mem_object member (the anonymous union is deliberately excluded).
fn desc_eq_no_buffer(a: &cl_image_desc, b: &cl_image_desc) -> bool {
    a.image_type == b.image_type
        && a.image_width == b.image_width
        && a.image_height == b.image_height
        && a.image_depth == b.image_depth
        && a.image_array_size == b.image_array_size
        && a.image_row_pitch == b.image_row_pitch
        && a.image_slice_pitch == b.image_slice_pitch
        && a.num_mip_levels == b.num_mip_levels
        && a.num_samples == b.num_samples
    // (fn header elided) Validates an image created on top of an existing
    // buffer/image (desc.mem_object) and returns the merged flags.
    desc: &cl_image_desc,
    mut flags: cl_mem_flags,
    format: &cl_image_format,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem_flags> {
    // CL_INVALID_IMAGE_DESCRIPTOR if values specified in image_desc are not valid
    const err: cl_int = CL_INVALID_IMAGE_DESCRIPTOR;
    let mem_object = unsafe { desc.anon_1.mem_object };
    // mem_object may refer to a valid buffer or image memory object. mem_object can be a buffer
    // memory object if image_type is CL_MEM_OBJECT_IMAGE1D_BUFFER or CL_MEM_OBJECT_IMAGE2D
    // mem_object can be an image object if image_type is CL_MEM_OBJECT_IMAGE2D. Otherwise it must
    // be NULL. The image pixels are taken from the memory objects data store. When the contents of
    // the specified memory objects data store are modified, those changes are reflected in the
    // contents of the image object and vice-versa at corresponding synchronization points.
    if !mem_object.is_null() {
        let mem = mem_object.get_ref()?;
            CL_MEM_OBJECT_BUFFER => {
                match desc.image_type {
                    // For a 1D image buffer created from a buffer object, the image_width × size of
                    // element in bytes must be ≤ size of the buffer object.
                    CL_MEM_OBJECT_IMAGE1D_BUFFER => {
                        if desc.image_width * elem_size > mem.size {
                    // For a 2D image created from a buffer object, the image_row_pitch × image_height
                    // must be ≤ size of the buffer object specified by mem_object.
                    CL_MEM_OBJECT_IMAGE2D => {
                        // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if a 2D image is created from a buffer and the row pitch and base address alignment does not follow the rules described for creating a 2D image from a buffer.
                        if desc.image_row_pitch * desc.image_height > mem.size {
                    _ => return Err(err),
            // For an image object created from another image object, the values specified in the
            // image descriptor except for mem_object must match the image descriptor information
            // associated with mem_object.
            CL_MEM_OBJECT_IMAGE2D => {
                if desc.image_type != mem.mem_type || !desc_eq_no_buffer(desc, &mem.image_desc) {
                // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if a 2D image is created from a 2D image object
                // and the rules described above are not followed.
                //
                // Creating a 2D image object from another 2D image object creates a new 2D image
                // object that shares the image data store with mem_object but views the pixels in the
                // image with a different image channel order. Restrictions are:
                //
                // The image channel data type specified in image_format must match the image channel
                // data type associated with mem_object.
                if format.image_channel_data_type != mem.image_format.image_channel_data_type {
                    return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
                // The image channel order specified in image_format must be compatible with the image
                // channel order associated with mem_object. Compatible image channel orders are:
                if format.image_channel_order != mem.image_format.image_channel_order {
                    // in image_format | in mem_object:
                    // CL_sBGRA | CL_BGRA
                    // CL_BGRA | CL_sBGRA
                    // CL_sRGBA | CL_RGBA
                    // CL_RGBA | CL_sRGBA
                    // CL_sRGBx | CL_RGBx
                    // CL_RGBx | CL_sRGBx
                        format.image_channel_order,
                        mem.image_format.image_channel_order,
                        | (CL_BGRA, CL_sBGRA)
                        | (CL_sRGBA, CL_RGBA)
                        | (CL_RGBA, CL_sRGBA)
                        | (CL_sRGBx, CL_RGBx)
                        | (CL_RGBx, CL_sRGBx)
                        | (CL_DEPTH, CL_R) => (),
                        _ => return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR),
            _ => return Err(err),
        // If the buffer object specified by mem_object was created with CL_MEM_USE_HOST_PTR, the
        // host_ptr specified to clCreateBuffer or clCreateBufferWithProperties must be aligned to
        // the maximum of the CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT value for all devices in the
        // context associated with the buffer specified by mem_object that support images.
        if mem.flags & CL_MEM_USE_HOST_PTR as cl_mem_flags != 0 {
            for dev in &mem.context.devs {
                let addr_alignment = dev.image_base_address_alignment();
                if addr_alignment == 0 {
                    return Err(CL_INVALID_OPERATION);
                // NOTE(review): helper name `is_alligned` is misspelled ("aligned");
                // consider a rename in its defining module.
                } else if !is_alligned(host_ptr, addr_alignment as usize) {
        // Child flags must be compatible with, and inherit from, the parent.
        validate_matching_buffer_flags(mem, flags)?;
        flags = inherit_mem_flags(flags, mem);
    } else if desc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER {
// clGetImageInfo: maps each cl_image_info query to the corresponding field of
// the image's descriptor/format. The proc macro generates the entry point.
#[cl_info_entrypoint(cl_get_image_info)]
impl CLInfo<cl_image_info> for cl_mem {
    fn query(&self, q: cl_image_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let mem = self.get_ref()?;
            CL_IMAGE_ARRAY_SIZE => cl_prop::<usize>(mem.image_desc.image_array_size),
            // SAFETY-relevant: reads the anonymous union member; buffer and
            // mem_object alias the same storage in cl_image_desc.
            CL_IMAGE_BUFFER => cl_prop::<cl_mem>(unsafe { mem.image_desc.anon_1.buffer }),
            CL_IMAGE_DEPTH => cl_prop::<usize>(mem.image_desc.image_depth),
            CL_IMAGE_ELEMENT_SIZE => cl_prop::<usize>(mem.image_elem_size.into()),
            CL_IMAGE_FORMAT => cl_prop::<cl_image_format>(mem.image_format),
            CL_IMAGE_HEIGHT => cl_prop::<usize>(mem.image_desc.image_height),
            CL_IMAGE_NUM_MIP_LEVELS => cl_prop::<cl_uint>(mem.image_desc.num_mip_levels),
            CL_IMAGE_NUM_SAMPLES => cl_prop::<cl_uint>(mem.image_desc.num_samples),
            CL_IMAGE_ROW_PITCH => cl_prop::<usize>(mem.image_desc.image_row_pitch),
            CL_IMAGE_SLICE_PITCH => cl_prop::<usize>(mem.image_desc.image_slice_pitch),
            CL_IMAGE_WIDTH => cl_prop::<usize>(mem.image_desc.image_width),
            // CL_INVALID_VALUE if param_name is not one of the supported values.
            _ => return Err(CL_INVALID_VALUE),
/// clCreateImageWithProperties: validates the format, descriptor, host_ptr,
/// flags and properties, then allocates a new image in context `c`.
fn create_image_with_properties(
    properties: *const cl_mem_properties,
    mut flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let c = context.get_arc()?;
    // CL_INVALID_OPERATION if there are no devices in context that support images (i.e.
    // CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
        .find(|d| d.image_supported())
        .ok_or(CL_INVALID_OPERATION)?;
    let (format, elem_size) = validate_image_format(image_format)?;
    let (desc, parent) = validate_image_desc(image_desc, host_ptr, elem_size.into(), &c.devs)?;
    // validate host_ptr before merging flags
    validate_host_ptr(host_ptr, flags)?;
    // Merge/validate flags against the (optional) parent object.
    flags = validate_buffer(&desc, flags, format, host_ptr, elem_size.into())?;
    // For all image types except CL_MEM_OBJECT_IMAGE1D_BUFFER, if the value specified for flags is 0, the
    // default is used which is CL_MEM_READ_WRITE.
    if flags == 0 && desc.image_type != CL_MEM_OBJECT_IMAGE1D_BUFFER {
        flags = CL_MEM_READ_WRITE.into();
    validate_mem_flags(flags, false)?;
    let filtered_flags = filter_image_access_flags(flags);
    // CL_IMAGE_FORMAT_NOT_SUPPORTED if there are no devices in context that support image_format.
        .filter_map(|d| d.formats.get(format))
        .filter_map(|f| f.get(&desc.image_type))
        .find(|f| *f & filtered_flags == filtered_flags)
        .ok_or(CL_IMAGE_FORMAT_NOT_SUPPORTED)?;
    let props = Properties::from_ptr_raw(properties);
    // CL_INVALID_PROPERTY if a property name in properties is not a supported property name, if
    // the value specified for a supported property name is not valid, or if the same property name
    // is specified more than once.
    // we don't support any properties besides the 0 property
    return Err(CL_INVALID_PROPERTY);
    Ok(cl_mem::from_arc(Mem::new_image(
    image_format: *const cl_image_format,
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    // clCreateImage is clCreateImageWithProperties with no properties.
    create_image_with_properties(
    image_format: *const cl_image_format,
    image_row_pitch: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    // Deprecated clCreateImage2D entry point: build an equivalent 2D
    // cl_image_desc and forward to create_image.
    let image_desc = cl_image_desc {
        image_type: CL_MEM_OBJECT_IMAGE2D,
        image_width: image_width,
        image_height: image_height,
        image_row_pitch: image_row_pitch,
    create_image(context, flags, image_format, &image_desc, host_ptr)
    image_format: *const cl_image_format,
    image_row_pitch: usize,
    image_slice_pitch: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    // Deprecated clCreateImage3D entry point: build an equivalent 3D
    // cl_image_desc and forward to create_image.
    let image_desc = cl_image_desc {
        image_type: CL_MEM_OBJECT_IMAGE3D,
        image_width: image_width,
        image_height: image_height,
        image_depth: image_depth,
        image_row_pitch: image_row_pitch,
        image_slice_pitch: image_slice_pitch,
    create_image(context, flags, image_format, &image_desc, host_ptr)
/// clGetSupportedImageFormats: collects the formats supported by the
/// context's devices for the given flags and image type, writing up to
/// `num_entries` into `image_formats` and the total into `num_image_formats`.
fn get_supported_image_formats(
    image_type: cl_mem_object_type,
    num_entries: cl_uint,
    image_formats: *mut cl_image_format,
    num_image_formats: *mut cl_uint,
    let c = context.get_ref()?;
    // CL_INVALID_VALUE if flags
    validate_mem_flags(flags, true)?;
    // or image_type are not valid
    if !image_type_valid(image_type) {
        return Err(CL_INVALID_VALUE);
    // CL_INVALID_VALUE ... if num_entries is 0 and image_formats is not NULL.
    if num_entries == 0 && !image_formats.is_null() {
        return Err(CL_INVALID_VALUE);
    let mut res = Vec::<cl_image_format>::new();
    let filtered_flags = filter_image_access_flags(flags);
    // Keep only formats whose supported access bits cover the requested ones.
    for f in &dev.formats {
        let s = f.1.get(&image_type).unwrap_or(&0);
        if filtered_flags & s == filtered_flags {
    // Both out-pointers are optional; *_checked helpers skip NULL targets.
    num_image_formats.write_checked(res.len() as cl_uint);
    unsafe { image_formats.copy_checked(res.as_ptr(), res.len()) };
// clGetSamplerInfo: maps each cl_sampler_info query to the corresponding
// Sampler field. The proc macro generates the actual entry point.
#[cl_info_entrypoint(cl_get_sampler_info)]
impl CLInfo<cl_sampler_info> for cl_sampler {
    fn query(&self, q: cl_sampler_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let sampler = self.get_ref()?;
            CL_SAMPLER_ADDRESSING_MODE => cl_prop::<cl_addressing_mode>(sampler.addressing_mode),
            CL_SAMPLER_CONTEXT => {
                // Note we use as_ptr here which doesn't increase the reference count.
                let ptr = Arc::as_ptr(&sampler.context);
                cl_prop::<cl_context>(cl_context::from_ptr(ptr))
            CL_SAMPLER_FILTER_MODE => cl_prop::<cl_filter_mode>(sampler.filter_mode),
            CL_SAMPLER_NORMALIZED_COORDS => cl_prop::<bool>(sampler.normalized_coords),
            CL_SAMPLER_REFERENCE_COUNT => cl_prop::<cl_uint>(self.refcnt()?),
            CL_SAMPLER_PROPERTIES => {
                cl_prop::<&Option<Properties<cl_sampler_properties>>>(&sampler.props)
            // CL_INVALID_VALUE if param_name is not one of the supported values
            _ => return Err(CL_INVALID_VALUE),
/// Shared implementation behind clCreateSampler and
/// clCreateSamplerWithProperties: validates the arguments and allocates a
/// new Sampler in context `c`.
fn create_sampler_impl(
    normalized_coords: cl_bool,
    addressing_mode: cl_addressing_mode,
    filter_mode: cl_filter_mode,
    props: Option<Properties<cl_sampler_properties>>,
) -> CLResult<cl_sampler> {
    let c = context.get_arc()?;
    // CL_INVALID_OPERATION if images are not supported by any device associated with context (i.e.
    // CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
        .find(|d| d.image_supported())
        .ok_or(CL_INVALID_OPERATION)?;
    // CL_INVALID_VALUE if addressing_mode, filter_mode, normalized_coords or a combination of these
    // arguments are not valid.
    validate_addressing_mode(addressing_mode)?;
    validate_filter_mode(filter_mode)?;
    let sampler = Sampler::new(
        check_cl_bool(normalized_coords).ok_or(CL_INVALID_VALUE)?,
    Ok(cl_sampler::from_arc(sampler))
    // (fn header elided) clCreateSampler: forwards to create_sampler_impl
    // with no properties.
    normalized_coords: cl_bool,
    addressing_mode: cl_addressing_mode,
    filter_mode: cl_filter_mode,
) -> CLResult<cl_sampler> {
/// clCreateSamplerWithProperties: parses the sampler property list (falling
/// back to the spec defaults) and forwards to create_sampler_impl.
fn create_sampler_with_properties(
    sampler_properties: *const cl_sampler_properties,
) -> CLResult<cl_sampler> {
    // Spec defaults when a property is absent.
    let mut normalized_coords = CL_TRUE;
    let mut addressing_mode = CL_ADDRESS_CLAMP;
    let mut filter_mode = CL_FILTER_NEAREST;
    // CL_INVALID_VALUE if the same property name is specified more than once.
    let sampler_properties = if sampler_properties.is_null() {
        let sampler_properties =
            Properties::from_ptr(sampler_properties).ok_or(CL_INVALID_VALUE)?;
        for p in &sampler_properties.props {
            CL_SAMPLER_ADDRESSING_MODE => addressing_mode = p.1 as u32,
            CL_SAMPLER_FILTER_MODE => filter_mode = p.1 as u32,
            CL_SAMPLER_NORMALIZED_COORDS => normalized_coords = p.1 as u32,
            // CL_INVALID_VALUE if the property name in sampler_properties is not a supported
            _ => return Err(CL_INVALID_VALUE),
        Some(sampler_properties)
// clRetainSampler: presumably increments the sampler's reference count (body
// elided in this excerpt — TODO(review): confirm).
1008 fn retain_sampler(sampler: cl_sampler) -> CLResult<()> {
// clReleaseSampler: presumably decrements the sampler's reference count (body
// elided in this excerpt — TODO(review): confirm).
1013 fn release_sampler(sampler: cl_sampler) -> CLResult<()> {
// clEnqueueReadBuffer: validates arguments against the spec and queues a
// buffer→host copy of `cb` bytes starting at `offset`. The actual read runs
// in the closure handed to create_and_queue.
1018 fn enqueue_read_buffer(
1019     command_queue: cl_command_queue,
1021     blocking_read: cl_bool,
1024     ptr: *mut ::std::os::raw::c_void,
1025     num_events_in_wait_list: cl_uint,
1026     event_wait_list: *const cl_event,
1027     event: *mut cl_event,
1029     let q = command_queue.get_arc()?;
1030     let b = buffer.get_arc()?;
1031     let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
1032     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1034     // CL_INVALID_VALUE if the region being read or written specified by (offset, size) is out of
1035     // bounds or if ptr is a NULL value.
// NOTE(review): `offset + cb` is an unchecked usize add; pathological caller
// values could wrap in release builds — consider checked_add.
1036     if offset + cb > b.size || ptr.is_null() {
1037         return Err(CL_INVALID_VALUE);
1040     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1041     if b.context != q.context {
1042         return Err(CL_INVALID_CONTEXT);
1045     // CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the read and write operations are blocking
1046     // and the execution status of any of the events in event_wait_list is a negative integer value.
1047     if block && evs.iter().any(|e| e.is_error()) {
1048         return Err(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
1051     // CL_INVALID_OPERATION if clEnqueueReadBuffer is called on buffer which has been created with
1052     // CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
1053     if bit_check(b.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
1054         return Err(CL_INVALID_OPERATION);
// Queue the actual device→host transfer; the event carries this command type.
1059         CL_COMMAND_READ_BUFFER,
1063         Box::new(move |q, ctx| b.read_to_user(q, ctx, offset, ptr, cb)),
// Unimplemented spec error (sub-buffer alignment), kept as documentation:
1067 // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
// clEnqueueWriteBuffer: mirror of enqueue_read_buffer for the host→buffer
// direction; same validation order, but the host-access check is for
// HOST_READ_ONLY instead of HOST_WRITE_ONLY.
1071 fn enqueue_write_buffer(
1072     command_queue: cl_command_queue,
1074     blocking_write: cl_bool,
1077     ptr: *const ::std::os::raw::c_void,
1078     num_events_in_wait_list: cl_uint,
1079     event_wait_list: *const cl_event,
1080     event: *mut cl_event,
1082     let q = command_queue.get_arc()?;
1083     let b = buffer.get_arc()?;
1084     let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
1085     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1087     // CL_INVALID_VALUE if the region being read or written specified by (offset, size) is out of
1088     // bounds or if ptr is a NULL value.
// NOTE(review): unchecked `offset + cb` add, same caveat as enqueue_read_buffer.
1089     if offset + cb > b.size || ptr.is_null() {
1090         return Err(CL_INVALID_VALUE);
1093     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1094     if b.context != q.context {
1095         return Err(CL_INVALID_CONTEXT);
1098     // CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the read and write operations are blocking
1099     // and the execution status of any of the events in event_wait_list is a negative integer value.
1100     if block && evs.iter().any(|e| e.is_error()) {
1101         return Err(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
1104     // CL_INVALID_OPERATION if clEnqueueWriteBuffer is called on buffer which has been created with
1105     // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
1106     if bit_check(b.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
1107         return Err(CL_INVALID_OPERATION);
// Queue the host→device transfer.
1112         CL_COMMAND_WRITE_BUFFER,
1116         Box::new(move |q, ctx| b.write_from_user(q, ctx, offset, ptr, cb)),
// Unimplemented spec error (sub-buffer alignment), kept as documentation:
1120 // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
// clEnqueueCopyBuffer: device-side copy of `size` bytes between two buffers,
// implemented as a degenerate 1-D rect copy (regions [size, 1, 1]).
1124 fn enqueue_copy_buffer(
1125     command_queue: cl_command_queue,
1131     num_events_in_wait_list: cl_uint,
1132     event_wait_list: *const cl_event,
1133     event: *mut cl_event,
1135     let q = command_queue.get_arc()?;
1136     let src = src_buffer.get_arc()?;
1137     let dst = dst_buffer.get_arc()?;
1138     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1140     // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_buffer
1142     if q.context != src.context || q.context != dst.context {
1143         return Err(CL_INVALID_CONTEXT);
1146     // CL_INVALID_VALUE if src_offset, dst_offset, size, src_offset + size or dst_offset + size
1147     // require accessing elements outside the src_buffer and dst_buffer buffer objects respectively.
1148     if src_offset + size > src.size || dst_offset + size > dst.size {
1149         return Err(CL_INVALID_VALUE);
1152     // CL_MEM_COPY_OVERLAP if src_buffer and dst_buffer are the same buffer or sub-buffer object
1153     // and the source and destination regions overlap or if src_buffer and dst_buffer are different
1154     // sub-buffers of the same associated buffer object and they overlap. The regions overlap if
1155     // src_offset ≤ dst_offset ≤ src_offset + size - 1 or if dst_offset ≤ src_offset ≤ dst_offset + size - 1.
1156     if src.has_same_parent(&dst) {
// Rebase both offsets into the shared parent buffer so that sub-buffers of the
// same allocation are compared in one address space.
1157         let src_offset = src_offset + src.offset;
1158         let dst_offset = dst_offset + dst.offset;
1160         if (src_offset <= dst_offset && dst_offset < src_offset + size)
1161             || (dst_offset <= src_offset && src_offset < dst_offset + size)
1163             return Err(CL_MEM_COPY_OVERLAP);
1169         CL_COMMAND_COPY_BUFFER,
1173         Box::new(move |q, ctx| {
// 1-D copy expressed with the 3-D rect machinery: origins on the x axis only.
1178                 CLVec::new([src_offset, 0, 0]),
1179                 CLVec::new([dst_offset, 0, 0]),
1180                 &CLVec::new([size, 1, 1]),
// Unimplemented spec errors, kept as documentation:
1186 //• CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1187 //• CL_MISALIGNED_SUB_BUFFER_OFFSET if dst_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1188 //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_buffer or dst_buffer.
// clEnqueueReadBufferRect: reads a 3-D region from a buffer into host memory.
// Zero row/slice pitches are normalized to their spec-defined computed values
// before the bounds check.
1192 fn enqueue_read_buffer_rect(
1193     command_queue: cl_command_queue,
1195     blocking_read: cl_bool,
1196     buffer_origin: *const usize,
1197     host_origin: *const usize,
1198     region: *const usize,
1199     mut buffer_row_pitch: usize,
1200     mut buffer_slice_pitch: usize,
1201     mut host_row_pitch: usize,
1202     mut host_slice_pitch: usize,
1203     ptr: *mut ::std::os::raw::c_void,
1204     num_events_in_wait_list: cl_uint,
1205     event_wait_list: *const cl_event,
1206     event: *mut cl_event,
1208     let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
1209     let q = command_queue.get_arc()?;
1210     let buf = buffer.get_arc()?;
1211     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1213     // CL_INVALID_OPERATION if clEnqueueReadBufferRect is called on buffer which has been created
1214     // with CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
1215     if bit_check(buf.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
1216         return Err(CL_INVALID_OPERATION);
1219     // CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the read and write operations are blocking
1220     // and the execution status of any of the events in event_wait_list is a negative integer value.
1221     if block && evs.iter().any(|e| e.is_error()) {
1222         return Err(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
1225     // CL_INVALID_VALUE if buffer_origin, host_origin, or region is NULL.
1226     if buffer_origin.is_null() ||
1227        host_origin.is_null() ||
1229        // CL_INVALID_VALUE if ptr is NULL.
1232         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; each pointer names a [usize; 3].
1235     let r = unsafe { CLVec::from_raw(region) };
1236     let buf_ori = unsafe { CLVec::from_raw(buffer_origin) };
1237     let host_ori = unsafe { CLVec::from_raw(host_origin) };
1239     // CL_INVALID_VALUE if any region array element is 0.
1240     if r.contains(&0) ||
1241        // CL_INVALID_VALUE if buffer_row_pitch is not 0 and is less than region[0].
1242        buffer_row_pitch != 0 && buffer_row_pitch < r[0] ||
1243        // CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
1244        host_row_pitch != 0 && host_row_pitch < r[0]
1246         return Err(CL_INVALID_VALUE);
1249     // If buffer_row_pitch is 0, buffer_row_pitch is computed as region[0].
1250     if buffer_row_pitch == 0 {
1251         buffer_row_pitch = r[0];
1254     // If host_row_pitch is 0, host_row_pitch is computed as region[0].
1255     if host_row_pitch == 0 {
1256         host_row_pitch = r[0];
// Slice-pitch validation must run after row pitches were normalized above.
1259     // CL_INVALID_VALUE if buffer_slice_pitch is not 0 and is less than region[1] × buffer_row_pitch and not a multiple of buffer_row_pitch.
1260     if buffer_slice_pitch != 0 && buffer_slice_pitch < r[1] * buffer_row_pitch && buffer_slice_pitch % buffer_row_pitch != 0 ||
1261        // CL_INVALID_VALUE if host_slice_pitch is not 0 and is less than region[1] × host_row_pitch and not a multiple of host_row_pitch.
1262        host_slice_pitch != 0 && host_slice_pitch < r[1] * host_row_pitch && host_slice_pitch % host_row_pitch != 0
1264         return Err(CL_INVALID_VALUE);
1267     // If buffer_slice_pitch is 0, buffer_slice_pitch is computed as region[1] × buffer_row_pitch.
1268     if buffer_slice_pitch == 0 {
1269         buffer_slice_pitch = r[1] * buffer_row_pitch;
1272     // If host_slice_pitch is 0, host_slice_pitch is computed as region[1] × host_row_pitch.
1273     if host_slice_pitch == 0 {
1274         host_slice_pitch = r[1] * host_row_pitch
1277     // CL_INVALID_VALUE if the region being read or written specified by (buffer_origin, region,
1278     // buffer_row_pitch, buffer_slice_pitch) is out of bounds.
1279     if CLVec::calc_size(r + buf_ori, [1, buffer_row_pitch, buffer_slice_pitch]) > buf.size {
1280         return Err(CL_INVALID_VALUE);
1283     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1284     if q.context != buf.context {
1285         return Err(CL_INVALID_CONTEXT);
1290         CL_COMMAND_READ_BUFFER_RECT,
1294         Box::new(move |q, ctx| {
1295             buf.read_to_user_rect(
// Unimplemented spec error (sub-buffer alignment), kept as documentation:
1311 // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
// clEnqueueWriteBufferRect: mirror of enqueue_read_buffer_rect for the
// host→buffer direction; identical pitch normalization and validation, with
// the host-access check flipped to HOST_READ_ONLY.
1315 fn enqueue_write_buffer_rect(
1316     command_queue: cl_command_queue,
1318     blocking_write: cl_bool,
1319     buffer_origin: *const usize,
1320     host_origin: *const usize,
1321     region: *const usize,
1322     mut buffer_row_pitch: usize,
1323     mut buffer_slice_pitch: usize,
1324     mut host_row_pitch: usize,
1325     mut host_slice_pitch: usize,
1326     ptr: *const ::std::os::raw::c_void,
1327     num_events_in_wait_list: cl_uint,
1328     event_wait_list: *const cl_event,
1329     event: *mut cl_event,
1331     let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
1332     let q = command_queue.get_arc()?;
1333     let buf = buffer.get_arc()?;
1334     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1336     // CL_INVALID_OPERATION if clEnqueueWriteBufferRect is called on buffer which has been created
1337     // with CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
1338     if bit_check(buf.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
1339         return Err(CL_INVALID_OPERATION);
1342     // CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the read and write operations are blocking
1343     // and the execution status of any of the events in event_wait_list is a negative integer value.
1344     if block && evs.iter().any(|e| e.is_error()) {
1345         return Err(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
1348     // CL_INVALID_VALUE if buffer_origin, host_origin, or region is NULL.
1349     if buffer_origin.is_null() ||
1350        host_origin.is_null() ||
1352        // CL_INVALID_VALUE if ptr is NULL.
1355         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; each pointer names a [usize; 3].
1358     let r = unsafe { CLVec::from_raw(region) };
1359     let buf_ori = unsafe { CLVec::from_raw(buffer_origin) };
1360     let host_ori = unsafe { CLVec::from_raw(host_origin) };
1362     // CL_INVALID_VALUE if any region array element is 0.
1363     if r.contains(&0) ||
1364        // CL_INVALID_VALUE if buffer_row_pitch is not 0 and is less than region[0].
1365        buffer_row_pitch != 0 && buffer_row_pitch < r[0] ||
1366        // CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
1367        host_row_pitch != 0 && host_row_pitch < r[0]
1369         return Err(CL_INVALID_VALUE);
1372     // If buffer_row_pitch is 0, buffer_row_pitch is computed as region[0].
1373     if buffer_row_pitch == 0 {
1374         buffer_row_pitch = r[0];
1377     // If host_row_pitch is 0, host_row_pitch is computed as region[0].
1378     if host_row_pitch == 0 {
1379         host_row_pitch = r[0];
// Slice-pitch validation must run after row pitches were normalized above.
1382     // CL_INVALID_VALUE if buffer_slice_pitch is not 0 and is less than region[1] × buffer_row_pitch and not a multiple of buffer_row_pitch.
1383     if buffer_slice_pitch != 0 && buffer_slice_pitch < r[1] * buffer_row_pitch && buffer_slice_pitch % buffer_row_pitch != 0 ||
1384        // CL_INVALID_VALUE if host_slice_pitch is not 0 and is less than region[1] × host_row_pitch and not a multiple of host_row_pitch.
1385        host_slice_pitch != 0 && host_slice_pitch < r[1] * host_row_pitch && host_slice_pitch % host_row_pitch != 0
1387         return Err(CL_INVALID_VALUE);
1390     // If buffer_slice_pitch is 0, buffer_slice_pitch is computed as region[1] × buffer_row_pitch.
1391     if buffer_slice_pitch == 0 {
1392         buffer_slice_pitch = r[1] * buffer_row_pitch;
1395     // If host_slice_pitch is 0, host_slice_pitch is computed as region[1] × host_row_pitch.
1396     if host_slice_pitch == 0 {
1397         host_slice_pitch = r[1] * host_row_pitch
1400     // CL_INVALID_VALUE if the region being read or written specified by (buffer_origin, region,
1401     // buffer_row_pitch, buffer_slice_pitch) is out of bounds.
1402     if CLVec::calc_size(r + buf_ori, [1, buffer_row_pitch, buffer_slice_pitch]) > buf.size {
1403         return Err(CL_INVALID_VALUE);
1406     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1407     if q.context != buf.context {
1408         return Err(CL_INVALID_CONTEXT);
1413         CL_COMMAND_WRITE_BUFFER_RECT,
1417         Box::new(move |q, ctx| {
1418             buf.write_from_user_rect(
// Unimplemented spec error (sub-buffer alignment), kept as documentation:
1434 // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
// clEnqueueCopyBufferRect: device-side 3-D region copy between two buffers,
// with spec-mandated pitch normalization, bounds, same-buffer-pitch and
// overlap checks.
1438 fn enqueue_copy_buffer_rect(
1439     command_queue: cl_command_queue,
1442     src_origin: *const usize,
1443     dst_origin: *const usize,
1444     region: *const usize,
1445     mut src_row_pitch: usize,
1446     mut src_slice_pitch: usize,
1447     mut dst_row_pitch: usize,
1448     mut dst_slice_pitch: usize,
1449     num_events_in_wait_list: cl_uint,
1450     event_wait_list: *const cl_event,
1451     event: *mut cl_event,
1453     let q = command_queue.get_arc()?;
1454     let src = src_buffer.get_arc()?;
1455     let dst = dst_buffer.get_arc()?;
1456     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1458     // CL_INVALID_VALUE if src_origin, dst_origin, or region is NULL.
1459     if src_origin.is_null() || dst_origin.is_null() || region.is_null() {
1460         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; each pointer names a [usize; 3].
1463     let r = unsafe { CLVec::from_raw(region) };
1464     let src_ori = unsafe { CLVec::from_raw(src_origin) };
1465     let dst_ori = unsafe { CLVec::from_raw(dst_origin) };
1467     // CL_INVALID_VALUE if any region array element is 0.
1468     if r.contains(&0) ||
1469        // CL_INVALID_VALUE if src_row_pitch is not 0 and is less than region[0].
1470        src_row_pitch != 0 && src_row_pitch < r[0] ||
1471        // CL_INVALID_VALUE if dst_row_pitch is not 0 and is less than region[0].
1472        dst_row_pitch != 0 && dst_row_pitch < r[0]
1474         return Err(CL_INVALID_VALUE);
1477     // If src_row_pitch is 0, src_row_pitch is computed as region[0].
1478     if src_row_pitch == 0 {
1479         src_row_pitch = r[0];
1482     // If dst_row_pitch is 0, dst_row_pitch is computed as region[0].
1483     if dst_row_pitch == 0 {
1484         dst_row_pitch = r[0];
// Slice-pitch validation must run after row pitches were normalized above.
1487     // CL_INVALID_VALUE if src_slice_pitch is not 0 and is less than region[1] × src_row_pitch
1488     if src_slice_pitch != 0 && src_slice_pitch < r[1] * src_row_pitch ||
1489        // CL_INVALID_VALUE if dst_slice_pitch is not 0 and is less than region[1] × dst_row_pitch
1490        dst_slice_pitch != 0 && dst_slice_pitch < r[1] * dst_row_pitch ||
1491        // if src_slice_pitch is not 0 and is not a multiple of src_row_pitch.
1492        src_slice_pitch != 0 && src_slice_pitch % src_row_pitch != 0 ||
1493        // if dst_slice_pitch is not 0 and is not a multiple of dst_row_pitch.
1494        dst_slice_pitch != 0 && dst_slice_pitch % dst_row_pitch != 0
1496         return Err(CL_INVALID_VALUE);
1499     // If src_slice_pitch is 0, src_slice_pitch is computed as region[1] × src_row_pitch.
1500     if src_slice_pitch == 0 {
1501         src_slice_pitch = r[1] * src_row_pitch;
1504     // If dst_slice_pitch is 0, dst_slice_pitch is computed as region[1] × dst_row_pitch.
1505     if dst_slice_pitch == 0 {
1506         dst_slice_pitch = r[1] * dst_row_pitch;
1509     // CL_INVALID_VALUE if src_buffer and dst_buffer are the same buffer object and src_slice_pitch
1510     // is not equal to dst_slice_pitch and src_row_pitch is not equal to dst_row_pitch.
// Handle comparison (not parent comparison) on purpose: the spec wording is
// about the same buffer *object*.
1511     if src_buffer == dst_buffer
1512         && src_slice_pitch != dst_slice_pitch
1513         && src_row_pitch != dst_row_pitch
1515         return Err(CL_INVALID_VALUE);
1518     // CL_INVALID_VALUE if (src_origin, region, src_row_pitch, src_slice_pitch) or (dst_origin,
1519     // region, dst_row_pitch, dst_slice_pitch) require accessing elements outside the src_buffer
1520     // and dst_buffer buffer objects respectively.
1521     if CLVec::calc_size(r + src_ori, [1, src_row_pitch, src_slice_pitch]) > src.size
1522         || CLVec::calc_size(r + dst_ori, [1, dst_row_pitch, dst_slice_pitch]) > dst.size
1524         return Err(CL_INVALID_VALUE);
1527     // CL_MEM_COPY_OVERLAP if src_buffer and dst_buffer are the same buffer or sub-buffer object and
1528     // the source and destination regions overlap or if src_buffer and dst_buffer are different
1529     // sub-buffers of the same associated buffer object and they overlap.
1530     if src.has_same_parent(&dst)
1531         && check_copy_overlap(
1541         return Err(CL_MEM_COPY_OVERLAP);
1544     // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_buffer
1546     if src.context != q.context || dst.context != q.context {
1547         return Err(CL_INVALID_CONTEXT);
1552         CL_COMMAND_COPY_BUFFER_RECT,
1556         Box::new(move |q, ctx| {
// Unimplemented spec error (sub-buffer alignment), kept as documentation:
1573 // CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
// clEnqueueFillBuffer: fills (offset, size) of a buffer with a repeated
// pattern. The pattern bytes are copied out of the caller's memory up front
// because the operation executes asynchronously.
1577 fn enqueue_fill_buffer(
1578     command_queue: cl_command_queue,
1580     pattern: *const ::std::os::raw::c_void,
1581     pattern_size: usize,
1584     num_events_in_wait_list: cl_uint,
1585     event_wait_list: *const cl_event,
1586     event: *mut cl_event,
1588     let q = command_queue.get_arc()?;
1589     let b = buffer.get_arc()?;
1590     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1592     // CL_INVALID_VALUE if offset or offset + size require accessing elements outside the buffer
1593     // buffer object respectively.
1594     if offset + size > b.size {
1595         return Err(CL_INVALID_VALUE);
1598     // CL_INVALID_VALUE if pattern is NULL or if pattern_size is 0 or if pattern_size is not one of
1599     // { 1, 2, 4, 8, 16, 32, 64, 128 }.
// count_ones() != 1 rejects both 0 and non-powers-of-two in one test.
1600     if pattern.is_null() || pattern_size.count_ones() != 1 || pattern_size > 128 {
1601         return Err(CL_INVALID_VALUE);
1604     // CL_INVALID_VALUE if offset and size are not a multiple of pattern_size.
1605     if offset % pattern_size != 0 || size % pattern_size != 0 {
1606         return Err(CL_INVALID_VALUE);
1609     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1610     if b.context != q.context {
1611         return Err(CL_INVALID_CONTEXT);
1614     // we have to copy memory
// SAFETY: pattern was NULL-checked and the spec requires it to point at
// pattern_size valid bytes.
1615     let pattern = unsafe { slice::from_raw_parts(pattern.cast(), pattern_size).to_vec() };
1618         CL_COMMAND_FILL_BUFFER,
1622         Box::new(move |q, ctx| b.fill(q, ctx, &pattern, offset, size)),
// Unimplemented spec errors, kept as documentation:
1626 //• CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1627 //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with buffer.
// clEnqueueMapBuffer: maps (offset, size) of a buffer into host address space
// and returns the host pointer. The mapping itself happens synchronously via
// map_buffer; the queued command only synchronizes the shadow copy.
1631 fn enqueue_map_buffer(
1632     command_queue: cl_command_queue,
1634     blocking_map: cl_bool,
1635     map_flags: cl_map_flags,
1638     num_events_in_wait_list: cl_uint,
1639     event_wait_list: *const cl_event,
1640     event: *mut cl_event,
1641 ) -> CLResult<*mut c_void> {
1642     let q = command_queue.get_arc()?;
1643     let b = buffer.get_arc()?;
1644     let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
1645     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
// Rejects invalid flag combinations and flags conflicting with b.flags.
1647     validate_map_flags(&b, map_flags)?;
1649     // CL_INVALID_VALUE if region being mapped given by (offset, size) is out of bounds or if size
1651     if offset + size > b.size || size == 0 {
1652         return Err(CL_INVALID_VALUE);
1655     // CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the map operation is blocking and the
1656     // execution status of any of the events in event_wait_list is a negative integer value.
1657     if block && evs.iter().any(|e| e.is_error()) {
1658         return Err(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
1661     // CL_INVALID_CONTEXT if context associated with command_queue and buffer are not the same
1662     if b.context != q.context {
1663         return Err(CL_INVALID_CONTEXT);
// The returned pointer is valid immediately; content validity is tied to the
// queued MAP command below.
1666     let ptr = b.map_buffer(&q, offset, size)?;
1669         CL_COMMAND_MAP_BUFFER,
1673         Box::new(move |q, ctx| b.sync_shadow_buffer(q, ctx, ptr)),
// Unimplemented spec errors, kept as documentation:
1679 // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for the device associated with queue. This error code is missing before version 1.1.
1680 // CL_MAP_FAILURE if there is a failure to map the requested region into the host address space. This error cannot occur for buffer objects created with CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR.
1681 // CL_INVALID_OPERATION if mapping would lead to overlapping regions being mapped for writing.
// clEnqueueReadImage: reads an image region into host memory, computing
// default host pitches from the image's pixel size when the caller passes 0.
1685 fn enqueue_read_image(
1686     command_queue: cl_command_queue,
1688     blocking_read: cl_bool,
1689     origin: *const usize,
1690     region: *const usize,
1691     mut row_pitch: usize,
1692     mut slice_pitch: usize,
1693     ptr: *mut ::std::os::raw::c_void,
1694     num_events_in_wait_list: cl_uint,
1695     event_wait_list: *const cl_event,
1696     event: *mut cl_event,
1698     let q = command_queue.get_arc()?;
1699     let i = image.get_arc()?;
1700     let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
1701     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
// NOTE(review): unwrap assumes every created image has a known pixel size —
// presumably guaranteed at image-creation time; confirm.
1702     let pixel_size = i.image_format.pixel_size().unwrap() as usize;
1704     // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
1705     if i.context != q.context {
1706         return Err(CL_INVALID_CONTEXT);
1709     // CL_INVALID_OPERATION if clEnqueueReadImage is called on image which has been created with
1710     // CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
1711     if bit_check(i.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
1712         return Err(CL_INVALID_OPERATION);
1715     // CL_INVALID_VALUE if origin or region is NULL.
1716     // CL_INVALID_VALUE if ptr is NULL.
1717     if origin.is_null() || region.is_null() || ptr.is_null() {
1718         return Err(CL_INVALID_VALUE);
1721     // CL_INVALID_VALUE if image is a 1D or 2D image and slice_pitch or input_slice_pitch is not 0.
1722     if !i.image_desc.has_slice() && slice_pitch != 0 {
1723         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; each pointer names a [usize; 3].
1726     let r = unsafe { CLVec::from_raw(region) };
1727     let o = unsafe { CLVec::from_raw(origin) };
1729     // CL_INVALID_VALUE if the region being read or written specified by origin and region is out of
1731     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
1732     // description for origin and region.
1733     validate_image_bounds(&i, o, r)?;
1735     // If row_pitch (or input_row_pitch) is set to 0, the appropriate row pitch is calculated based
1736     // on the size of each element in bytes multiplied by width.
1738         row_pitch = r[0] * pixel_size;
1741     // If slice_pitch (or input_slice_pitch) is set to 0, the appropriate slice pitch is calculated
1742     // based on the row_pitch × height.
1743     if slice_pitch == 0 {
1744         slice_pitch = row_pitch * r[1];
1749         CL_COMMAND_READ_IMAGE,
1753         Box::new(move |q, ctx| {
1754             i.read_to_user_rect(
// Device-side pitches come from the image descriptor, not the host arguments.
1760                 i.image_desc.image_row_pitch,
1761                 i.image_desc.image_slice_pitch,
// Unimplemented spec errors, kept as documentation:
1769 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
1770 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
1771 //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
1772 //• CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the read and write operations are blocking and the execution status of any of the events in event_wait_list is a negative integer value.
// clEnqueueWriteImage: writes a host memory region into an image, computing
// default host pitches from the image's pixel size when the caller passes 0.
//
// FIX: the queued event previously reported CL_COMMAND_WRITE_BUFFER_RECT as
// its command type; clEnqueueWriteImage must report CL_COMMAND_WRITE_IMAGE
// for CL_EVENT_COMMAND_TYPE queries.
1776 fn enqueue_write_image(
1777     command_queue: cl_command_queue,
1779     blocking_write: cl_bool,
1780     origin: *const usize,
1781     region: *const usize,
1782     mut row_pitch: usize,
1783     mut slice_pitch: usize,
1784     ptr: *const ::std::os::raw::c_void,
1785     num_events_in_wait_list: cl_uint,
1786     event_wait_list: *const cl_event,
1787     event: *mut cl_event,
1789     let q = command_queue.get_arc()?;
1790     let i = image.get_arc()?;
1791     let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
1792     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
// NOTE(review): unwrap assumes every created image has a known pixel size —
// presumably guaranteed at image-creation time; confirm.
1793     let pixel_size = i.image_format.pixel_size().unwrap() as usize;
1795     // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
1796     if i.context != q.context {
1797         return Err(CL_INVALID_CONTEXT);
1800     // CL_INVALID_OPERATION if clEnqueueWriteImage is called on image which has been created with
1801     // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
1802     if bit_check(i.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
1803         return Err(CL_INVALID_OPERATION);
1806     // CL_INVALID_VALUE if origin or region is NULL.
1807     // CL_INVALID_VALUE if ptr is NULL.
1808     if origin.is_null() || region.is_null() || ptr.is_null() {
1809         return Err(CL_INVALID_VALUE);
1812     // CL_INVALID_VALUE if image is a 1D or 2D image and slice_pitch or input_slice_pitch is not 0.
1813     if !i.image_desc.has_slice() && slice_pitch != 0 {
1814         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; each pointer names a [usize; 3].
1817     let r = unsafe { CLVec::from_raw(region) };
1818     let o = unsafe { CLVec::from_raw(origin) };
1820     // CL_INVALID_VALUE if the region being read or written specified by origin and region is out of
1822     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
1823     // description for origin and region.
1824     validate_image_bounds(&i, o, r)?;
1826     // If row_pitch (or input_row_pitch) is set to 0, the appropriate row pitch is calculated based
1827     // on the size of each element in bytes multiplied by width.
1829         row_pitch = r[0] * pixel_size;
1832     // If slice_pitch (or input_slice_pitch) is set to 0, the appropriate slice pitch is calculated
1833     // based on the row_pitch × height.
1834     if slice_pitch == 0 {
1835         slice_pitch = row_pitch * r[1];
// Correct command type for this entrypoint (was CL_COMMAND_WRITE_BUFFER_RECT).
1840         CL_COMMAND_WRITE_IMAGE,
1844         Box::new(move |q, ctx| {
1845             i.write_from_user_rect(
// Device-side pitches come from the image descriptor, not the host arguments.
1854                 i.image_desc.image_row_pitch,
1855                 i.image_desc.image_slice_pitch,
// Unimplemented spec errors, kept as documentation:
1860 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
1861 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
1862 //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
1863 //• CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the read and write operations are blocking and the execution status of any of the events in event_wait_list is a negative integer value.
// clEnqueueCopyImage: device-side copy of a region between two images that
// share the same image format.
1867 fn enqueue_copy_image(
1868     command_queue: cl_command_queue,
1871     src_origin: *const usize,
1872     dst_origin: *const usize,
1873     region: *const usize,
1874     num_events_in_wait_list: cl_uint,
1875     event_wait_list: *const cl_event,
1876     event: *mut cl_event,
1878     let q = command_queue.get_arc()?;
1879     let src_image = src_image.get_arc()?;
1880     let dst_image = dst_image.get_arc()?;
1881     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1883     // CL_INVALID_CONTEXT if the context associated with command_queue, src_image and dst_image are not the same
1884     if src_image.context != q.context || dst_image.context != q.context {
1885         return Err(CL_INVALID_CONTEXT);
1888     // CL_IMAGE_FORMAT_MISMATCH if src_image and dst_image do not use the same image format.
1889     if src_image.image_format != dst_image.image_format {
1890         return Err(CL_IMAGE_FORMAT_MISMATCH);
1893     // CL_INVALID_VALUE if src_origin, dst_origin, or region is NULL.
1894     if src_origin.is_null() || dst_origin.is_null() || region.is_null() {
1895         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; each pointer names a [usize; 3].
1898     let region = unsafe { CLVec::from_raw(region) };
1899     let dst_origin = unsafe { CLVec::from_raw(dst_origin) };
1900     let src_origin = unsafe { CLVec::from_raw(src_origin) };
1902     // CL_INVALID_VALUE if the 2D or 3D rectangular region specified by src_origin and
1903     // src_origin + region refers to a region outside src_image, or if the 2D or 3D rectangular
1904     // region specified by dst_origin and dst_origin + region refers to a region outside dst_image.
1905     // CL_INVALID_VALUE if values in src_origin, dst_origin and region do not follow rules described
1906     // in the argument description for src_origin, dst_origin and region.
1907     validate_image_bounds(&src_image, src_origin, region)?;
1908     validate_image_bounds(&dst_image, dst_origin, region)?;
1912         CL_COMMAND_COPY_IMAGE,
1916         Box::new(move |q, ctx| {
1917             src_image.copy_to(q, ctx, &dst_image, src_origin, dst_origin, &region)
// Unimplemented spec errors, kept as documentation (note the missing
// same-image overlap check listed last):
1921 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for src_image or dst_image are not supported by device associated with queue.
1922 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for src_image or dst_image are not supported by device associated with queue.
1923 //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
1924 //• CL_MEM_COPY_OVERLAP if src_image and dst_image are the same image object and the source and destination regions overlap.
// clEnqueueFillImage: fills an image region with a fill color. The color is
// copied out of caller memory up front because the fill runs asynchronously.
//
// FIX: the queued event previously reported CL_COMMAND_FILL_BUFFER as its
// command type; clEnqueueFillImage must report CL_COMMAND_FILL_IMAGE for
// CL_EVENT_COMMAND_TYPE queries.
1928 fn enqueue_fill_image(
1929     command_queue: cl_command_queue,
1931     fill_color: *const ::std::os::raw::c_void,
1932     origin: *const [usize; 3],
1933     region: *const [usize; 3],
1934     num_events_in_wait_list: cl_uint,
1935     event_wait_list: *const cl_event,
1936     event: *mut cl_event,
1938     let q = command_queue.get_arc()?;
1939     let i = image.get_arc()?;
1940     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1942     // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
1943     if i.context != q.context {
1944         return Err(CL_INVALID_CONTEXT);
1947     // CL_INVALID_VALUE if fill_color is NULL.
1948     // CL_INVALID_VALUE if origin or region is NULL.
1949     if fill_color.is_null() || origin.is_null() || region.is_null() {
1950         return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; both pointers name a [usize; 3].
1953     let region = unsafe { CLVec::from_raw(region.cast()) };
1954     let origin = unsafe { CLVec::from_raw(origin.cast()) };
1956     // CL_INVALID_VALUE if the region being filled as specified by origin and region is out of
1958     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
1959     // description for origin and region.
1960     validate_image_bounds(&i, origin, region)?;
1962     // we have to copy memory and it's always a 4 component int value
1963     // TODO but not for CL_DEPTH
1964     let fill_color = unsafe { slice::from_raw_parts(fill_color.cast(), 4).to_vec() };
// Correct command type for this entrypoint (was CL_COMMAND_FILL_BUFFER).
1967         CL_COMMAND_FILL_IMAGE,
1971         Box::new(move |q, ctx| i.fill_image(q, ctx, &fill_color, &origin, &region)),
// Unimplemented spec errors, kept as documentation:
1974 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
1975 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for
1976 //image are not supported by device associated with queue.
/// clEnqueueCopyBufferToImage: validates arguments and queues a copy from a
/// buffer (at src_offset) into an image region at dst_origin.
/// NOTE(review): this extract is degraded (embedded original line numbers,
/// interior lines missing); only visible tokens were changed.
1980 fn enqueue_copy_buffer_to_image(
1981 command_queue: cl_command_queue,
1985 dst_origin: *const usize,
1986 region: *const usize,
1987 num_events_in_wait_list: cl_uint,
1988 event_wait_list: *const cl_event,
1989 event: *mut cl_event,
1991 let q = command_queue.get_arc()?;
1992 let src = src_buffer.get_arc()?;
1993 let dst = dst_image.get_arc()?;
1994 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1996 // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_image
1998 if q.context != src.context || q.context != dst.context {
1999 return Err(CL_INVALID_CONTEXT);
2002 // CL_INVALID_VALUE if dst_origin or region is NULL.
2003 if dst_origin.is_null() || region.is_null() {
2004 return Err(CL_INVALID_VALUE);
// Buffer side is expressed as a 1D "origin" so the shared copy path can treat
// both sources uniformly.
2007 let region = unsafe { CLVec::from_raw(region) };
2008 let src_origin = CLVec::new([src_offset, 0, 0]);
2009 let dst_origin = unsafe { CLVec::from_raw(dst_origin) };
2011 // CL_INVALID_VALUE if values in dst_origin and region do not follow rules described in the
2012 // argument description for dst_origin and region.
2013 // CL_INVALID_VALUE if the 1D, 2D or 3D rectangular region specified by dst_origin and
2014 // dst_origin + region refer to a region outside dst_image,
2015 validate_image_bounds(&dst, dst_origin, region)?;
2019 CL_COMMAND_COPY_BUFFER_TO_IMAGE,
// FIX: "®ion" was mojibake for "&region" ("&reg" eaten by HTML-entity decoding).
2023 Box::new(move |q, ctx| src.copy_to(q, ctx, &dst, src_origin, dst_origin, &region)),
2026 //• CL_INVALID_MEM_OBJECT if src_buffer is not a valid buffer object or dst_image is not a valid image object or if dst_image is a 1D image buffer object created from src_buffer.
2027 //• CL_INVALID_VALUE ... if the region specified by src_offset and src_offset + src_cb refer to a region outside src_buffer.
2028 //• CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
2029 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for dst_image are not supported by device associated with queue.
2030 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for dst_image are not supported by device associated with queue.
2031 //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_buffer or dst_image.
2032 //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
/// clEnqueueCopyImageToBuffer: validates arguments and queues a copy from an
/// image region at src_origin into a buffer (at dst_offset).
/// NOTE(review): this extract is degraded (embedded original line numbers,
/// interior lines missing); only visible tokens were changed.
2036 fn enqueue_copy_image_to_buffer(
2037 command_queue: cl_command_queue,
2040 src_origin: *const usize,
2041 region: *const usize,
2043 num_events_in_wait_list: cl_uint,
2044 event_wait_list: *const cl_event,
2045 event: *mut cl_event,
2047 let q = command_queue.get_arc()?;
2048 let src = src_image.get_arc()?;
2049 let dst = dst_buffer.get_arc()?;
2050 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2052 // CL_INVALID_CONTEXT if the context associated with command_queue, src_image and dst_buffer
2054 if q.context != src.context || q.context != dst.context {
2055 return Err(CL_INVALID_CONTEXT);
2058 // CL_INVALID_VALUE if src_origin or region is NULL.
2059 if src_origin.is_null() || region.is_null() {
2060 return Err(CL_INVALID_VALUE);
// Buffer side is expressed as a 1D "origin" so the shared copy path can treat
// both destinations uniformly.
2063 let region = unsafe { CLVec::from_raw(region) };
2064 let src_origin = unsafe { CLVec::from_raw(src_origin) };
2065 let dst_origin = CLVec::new([dst_offset, 0, 0]);
2067 // CL_INVALID_VALUE if values in src_origin and region do not follow rules described in the
2068 // argument description for src_origin and region.
2069 // CL_INVALID_VALUE if the 1D, 2D or 3D rectangular region specified by src_origin and
2070 // src_origin + region refers to a region outside src_image, or if the region specified by
2071 // dst_offset and dst_offset + dst_cb to a region outside dst_buffer.
2072 validate_image_bounds(&src, src_origin, region)?;
2076 CL_COMMAND_COPY_IMAGE_TO_BUFFER,
// FIX: "®ion" was mojibake for "&region" ("&reg" eaten by HTML-entity decoding).
2080 Box::new(move |q, ctx| src.copy_to(q, ctx, &dst, src_origin, dst_origin, &region)),
2083 //• CL_INVALID_MEM_OBJECT if src_image is not a valid image object or dst_buffer is not a valid buffer object or if src_image is a 1D image buffer object created from dst_buffer.
2084 //• CL_INVALID_VALUE ... if the region specified by dst_offset and dst_offset + dst_cb to a region outside dst_buffer.
2085 //• CL_MISALIGNED_SUB_BUFFER_OFFSET if dst_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue. This error code is missing before version 1.1.
2086 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for src_image are not supported by device associated with queue.
2087 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for src_image are not supported by device associated with queue.
2088 //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_image or dst_buffer.
2089 //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
/// clEnqueueMapImage: validates arguments, maps an image region into host
/// memory and queues the command that syncs the shadow copy; returns the
/// mapped pointer.
/// NOTE(review): this extract is missing interior lines (embedded original
/// numbering has gaps); visible code is kept byte-identical.
2093 fn enqueue_map_image(
2094 command_queue: cl_command_queue,
2096 blocking_map: cl_bool,
2097 map_flags: cl_map_flags,
2098 origin: *const usize,
2099 region: *const usize,
2100 image_row_pitch: *mut usize,
2101 image_slice_pitch: *mut usize,
2102 num_events_in_wait_list: cl_uint,
2103 event_wait_list: *const cl_event,
2104 event: *mut cl_event,
2105 ) -> CLResult<*mut ::std::os::raw::c_void> {
// Resolve handles up front; each getter fails with the matching CL error.
2106 let q = command_queue.get_arc()?;
2107 let i = image.get_arc()?;
2108 let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
2109 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2111 // CL_INVALID_VALUE ... or if values specified in map_flags are not valid.
2112 validate_map_flags(&i, map_flags)?;
2114 // CL_INVALID_CONTEXT if context associated with command_queue and image are not the same
2115 if i.context != q.context {
2116 return Err(CL_INVALID_CONTEXT);
2119 // CL_INVALID_VALUE if origin or region is NULL.
2120 // CL_INVALID_VALUE if image_row_pitch is NULL.
2121 if origin.is_null() || region.is_null() || image_row_pitch.is_null() {
2122 return Err(CL_INVALID_VALUE);
// SAFETY relies on the NULL checks above; pointers must reference 3 usizes.
2125 let region = unsafe { CLVec::from_raw(region) };
2126 let origin = unsafe { CLVec::from_raw(origin) };
2128 // CL_INVALID_VALUE if region being mapped given by (origin, origin + region) is out of bounds
2129 // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
2130 // description for origin and region.
2131 validate_image_bounds(&i, origin, region)?;
// A slice-pitch output is only mandatory for arrayed/3D images; other image
// types may pass NULL and get a throwaway location instead.
2133 let mut dummy_slice_pitch: usize = 0;
2134 let image_slice_pitch = if image_slice_pitch.is_null() {
2135 // CL_INVALID_VALUE if image is a 3D image, 1D or 2D image array object and
2136 // image_slice_pitch is NULL.
2137 if i.image_desc.is_array() || i.image_desc.image_type == CL_MEM_OBJECT_IMAGE3D {
2138 return Err(CL_INVALID_VALUE);
2140 &mut dummy_slice_pitch
2142 unsafe { image_slice_pitch.as_mut().unwrap() }
2145 let ptr = i.map_image(
2149 unsafe { image_row_pitch.as_mut().unwrap() },
2155 CL_COMMAND_MAP_IMAGE,
// Queued work brings the mapped region up to date when the command executes.
2159 Box::new(move |q, ctx| i.sync_shadow_image(q, ctx, ptr)),
2164 //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
2165 //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
2166 //• CL_MAP_FAILURE if there is a failure to map the requested region into the host address space. This error cannot occur for image objects created with CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR.
2167 //• CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST if the map operation is blocking and the execution status of any of the events in event_wait_list is a negative integer value.
2168 //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
2169 //• CL_INVALID_OPERATION if mapping would lead to overlapping regions being mapped for writing.
/// clRetainMemObject — presumably increments the object's reference count;
/// the body falls in a gap of this extract, so that is unverified here.
2173 fn retain_mem_object(mem: cl_mem) -> CLResult<()> {
/// clReleaseMemObject — presumably decrements the object's reference count;
/// the body falls in a gap of this extract, so that is unverified here.
2178 fn release_mem_object(mem: cl_mem) -> CLResult<()> {
/// clEnqueueUnmapMemObject: validates that mapped_ptr belongs to memobj and
/// queues the unmap command.
2183 fn enqueue_unmap_mem_object(
2184 command_queue: cl_command_queue,
2186 mapped_ptr: *mut ::std::os::raw::c_void,
2187 num_events_in_wait_list: cl_uint,
2188 event_wait_list: *const cl_event,
2189 event: *mut cl_event,
2191 let q = command_queue.get_arc()?;
2192 let m = memobj.get_arc()?;
2193 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2195 // CL_INVALID_CONTEXT if context associated with command_queue and memobj are not the same
2196 if q.context != m.context {
2197 return Err(CL_INVALID_CONTEXT);
2200 // CL_INVALID_VALUE if mapped_ptr is not a valid pointer returned by clEnqueueMapBuffer or
2201 // clEnqueueMapImage for memobj.
2202 if !m.is_mapped_ptr(mapped_ptr) {
2203 return Err(CL_INVALID_VALUE);
2208 CL_COMMAND_UNMAP_MEM_OBJECT,
// Actual write-back/unmap happens when the queued command executes.
2212 Box::new(move |q, ctx| m.unmap(q, ctx, mapped_ptr)),
/// clEnqueueMigrateMemObjects: validates arguments and queues a no-op —
/// migration is a hint, and doing nothing is a conforming implementation.
2217 fn enqueue_migrate_mem_objects(
2218 command_queue: cl_command_queue,
2219 num_mem_objects: cl_uint,
2220 mem_objects: *const cl_mem,
2221 flags: cl_mem_migration_flags,
2222 num_events_in_wait_list: cl_uint,
2223 event_wait_list: *const cl_event,
2224 event: *mut cl_event,
2226 let q = command_queue.get_arc()?;
2227 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2228 let bufs = cl_mem::get_arc_vec_from_arr(mem_objects, num_mem_objects)?;
2230 // CL_INVALID_VALUE if num_mem_objects is zero or if mem_objects is NULL.
2231 if bufs.is_empty() {
2232 return Err(CL_INVALID_VALUE);
2235 // CL_INVALID_CONTEXT if the context associated with command_queue and memory objects in
2236 // mem_objects are not the same
2237 if bufs.iter().any(|b| b.context != q.context) {
2238 return Err(CL_INVALID_CONTEXT);
2241 // CL_INVALID_VALUE if flags is not 0 or is not any of the values described in the table above.
// Complement of all defined migration bits; anything outside is invalid (the
// surrounding condition lines fall in a gap of this extract).
2245 !(CL_MIGRATE_MEM_OBJECT_HOST | CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED),
2248 return Err(CL_INVALID_VALUE);
2251 // we should do something, but it's legal to not do anything at all
2254 CL_COMMAND_MIGRATE_MEM_OBJECTS,
2258 Box::new(|_, _| Ok(())),
2261 //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for the specified set of memory objects in mem_objects.
2264 #[cl_info_entrypoint(cl_get_pipe_info)]
// clGetPipeInfo: pipes are not supported by this implementation, so no cl_mem
// can ever be a valid pipe object — every query fails unconditionally.
2265 impl CLInfo<cl_pipe_info> for cl_mem {
2266 fn query(&self, _q: cl_pipe_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
2267 // CL_INVALID_MEM_OBJECT if pipe is a not a valid pipe object.
2268 Err(CL_INVALID_MEM_OBJECT)
// clSVMAlloc: allocates host memory via the global allocator and registers it
// with the context so clSVMFree can reclaim it.
// NOTE(review): the opening `fn svm_alloc(` line and the `size` parameter fall
// in gaps of this extract; visible code is kept byte-identical.
2273 context: cl_context,
2274 flags: cl_svm_mem_flags,
2276 mut alignment: cl_uint,
2277 ) -> CLResult<*mut c_void> {
2278 // clSVMAlloc will fail if
2280 // context is not a valid context
2281 let c = context.get_ref()?;
2283 // or no devices in context support SVM.
2284 if !c.has_svm_devs() {
2285 return Err(CL_INVALID_OPERATION);
2288 // flags does not contain CL_MEM_SVM_FINE_GRAIN_BUFFER but does contain CL_MEM_SVM_ATOMICS.
2289 if !bit_check(flags, CL_MEM_SVM_FINE_GRAIN_BUFFER) && bit_check(flags, CL_MEM_SVM_ATOMICS) {
2290 return Err(CL_INVALID_VALUE);
2293 // size is 0 or > CL_DEVICE_MAX_MEM_ALLOC_SIZE value for any device in context.
2294 if size == 0 || checked_compare(size, Ordering::Greater, c.max_mem_alloc()) {
2295 return Err(CL_INVALID_VALUE);
// Default alignment: size of a 16-wide u64 vector (128 bytes) — presumably
// applied when the caller passed 0; the guarding condition line is missing
// from this extract, so confirm against the full file.
2299 alignment = mem::size_of::<[u64; 16]>() as cl_uint;
2302 // alignment is not a power of two
2303 if !alignment.is_power_of_two() {
2304 return Err(CL_INVALID_VALUE);
2310 // SAFETY: we already verify the parameters to from_size_align above and layout is of non zero
2313 layout = Layout::from_size_align_unchecked(size, alignment as usize);
2314 ptr = alloc::alloc(layout);
2318 return Err(CL_OUT_OF_HOST_MEMORY);
// Track the allocation so svm_free_impl can recover the Layout for dealloc.
2321 c.add_svm_ptr(ptr.cast(), layout);
2324 // Values specified in flags do not follow rules described for supported values in the SVM Memory Flags table.
2325 // CL_MEM_SVM_FINE_GRAIN_BUFFER or CL_MEM_SVM_ATOMICS is specified in flags and these are not supported by at least one device in context.
2326 // The values specified in flags are not valid, i.e. don’t match those defined in the SVM Memory Flags table.
2327 // the OpenCL implementation cannot support the specified alignment for at least one device in context.
2328 // There was a failure to allocate resources.
/// Frees an SVM pointer if the context tracks it; unknown pointers are
/// silently ignored (clSVMFree must not report errors).
2331 fn svm_free_impl(c: &Context, svm_pointer: *mut c_void) {
2332 if let Some(layout) = c.remove_svm_ptr(svm_pointer) {
2333 // SAFETY: we make sure that svm_pointer is a valid allocation and reuse the same layout
2334 // from the allocation
2336 alloc::dealloc(svm_pointer.cast(), layout);
/// clSVMFree: resolves the context handle and delegates to svm_free_impl.
/// Fails only if the context handle itself is invalid.
2341 pub fn svm_free(context: cl_context, svm_pointer: *mut c_void) -> CLResult<()> {
2342 let c = context.get_ref()?;
2343 svm_free_impl(c, svm_pointer);
/// Shared implementation for clEnqueueSVMFree / clEnqueueSVMFreeARM: queues a
/// command that either calls the application's free callback or frees each
/// SVM pointer through the context.
2347 fn enqueue_svm_free_impl(
2348 command_queue: cl_command_queue,
2349 num_svm_pointers: cl_uint,
2350 svm_pointers: *mut *mut c_void,
2351 pfn_free_func: Option<SVMFreeCb>,
2352 user_data: *mut c_void,
2353 num_events_in_wait_list: cl_uint,
2354 event_wait_list: *const cl_event,
2355 event: *mut cl_event,
2356 cmd_type: cl_command_type,
2358 let q = command_queue.get_arc()?;
2359 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2361 // CL_INVALID_VALUE if num_svm_pointers is 0 and svm_pointers is non-NULL, or if svm_pointers is
2362 // NULL and num_svm_pointers is not 0.
2363 if num_svm_pointers == 0 && !svm_pointers.is_null()
2364 || num_svm_pointers != 0 && svm_pointers.is_null()
2366 return Err(CL_INVALID_VALUE);
2369 // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2370 if !q.device.svm_supported() {
2371 return Err(CL_INVALID_OPERATION);
// Deferred work: prefer the user-provided callback; otherwise free each
// pointer ourselves via the context's SVM tracking.
2380 Box::new(move |q, _| {
2381 if let Some(cb) = pfn_free_func {
2382 // SAFETY: it's undefined behavior if the application screws up
2384 cb(command_queue, num_svm_pointers, svm_pointers, user_data);
2387 // SAFETY: num_svm_pointers specifies the amount of elements in svm_pointers
2389 unsafe { slice::from_raw_parts(svm_pointers, num_svm_pointers as usize) };
2390 for &ptr in svm_pointers {
2391 svm_free_impl(&q.context, ptr);
/// clEnqueueSVMFree: thin wrapper forwarding to enqueue_svm_free_impl with
/// CL_COMMAND_SVM_FREE as the command type.
2401 fn enqueue_svm_free(
2402 command_queue: cl_command_queue,
2403 num_svm_pointers: cl_uint,
2404 svm_pointers: *mut *mut c_void,
2405 pfn_free_func: Option<SVMFreeCb>,
2406 user_data: *mut c_void,
2407 num_events_in_wait_list: cl_uint,
2408 event_wait_list: *const cl_event,
2409 event: *mut cl_event,
2411 enqueue_svm_free_impl(
2417 num_events_in_wait_list,
2420 CL_COMMAND_SVM_FREE,
/// cl_arm_shared_virtual_memory variant: identical to enqueue_svm_free but
/// tags the event with CL_COMMAND_SVM_FREE_ARM.
2425 fn enqueue_svm_free_arm(
2426 command_queue: cl_command_queue,
2427 num_svm_pointers: cl_uint,
2428 svm_pointers: *mut *mut c_void,
2429 pfn_free_func: Option<SVMFreeCb>,
2430 user_data: *mut c_void,
2431 num_events_in_wait_list: cl_uint,
2432 event_wait_list: *const cl_event,
2433 event: *mut cl_event,
2435 enqueue_svm_free_impl(
2441 num_events_in_wait_list,
2444 CL_COMMAND_SVM_FREE_ARM,
/// Shared implementation for clEnqueueSVMMemcpy / clEnqueueSVMMemcpyARM:
/// validates arguments and queues a host-side memcpy between SVM pointers.
2448 fn enqueue_svm_memcpy_impl(
2449 command_queue: cl_command_queue,
2450 blocking_copy: cl_bool,
2451 dst_ptr: *mut c_void,
2452 src_ptr: *const c_void,
2454 num_events_in_wait_list: cl_uint,
2455 event_wait_list: *const cl_event,
2456 event: *mut cl_event,
2457 cmd_type: cl_command_type,
2459 let q = command_queue.get_arc()?;
2460 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2461 let block = check_cl_bool(blocking_copy).ok_or(CL_INVALID_VALUE)?;
2463 // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2464 if !q.device.svm_supported() {
2465 return Err(CL_INVALID_OPERATION);
2468 // CL_INVALID_VALUE if dst_ptr or src_ptr is NULL.
2469 if dst_ptr.is_null() || src_ptr.is_null() {
2470 return Err(CL_INVALID_VALUE);
2473 // CL_MEM_COPY_OVERLAP if the values specified for dst_ptr, src_ptr and size result in an
2474 // overlapping copy.
// Interval-overlap test on raw addresses: [src, src+size) vs [dst, dst+size).
2475 let dst_ptr_addr = dst_ptr as usize;
2476 let src_ptr_addr = src_ptr as usize;
2477 if (src_ptr_addr <= dst_ptr_addr && dst_ptr_addr < src_ptr_addr + size)
2478 || (dst_ptr_addr <= src_ptr_addr && src_ptr_addr < dst_ptr_addr + size)
2480 return Err(CL_MEM_COPY_OVERLAP);
2489 Box::new(move |_, _| {
2490 // SAFETY: We check for overlapping copies already and alignment doesn't matter for void
2491 // pointers. And we also trust applications to provide properly allocated memory regions
2492 // and if not it's all undefined anyway.
2494 ptr::copy_nonoverlapping(src_ptr, dst_ptr, size);
/// clEnqueueSVMMemcpy: thin wrapper forwarding to enqueue_svm_memcpy_impl
/// with CL_COMMAND_SVM_MEMCPY as the command type.
2502 fn enqueue_svm_memcpy(
2503 command_queue: cl_command_queue,
2504 blocking_copy: cl_bool,
2505 dst_ptr: *mut c_void,
2506 src_ptr: *const c_void,
2508 num_events_in_wait_list: cl_uint,
2509 event_wait_list: *const cl_event,
2510 event: *mut cl_event,
2512 enqueue_svm_memcpy_impl(
2518 num_events_in_wait_list,
2521 CL_COMMAND_SVM_MEMCPY,
/// cl_arm_shared_virtual_memory variant: identical to enqueue_svm_memcpy but
/// tags the event with CL_COMMAND_SVM_MEMCPY_ARM.
2526 fn enqueue_svm_memcpy_arm(
2527 command_queue: cl_command_queue,
2528 blocking_copy: cl_bool,
2529 dst_ptr: *mut c_void,
2530 src_ptr: *const c_void,
2532 num_events_in_wait_list: cl_uint,
2533 event_wait_list: *const cl_event,
2534 event: *mut cl_event,
2536 enqueue_svm_memcpy_impl(
2542 num_events_in_wait_list,
2545 CL_COMMAND_SVM_MEMCPY_ARM,
/// Shared implementation for clEnqueueSVMMemFill / clEnqueueSVMMemFillARM:
/// validates arguments and queues a host-side fill that tiles `pattern`
/// (pattern_size bytes) across the destination.
/// NOTE(review): this extract is degraded (embedded original line numbers,
/// interior lines missing); only visible tokens were changed.
2549 fn enqueue_svm_mem_fill_impl(
2550 command_queue: cl_command_queue,
2551 svm_ptr: *mut ::std::os::raw::c_void,
2552 pattern: *const ::std::os::raw::c_void,
2553 pattern_size: usize,
2555 num_events_in_wait_list: cl_uint,
2556 event_wait_list: *const cl_event,
2557 event: *mut cl_event,
2558 cmd_type: cl_command_type,
2560 let q = command_queue.get_arc()?;
2561 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2562 let svm_ptr_addr = svm_ptr as usize;
2564 // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2565 if !q.device.svm_supported() {
2566 return Err(CL_INVALID_OPERATION);
2569 // CL_INVALID_VALUE if svm_ptr is NULL.
2570 if svm_ptr.is_null() {
2571 return Err(CL_INVALID_VALUE);
2574 // CL_INVALID_VALUE if svm_ptr is not aligned to pattern_size bytes.
// FIX: guard pattern_size == 0 before computing `pattern_size - 1`; the
// original evaluated the subtraction first, underflowing (a panic in debug
// builds) because the dedicated zero check only runs further below. A zero
// pattern_size is CL_INVALID_VALUE either way, so short-circuiting here
// preserves the reported error.
2575 if pattern_size == 0 || svm_ptr_addr & (pattern_size - 1) != 0 {
2576 return Err(CL_INVALID_VALUE);
2579 // CL_INVALID_VALUE if pattern is NULL or if pattern_size is 0 or if pattern_size is not one of
2580 // {1, 2, 4, 8, 16, 32, 64, 128}.
2581 if pattern.is_null()
2582 || pattern_size == 0
2583 || !pattern_size.is_power_of_two()
2584 || pattern_size > 128
2586 return Err(CL_INVALID_VALUE);
2589 // CL_INVALID_VALUE if size is not a multiple of pattern_size.
2590 if size % pattern_size != 0 {
2591 return Err(CL_INVALID_VALUE);
// Deferred work: tile the pattern across [svm_ptr, svm_ptr + size).
2600 Box::new(move |_, _| {
2602 while offset < size {
2603 // SAFETY: pointer are either valid or undefined behavior
2605 ptr::copy(pattern, svm_ptr.add(offset), pattern_size);
2607 offset += pattern_size;
/// clEnqueueSVMMemFill: thin wrapper forwarding to enqueue_svm_mem_fill_impl
/// with CL_COMMAND_SVM_MEMFILL as the command type.
2616 fn enqueue_svm_mem_fill(
2617 command_queue: cl_command_queue,
2618 svm_ptr: *mut ::std::os::raw::c_void,
2619 pattern: *const ::std::os::raw::c_void,
2620 pattern_size: usize,
2622 num_events_in_wait_list: cl_uint,
2623 event_wait_list: *const cl_event,
2624 event: *mut cl_event,
2626 enqueue_svm_mem_fill_impl(
2632 num_events_in_wait_list,
2635 CL_COMMAND_SVM_MEMFILL,
/// cl_arm_shared_virtual_memory variant: identical to enqueue_svm_mem_fill
/// but tags the event with CL_COMMAND_SVM_MEMFILL_ARM.
2640 fn enqueue_svm_mem_fill_arm(
2641 command_queue: cl_command_queue,
2642 svm_ptr: *mut ::std::os::raw::c_void,
2643 pattern: *const ::std::os::raw::c_void,
2644 pattern_size: usize,
2646 num_events_in_wait_list: cl_uint,
2647 event_wait_list: *const cl_event,
2648 event: *mut cl_event,
2650 enqueue_svm_mem_fill_impl(
2656 num_events_in_wait_list,
2659 CL_COMMAND_SVM_MEMFILL_ARM,
/// Shared implementation for clEnqueueSVMMap / clEnqueueSVMMapARM: validates
/// arguments and queues a no-op — SVM mappings need no actual work here, but
/// the event/ordering semantics must still be honored.
2663 fn enqueue_svm_map_impl(
2664 command_queue: cl_command_queue,
2665 blocking_map: cl_bool,
2666 flags: cl_map_flags,
2667 svm_ptr: *mut ::std::os::raw::c_void,
2669 num_events_in_wait_list: cl_uint,
2670 event_wait_list: *const cl_event,
2671 event: *mut cl_event,
2672 cmd_type: cl_command_type,
2674 let q = command_queue.get_arc()?;
2675 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2676 let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
2678 // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2679 if !q.device.svm_supported() {
2680 return Err(CL_INVALID_OPERATION);
2683 // CL_INVALID_VALUE if svm_ptr is NULL.
2684 if svm_ptr.is_null() {
2685 return Err(CL_INVALID_VALUE);
2688 // CL_INVALID_VALUE if size is 0 ...
2690 return Err(CL_INVALID_VALUE);
2693 // ... or if values specified in map_flags are not valid.
2694 validate_map_flags_common(flags)?;
2696 create_and_queue(q, cmd_type, evs, event, block, Box::new(|_, _| Ok(())))
// clEnqueueSVMMap wrapper — the opening `fn enqueue_svm_map(` line falls in a
// gap of this extract; forwards to enqueue_svm_map_impl (presumably with
// CL_COMMAND_SVM_MAP — the command-type argument line is also missing).
2701 command_queue: cl_command_queue,
2702 blocking_map: cl_bool,
2703 flags: cl_map_flags,
2704 svm_ptr: *mut ::std::os::raw::c_void,
2706 num_events_in_wait_list: cl_uint,
2707 event_wait_list: *const cl_event,
2708 event: *mut cl_event,
2710 enqueue_svm_map_impl(
2716 num_events_in_wait_list,
/// cl_arm_shared_virtual_memory variant: forwards to enqueue_svm_map_impl
/// with CL_COMMAND_SVM_MAP_ARM as the command type.
2724 fn enqueue_svm_map_arm(
2725 command_queue: cl_command_queue,
2726 blocking_map: cl_bool,
2727 flags: cl_map_flags,
2728 svm_ptr: *mut ::std::os::raw::c_void,
2730 num_events_in_wait_list: cl_uint,
2731 event_wait_list: *const cl_event,
2732 event: *mut cl_event,
2734 enqueue_svm_map_impl(
2740 num_events_in_wait_list,
2743 CL_COMMAND_SVM_MAP_ARM,
/// Shared implementation for clEnqueueSVMUnmap / clEnqueueSVMUnmapARM:
/// validates arguments and queues a no-op (matching the no-op map); note the
/// command is queued as non-blocking (`false`).
2747 fn enqueue_svm_unmap_impl(
2748 command_queue: cl_command_queue,
2749 svm_ptr: *mut ::std::os::raw::c_void,
2750 num_events_in_wait_list: cl_uint,
2751 event_wait_list: *const cl_event,
2752 event: *mut cl_event,
2753 cmd_type: cl_command_type,
2755 let q = command_queue.get_arc()?;
2756 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2758 // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2759 if !q.device.svm_supported() {
2760 return Err(CL_INVALID_OPERATION);
2763 // CL_INVALID_VALUE if svm_ptr is NULL.
2764 if svm_ptr.is_null() {
2765 return Err(CL_INVALID_VALUE);
2768 create_and_queue(q, cmd_type, evs, event, false, Box::new(|_, _| Ok(())))
/// clEnqueueSVMUnmap: thin wrapper forwarding to enqueue_svm_unmap_impl with
/// CL_COMMAND_SVM_UNMAP as the command type.
2772 fn enqueue_svm_unmap(
2773 command_queue: cl_command_queue,
2774 svm_ptr: *mut ::std::os::raw::c_void,
2775 num_events_in_wait_list: cl_uint,
2776 event_wait_list: *const cl_event,
2777 event: *mut cl_event,
2779 enqueue_svm_unmap_impl(
2782 num_events_in_wait_list,
2785 CL_COMMAND_SVM_UNMAP,
/// cl_arm_shared_virtual_memory variant: identical to enqueue_svm_unmap but
/// tags the event with CL_COMMAND_SVM_UNMAP_ARM.
2790 fn enqueue_svm_unmap_arm(
2791 command_queue: cl_command_queue,
2792 svm_ptr: *mut ::std::os::raw::c_void,
2793 num_events_in_wait_list: cl_uint,
2794 event_wait_list: *const cl_event,
2795 event: *mut cl_event,
2797 enqueue_svm_unmap_impl(
2800 num_events_in_wait_list,
2803 CL_COMMAND_SVM_UNMAP_ARM,
/// clEnqueueSVMMigrateMem: validates each (pointer, size) pair against the
/// context's tracked SVM allocations, normalizes zero sizes to the whole
/// allocation, and queues the migration.
2808 fn enqueue_svm_migrate_mem(
2809 command_queue: cl_command_queue,
2810 num_svm_pointers: cl_uint,
2811 svm_pointers: *mut *const ::std::os::raw::c_void,
2812 sizes: *const usize,
2813 flags: cl_mem_migration_flags,
2814 num_events_in_wait_list: cl_uint,
2815 event_wait_list: *const cl_event,
2816 event: *mut cl_event,
2818 let q = command_queue.get_arc()?;
2819 let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2821 // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2822 if !q.device.svm_supported() {
2823 return Err(CL_INVALID_OPERATION);
2826 // CL_INVALID_VALUE if num_svm_pointers is zero or svm_pointers is NULL.
2827 if num_svm_pointers == 0 || svm_pointers.is_null() {
2828 return Err(CL_INVALID_VALUE);
2831 let num_svm_pointers = num_svm_pointers as usize;
2832 // SAFETY: Just hoping the application is alright.
2833 let mut svm_pointers =
2834 unsafe { slice::from_raw_parts(svm_pointers, num_svm_pointers) }.to_owned();
2835 // if sizes is NULL, every allocation containing the pointers need to be migrated
2836 let mut sizes = if sizes.is_null() {
2837 vec![0; num_svm_pointers]
2839 unsafe { slice::from_raw_parts(sizes, num_svm_pointers) }.to_owned()
2842 // CL_INVALID_VALUE if sizes[i] is non-zero range [svm_pointers[i], svm_pointers[i]+sizes[i]) is
2843 // not contained within an existing clSVMAlloc allocation.
2844 for (ptr, size) in svm_pointers.iter_mut().zip(&mut sizes) {
2845 if let Some((alloc, layout)) = q.context.find_svm_alloc(ptr.cast()) {
2846 let ptr_addr = *ptr as usize;
2847 let alloc_addr = alloc as usize;
2849 // if the offset + size is bigger than the allocation we are out of bounds
2850 if (ptr_addr - alloc_addr) + *size <= layout.size() {
2851 // if the size is 0, the entire allocation should be migrated
2853 *ptr = alloc.cast();
2854 *size = layout.size();
2860 return Err(CL_INVALID_VALUE);
// CL_MIGRATE_MEM_OBJECT_HOST selects the host as destination; its absence
// means "migrate to the device".
2863 let to_device = !bit_check(flags, CL_MIGRATE_MEM_OBJECT_HOST);
2864 let content_undefined = bit_check(flags, CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED);
2868 CL_COMMAND_SVM_MIGRATE_MEM,
2872 Box::new(move |_, ctx| {
2873 ctx.svm_migrate(&svm_pointers, &sizes, to_device, content_undefined);
// clCreatePipe stub — the opening `fn` line (presumably create_pipe) falls in
// a gap of this extract. Pipes are not supported by this implementation, so
// creation always fails with CL_INVALID_OPERATION; all parameters are
// deliberately unused (underscore-prefixed).
2881 _context: cl_context,
2882 _flags: cl_mem_flags,
2883 _pipe_packet_size: cl_uint,
2884 _pipe_max_packets: cl_uint,
2885 _properties: *const cl_pipe_properties,
2886 ) -> CLResult<cl_mem> {
2887 Err(CL_INVALID_OPERATION)