// external/mesa3d/src/gallium/frontends/rusticl/api/memory.rs
#![allow(non_upper_case_globals)]

use crate::api::event::create_and_queue;
use crate::api::icd::*;
use crate::api::types::*;
use crate::api::util::*;
use crate::core::context::Context;
use crate::core::device::*;
use crate::core::event::EventSig;
use crate::core::format::*;
use crate::core::gl::*;
use crate::core::memory::*;
use crate::core::queue::*;

use mesa_rust_util::properties::Properties;
use mesa_rust_util::ptr::*;
use mesa_rust_util::static_assert;
use rusticl_opencl_gen::*;
use rusticl_proc_macros::cl_entrypoint;
use rusticl_proc_macros::cl_info_entrypoint;

use std::alloc;
use std::alloc::Layout;
use std::cmp;
use std::cmp::Ordering;
use std::mem::{self, MaybeUninit};
use std::os::raw::c_void;
use std::ptr;
use std::slice;
use std::sync::Arc;

fn validate_mem_flags(flags: cl_mem_flags, images: bool) -> CLResult<()> {
    let mut valid_flags = cl_bitfield::from(
        CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE,
    );

    if !images {
        valid_flags |= cl_bitfield::from(
            CL_MEM_USE_HOST_PTR
                | CL_MEM_ALLOC_HOST_PTR
                | CL_MEM_COPY_HOST_PTR
                | CL_MEM_HOST_WRITE_ONLY
                | CL_MEM_HOST_READ_ONLY
                | CL_MEM_HOST_NO_ACCESS,
        );
    }

    let read_write_group =
        cl_bitfield::from(CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY);

    let alloc_host_group = cl_bitfield::from(CL_MEM_ALLOC_HOST_PTR | CL_MEM_USE_HOST_PTR);

    let copy_host_group = cl_bitfield::from(CL_MEM_COPY_HOST_PTR | CL_MEM_USE_HOST_PTR);

    let host_read_write_group =
        cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);

    if (flags & !valid_flags != 0)
        || (flags & read_write_group).count_ones() > 1
        || (flags & alloc_host_group).count_ones() > 1
        || (flags & copy_host_group).count_ones() > 1
        || (flags & host_read_write_group).count_ones() > 1
    {
        return Err(CL_INVALID_VALUE);
    }
    Ok(())
}
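// Illustrative sketch (not part of the upstream file): the four flag groups
// above are mutually exclusive within themselves, and the host-ptr flags are
// only valid for buffers. A hedged test sketch of the intended behaviour:
#[cfg(test)]
mod validate_mem_flags_sketch {
    use super::*;

    #[test]
    fn mutually_exclusive_groups() {
        // a single access qualifier is fine for buffers
        assert!(validate_mem_flags(CL_MEM_READ_WRITE.into(), false).is_ok());
        // two flags from the read/write group contradict each other
        assert_eq!(
            validate_mem_flags((CL_MEM_READ_WRITE | CL_MEM_READ_ONLY).into(), false),
            Err(CL_INVALID_VALUE)
        );
        // host-ptr flags fall outside valid_flags when validating for images
        assert_eq!(
            validate_mem_flags(CL_MEM_USE_HOST_PTR.into(), true),
            Err(CL_INVALID_VALUE)
        );
    }
}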

fn validate_map_flags_common(map_flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_VALUE ... if values specified in map_flags are not valid.
    let valid_flags =
        cl_bitfield::from(CL_MAP_READ | CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION);
    let read_write_group = cl_bitfield::from(CL_MAP_READ | CL_MAP_WRITE);
    let invalidate_group = cl_bitfield::from(CL_MAP_WRITE_INVALIDATE_REGION);

    if (map_flags & !valid_flags != 0)
        || ((map_flags & read_write_group != 0) && (map_flags & invalidate_group != 0))
    {
        return Err(CL_INVALID_VALUE);
    }

    Ok(())
}

fn validate_map_flags(m: &MemBase, map_flags: cl_mem_flags) -> CLResult<()> {
    validate_map_flags_common(map_flags)?;

    // CL_INVALID_OPERATION if buffer has been created with CL_MEM_HOST_WRITE_ONLY or
    // CL_MEM_HOST_NO_ACCESS and CL_MAP_READ is set in map_flags
    if bit_check(m.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) &&
      bit_check(map_flags, CL_MAP_READ) ||
      // or if buffer has been created with CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS and
      // CL_MAP_WRITE or CL_MAP_WRITE_INVALIDATE_REGION is set in map_flags.
      bit_check(m.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) &&
      bit_check(map_flags, CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION)
    {
        return Err(CL_INVALID_OPERATION);
    }

    Ok(())
}
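// Illustrative sketch (not part of the upstream file): CL_MAP_READ and
// CL_MAP_WRITE may be combined with each other, but neither may be combined
// with CL_MAP_WRITE_INVALIDATE_REGION.
#[cfg(test)]
mod validate_map_flags_sketch {
    use super::*;

    #[test]
    fn invalidate_region_is_exclusive() {
        assert!(validate_map_flags_common((CL_MAP_READ | CL_MAP_WRITE).into()).is_ok());
        assert_eq!(
            validate_map_flags_common((CL_MAP_READ | CL_MAP_WRITE_INVALIDATE_REGION).into()),
            Err(CL_INVALID_VALUE)
        );
    }
}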

fn filter_image_access_flags(flags: cl_mem_flags) -> cl_mem_flags {
    flags
        & (CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE)
            as cl_mem_flags
}

fn inherit_mem_flags(mut flags: cl_mem_flags, mem: &MemBase) -> cl_mem_flags {
    let read_write_mask = cl_bitfield::from(
        CL_MEM_READ_WRITE |
      CL_MEM_WRITE_ONLY |
      CL_MEM_READ_ONLY |
      // not in spec, but...
      CL_MEM_KERNEL_READ_AND_WRITE,
    );
    let host_ptr_mask =
        cl_bitfield::from(CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR);
    let host_mask =
        cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);

    // For CL_MEM_OBJECT_IMAGE1D_BUFFER image type, or an image created from another memory object
    // (image or buffer)...
    //
    // ... if the CL_MEM_READ_WRITE, CL_MEM_READ_ONLY or CL_MEM_WRITE_ONLY values are not
    // specified in flags, they are inherited from the corresponding memory access qualifiers
    // associated with mem_object. ...
    if flags & read_write_mask == 0 {
        flags |= mem.flags & read_write_mask;
    }

    // ... The CL_MEM_USE_HOST_PTR, CL_MEM_ALLOC_HOST_PTR and CL_MEM_COPY_HOST_PTR values cannot
    // be specified in flags but are inherited from the corresponding memory access qualifiers
    // associated with mem_object. ...
    flags &= !host_ptr_mask;
    flags |= mem.flags & host_ptr_mask;

    // ... If the CL_MEM_HOST_WRITE_ONLY, CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS values
    // are not specified in flags, they are inherited from the corresponding memory access
    // qualifiers associated with mem_object.
    if flags & host_mask == 0 {
        flags |= mem.flags & host_mask;
    }

    flags
}
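// Worked example (illustrative, not in the upstream file): for a parent
// created with CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR and a child requested
// with flags == CL_MEM_HOST_NO_ACCESS, the child ends up with
//   CL_MEM_READ_ONLY      (access qualifier inherited, none was requested)
// | CL_MEM_USE_HOST_PTR   (host-ptr flags are always taken from the parent)
// | CL_MEM_HOST_NO_ACCESS (explicitly requested, so the host access group
//                          is not inherited).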

fn image_type_valid(image_type: cl_mem_object_type) -> bool {
    CL_IMAGE_TYPES.contains(&image_type)
}

fn validate_addressing_mode(addressing_mode: cl_addressing_mode) -> CLResult<()> {
    match addressing_mode {
        CL_ADDRESS_NONE
        | CL_ADDRESS_CLAMP_TO_EDGE
        | CL_ADDRESS_CLAMP
        | CL_ADDRESS_REPEAT
        | CL_ADDRESS_MIRRORED_REPEAT => Ok(()),
        _ => Err(CL_INVALID_VALUE),
    }
}

fn validate_filter_mode(filter_mode: cl_filter_mode) -> CLResult<()> {
    match filter_mode {
        CL_FILTER_NEAREST | CL_FILTER_LINEAR => Ok(()),
        _ => Err(CL_INVALID_VALUE),
    }
}

fn validate_host_ptr(host_ptr: *mut ::std::os::raw::c_void, flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_HOST_PTR if host_ptr is NULL and CL_MEM_USE_HOST_PTR or CL_MEM_COPY_HOST_PTR are
    // set in flags
    if host_ptr.is_null()
        && flags & (cl_mem_flags::from(CL_MEM_USE_HOST_PTR | CL_MEM_COPY_HOST_PTR)) != 0
    {
        return Err(CL_INVALID_HOST_PTR);
    }

    // or if host_ptr is not NULL but CL_MEM_COPY_HOST_PTR or CL_MEM_USE_HOST_PTR are not set in
    // flags.
    if !host_ptr.is_null()
        && flags & (cl_mem_flags::from(CL_MEM_USE_HOST_PTR | CL_MEM_COPY_HOST_PTR)) == 0
    {
        return Err(CL_INVALID_HOST_PTR);
    }

    Ok(())
}
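// Illustrative sketch (not part of the upstream file): host_ptr and the two
// host-ptr consuming flags must be specified together or not at all. The
// non-null pointer below is a dummy value; it is only checked for nullness.
#[cfg(test)]
mod validate_host_ptr_sketch {
    use super::*;

    #[test]
    fn host_ptr_must_match_flags() {
        let dummy = 0x1000 as *mut c_void;
        assert!(validate_host_ptr(ptr::null_mut(), CL_MEM_READ_WRITE.into()).is_ok());
        assert_eq!(
            validate_host_ptr(ptr::null_mut(), CL_MEM_USE_HOST_PTR.into()),
            Err(CL_INVALID_HOST_PTR)
        );
        assert_eq!(
            validate_host_ptr(dummy, CL_MEM_READ_WRITE.into()),
            Err(CL_INVALID_HOST_PTR)
        );
        assert!(validate_host_ptr(dummy, CL_MEM_COPY_HOST_PTR.into()).is_ok());
    }
}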

fn validate_matching_buffer_flags(mem: &MemBase, flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_VALUE if an image is being created from another memory object (buffer or image)
    // under one of the following circumstances:
    //
    // 1) mem_object was created with CL_MEM_WRITE_ONLY and
    //    flags specifies CL_MEM_READ_WRITE or CL_MEM_READ_ONLY,
    if bit_check(mem.flags, CL_MEM_WRITE_ONLY) && bit_check(flags, CL_MEM_READ_WRITE | CL_MEM_READ_ONLY) ||
      // 2) mem_object was created with CL_MEM_READ_ONLY and
      //    flags specifies CL_MEM_READ_WRITE or CL_MEM_WRITE_ONLY,
      bit_check(mem.flags, CL_MEM_READ_ONLY) && bit_check(flags, CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY) ||
      // 3) flags specifies CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR or CL_MEM_COPY_HOST_PTR.
      bit_check(flags, CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR) ||
      // CL_INVALID_VALUE if an image is being created from another memory object (buffer or image)
      // and mem_object was created with CL_MEM_HOST_WRITE_ONLY and flags specifies CL_MEM_HOST_READ_ONLY
      bit_check(mem.flags, CL_MEM_HOST_WRITE_ONLY) && bit_check(flags, CL_MEM_HOST_READ_ONLY) ||
      // or if mem_object was created with CL_MEM_HOST_READ_ONLY and flags specifies CL_MEM_HOST_WRITE_ONLY
      bit_check(mem.flags, CL_MEM_HOST_READ_ONLY) && bit_check(flags, CL_MEM_HOST_WRITE_ONLY) ||
      // or if mem_object was created with CL_MEM_HOST_NO_ACCESS and flags specifies CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_WRITE_ONLY.
      bit_check(mem.flags, CL_MEM_HOST_NO_ACCESS) && bit_check(flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_WRITE_ONLY)
    {
        return Err(CL_INVALID_VALUE);
    }

    Ok(())
}

#[cl_info_entrypoint(clGetMemObjectInfo)]
impl CLInfo<cl_mem_info> for cl_mem {
    fn query(&self, q: cl_mem_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let mem = MemBase::ref_from_raw(*self)?;
        Ok(match *q {
            CL_MEM_ASSOCIATED_MEMOBJECT => {
                let ptr = match mem.parent.as_ref() {
                    // Note we use as_ptr here which doesn't increase the reference count.
                    Some(Mem::Buffer(buffer)) => cl_mem::from_ptr(Arc::as_ptr(buffer)),
                    Some(Mem::Image(image)) => cl_mem::from_ptr(Arc::as_ptr(image)),
                    None => ptr::null_mut(),
                };
                cl_prop::<cl_mem>(ptr.cast())
            }
            CL_MEM_CONTEXT => {
                // Note we use as_ptr here which doesn't increase the reference count.
                let ptr = Arc::as_ptr(&mem.context);
                cl_prop::<cl_context>(cl_context::from_ptr(ptr))
            }
            CL_MEM_FLAGS => cl_prop::<cl_mem_flags>(mem.flags),
            // TODO debugging feature
            CL_MEM_MAP_COUNT => cl_prop::<cl_uint>(0),
            CL_MEM_HOST_PTR => cl_prop::<*mut c_void>(mem.host_ptr()),
            CL_MEM_OFFSET => cl_prop::<usize>(if mem.is_buffer() {
                Buffer::ref_from_raw(*self)?.offset
            } else {
                0
            }),
            CL_MEM_PROPERTIES => cl_prop::<&Vec<cl_mem_properties>>(&mem.props),
            CL_MEM_REFERENCE_COUNT => cl_prop::<cl_uint>(if mem.is_buffer() {
                Buffer::refcnt(*self)?
            } else {
                Image::refcnt(*self)?
            }),
            CL_MEM_SIZE => cl_prop::<usize>(mem.size),
            CL_MEM_TYPE => cl_prop::<cl_mem_object_type>(mem.mem_type),
            CL_MEM_USES_SVM_POINTER | CL_MEM_USES_SVM_POINTER_ARM => {
                cl_prop::<cl_bool>(mem.is_svm().into())
            }
            _ => return Err(CL_INVALID_VALUE),
        })
    }
}

#[cl_entrypoint(clCreateBufferWithProperties)]
fn create_buffer_with_properties(
    context: cl_context,
    properties: *const cl_mem_properties,
    flags: cl_mem_flags,
    size: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let c = Context::arc_from_raw(context)?;

    // CL_INVALID_VALUE if values specified in flags are not valid as defined in the Memory Flags table.
    validate_mem_flags(flags, false)?;

    // CL_INVALID_BUFFER_SIZE if size is 0
    if size == 0 {
        return Err(CL_INVALID_BUFFER_SIZE);
    }

    // ... or if size is greater than CL_DEVICE_MAX_MEM_ALLOC_SIZE for all devices in context,
    if checked_compare(size, Ordering::Greater, c.max_mem_alloc()) {
        return Err(CL_INVALID_BUFFER_SIZE);
    }

    validate_host_ptr(host_ptr, flags)?;

    // or if CL_MEM_USE_HOST_PTR is set in flags and host_ptr is a pointer returned by clSVMAlloc
    // and size is greater than the size passed to clSVMAlloc.
    if let Some((svm_ptr, svm_layout)) = c.find_svm_alloc(host_ptr as usize) {
        // SAFETY: they are part of the same allocation, and because host_ptr >= svm_ptr we can cast
        // to usize.
        let diff = unsafe { host_ptr.byte_offset_from(svm_ptr) } as usize;

        // technically we don't have to account for the offset, but it's almost for free.
        if size > svm_layout - diff {
            return Err(CL_INVALID_BUFFER_SIZE);
        }
    }
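    // Worked example (illustrative): for an SVM allocation of 4096 bytes at
    // svm_ptr and host_ptr == svm_ptr + 1024, diff is 1024, so any size above
    // 3072 would reach past the end of the SVM allocation and is rejected.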

    let props = Properties::from_ptr_raw(properties);
    // CL_INVALID_PROPERTY if a property name in properties is not a supported property name, if
    // the value specified for a supported property name is not valid, or if the same property name
    // is specified more than once.
    if props.len() > 1 {
        // we don't support any properties besides the 0 property
        return Err(CL_INVALID_PROPERTY);
    }

    Ok(MemBase::new_buffer(c, flags, size, host_ptr, props)?.into_cl())
}

#[cl_entrypoint(clCreateBuffer)]
fn create_buffer(
    context: cl_context,
    flags: cl_mem_flags,
    size: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    create_buffer_with_properties(context, ptr::null(), flags, size, host_ptr)
}

#[cl_entrypoint(clCreateSubBuffer)]
fn create_sub_buffer(
    buffer: cl_mem,
    mut flags: cl_mem_flags,
    buffer_create_type: cl_buffer_create_type,
    buffer_create_info: *const ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let b = Buffer::arc_from_raw(buffer)?;

    // CL_INVALID_MEM_OBJECT if buffer ... is a sub-buffer object.
    if b.parent.is_some() {
        return Err(CL_INVALID_MEM_OBJECT);
    }

    validate_matching_buffer_flags(&b, flags)?;

    flags = inherit_mem_flags(flags, &b);
    validate_mem_flags(flags, false)?;

    let (offset, size) = match buffer_create_type {
        CL_BUFFER_CREATE_TYPE_REGION => {
            // buffer_create_info is a pointer to a cl_buffer_region structure specifying a region of
            // the buffer.
            // CL_INVALID_VALUE if value(s) specified in buffer_create_info (for a given
            // buffer_create_type) is not valid or if buffer_create_info is NULL.
            let region = unsafe { buffer_create_info.cast::<cl_buffer_region>().as_ref() }
                .ok_or(CL_INVALID_VALUE)?;

            // CL_INVALID_BUFFER_SIZE if the size field of the cl_buffer_region structure passed in
            // buffer_create_info is 0.
            if region.size == 0 {
                return Err(CL_INVALID_BUFFER_SIZE);
            }

            // CL_INVALID_VALUE if the region specified by the cl_buffer_region structure passed in
            // buffer_create_info is out of bounds in buffer.
            if region.origin >= b.size || region.size > b.size - region.origin {
                return Err(CL_INVALID_VALUE);
            }

            (region.origin, region.size)
        }
        // CL_INVALID_VALUE if the value specified in buffer_create_type is not valid.
        _ => return Err(CL_INVALID_VALUE),
    };

    Ok(MemBase::new_sub_buffer(b, flags, offset, size).into_cl())

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if there are no devices in context associated with buffer for which the origin field of the cl_buffer_region structure passed in buffer_create_info is aligned to the CL_DEVICE_MEM_BASE_ADDR_ALIGN value.
}
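// Worked example (illustrative, not in the upstream file): for a 1024-byte
// parent buffer, CL_BUFFER_CREATE_TYPE_REGION with {origin: 512, size: 512}
// is accepted, while {origin: 512, size: 768} fails the
// `region.size > b.size - region.origin` check and returns CL_INVALID_VALUE.
// Writing the bound as a subtraction (after the `origin >= b.size` check)
// avoids overflowing `origin + size`.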

#[cl_entrypoint(clSetMemObjectDestructorCallback)]
fn set_mem_object_destructor_callback(
    memobj: cl_mem,
    pfn_notify: Option<FuncMemCB>,
    user_data: *mut ::std::os::raw::c_void,
) -> CLResult<()> {
    let m = MemBase::ref_from_raw(memobj)?;

    // SAFETY: The requirements on `MemCB::new` match the requirements
    // imposed by the OpenCL specification. It is the caller's duty to uphold them.
    let cb = unsafe { MemCB::new(pfn_notify, user_data)? };

    m.cbs.lock().unwrap().push(cb);
    Ok(())
}

fn validate_image_format<'a>(
    image_format: *const cl_image_format,
) -> CLResult<(&'a cl_image_format, u8)> {
    // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR ... if image_format is NULL.
    let format = unsafe { image_format.as_ref() }.ok_or(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)?;
    let pixel_size = format
        .pixel_size()
        .ok_or(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)?;

    // special validation
    let valid_combination = match format.image_channel_data_type {
        CL_UNORM_SHORT_565 | CL_UNORM_SHORT_555 | CL_UNORM_INT_101010 => {
            [CL_RGB, CL_RGBx].contains(&format.image_channel_order)
        }
        CL_UNORM_INT_101010_2 => format.image_channel_order == CL_RGBA,
        _ => true,
    };
    if !valid_combination {
        return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
    }

    Ok((format, pixel_size))
}
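// Illustrative example (not in the upstream file): the packed data types are
// only meaningful for specific channel orders. CL_UNORM_SHORT_565 with CL_RGB
// passes the special validation above, while CL_UNORM_SHORT_565 with CL_RGBA
// is rejected with CL_INVALID_IMAGE_FORMAT_DESCRIPTOR.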

fn validate_image_desc(
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
    elem_size: usize,
    devs: &[&Device],
) -> CLResult<(cl_image_desc, Option<Mem>)> {
    // CL_INVALID_IMAGE_DESCRIPTOR if values specified in image_desc are not valid
    const err: cl_int = CL_INVALID_IMAGE_DESCRIPTOR;

    // CL_INVALID_IMAGE_DESCRIPTOR ... if image_desc is NULL.
    let mut desc = *unsafe { image_desc.as_ref() }.ok_or(err)?;

    // image_type describes the image type and must be either CL_MEM_OBJECT_IMAGE1D,
    // CL_MEM_OBJECT_IMAGE1D_BUFFER, CL_MEM_OBJECT_IMAGE1D_ARRAY, CL_MEM_OBJECT_IMAGE2D,
    // CL_MEM_OBJECT_IMAGE2D_ARRAY, or CL_MEM_OBJECT_IMAGE3D.
    if !CL_IMAGE_TYPES.contains(&desc.image_type) {
        return Err(err);
    }

    let (dims, array) = desc.type_info();

    // image_width is the width of the image in pixels. For a 2D image and image array, the image
    // width must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_WIDTH. For a 3D image, the image width
    // must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_WIDTH. For a 1D image buffer, the image width
    // must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE_MAX_BUFFER_SIZE. For a 1D image and 1D image array,
    // the image width must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_WIDTH.
    //
    // image_height is the height of the image in pixels. This is only used if the image is a 2D or
    // 3D image, or a 2D image array. For a 2D image or image array, the image height must be a
    // value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_HEIGHT. For a 3D image, the image height must be a
    // value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_HEIGHT.
    //
    // image_depth is the depth of the image in pixels. This is only used if the image is a 3D image
    // and must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_DEPTH.
    if desc.image_width < 1
        || desc.image_height < 1 && dims >= 2
        || desc.image_depth < 1 && dims >= 3
        || desc.image_array_size < 1 && array
    {
        return Err(err);
    }

    let max_size = if dims == 3 {
        devs.iter().map(|d| d.image_3d_size()).min()
    } else if desc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER {
        devs.iter().map(|d| d.image_buffer_max_size_pixels()).min()
    } else {
        devs.iter().map(|d| d.caps.image_2d_size as usize).min()
    }
    .unwrap();
    let max_array = devs.iter().map(|d| d.image_array_size()).min().unwrap();

    // CL_INVALID_IMAGE_SIZE if image dimensions specified in image_desc exceed the maximum image
    // dimensions described in the Device Queries table for all devices in context.
    if desc.image_width > max_size
        || desc.image_height > max_size && dims >= 2
        || desc.image_depth > max_size && dims >= 3
        || desc.image_array_size > max_array && array
    {
        return Err(CL_INVALID_IMAGE_SIZE);
    }

    // num_mip_levels and num_samples must be 0.
    if desc.num_mip_levels != 0 || desc.num_samples != 0 {
        return Err(err);
    }

    // mem_object may refer to a valid buffer or image memory object. mem_object can be a buffer
    // memory object if image_type is CL_MEM_OBJECT_IMAGE1D_BUFFER or CL_MEM_OBJECT_IMAGE2D.
    // mem_object can be an image object if image_type is CL_MEM_OBJECT_IMAGE2D. Otherwise it must
    // be NULL.
    //
    // TODO: cl_khr_image2d_from_buffer is an optional feature
    let p = unsafe { &desc.anon_1.mem_object };
    let parent = if !p.is_null() {
        let p = MemBase::arc_from_raw(*p)?;
        if !match desc.image_type {
            CL_MEM_OBJECT_IMAGE1D_BUFFER => p.is_buffer(),
            CL_MEM_OBJECT_IMAGE2D => {
                (p.is_buffer() && devs.iter().any(|d| d.image2d_from_buffer_supported()))
                    || p.mem_type == CL_MEM_OBJECT_IMAGE2D
            }
            _ => false,
        } {
            return Err(CL_INVALID_OPERATION);
        }
        Some(p)
    } else {
        None
    };

    // image_row_pitch is the scan-line pitch in bytes. This must be 0 if host_ptr is NULL and can
    // be either 0 or ≥ image_width × size of element in bytes if host_ptr is not NULL. If host_ptr
    // is not NULL and image_row_pitch = 0, image_row_pitch is calculated as image_width × size of
    // element in bytes. If image_row_pitch is not 0, it must be a multiple of the image element
    // size in bytes. For a 2D image created from a buffer, the pitch specified (or computed if
    // pitch specified is 0) must be a multiple of the maximum of the
    // CL_DEVICE_IMAGE_PITCH_ALIGNMENT value for all devices in the context associated with the
    // buffer specified by mem_object that support images.
    //
    // image_slice_pitch is the size in bytes of each 2D slice in the 3D image or the size in bytes
    // of each image in a 1D or 2D image array. This must be 0 if host_ptr is NULL. If host_ptr is
    // not NULL, image_slice_pitch can be either 0 or ≥ image_row_pitch × image_height for a 2D
    // image array or 3D image and can be either 0 or ≥ image_row_pitch for a 1D image array. If
    // host_ptr is not NULL and image_slice_pitch = 0, image_slice_pitch is calculated as
    // image_row_pitch × image_height for a 2D image array or 3D image and image_row_pitch for a 1D
    // image array. If image_slice_pitch is not 0, it must be a multiple of the image_row_pitch.
    let has_buf_parent = parent.as_ref().map_or(false, |p| p.is_buffer());
    if host_ptr.is_null() {
        if (desc.image_row_pitch != 0 || desc.image_slice_pitch != 0) && !has_buf_parent {
            return Err(err);
        }

        if desc.image_row_pitch == 0 {
            desc.image_row_pitch = desc.image_width * elem_size;
        }
        if desc.image_slice_pitch == 0 {
            desc.image_slice_pitch = desc.image_row_pitch * cmp::max(1, desc.image_height);
        }

        if has_buf_parent && desc.image_type != CL_MEM_OBJECT_IMAGE1D_BUFFER {
            let pitch_alignment = devs
                .iter()
                .map(|d| d.image_pitch_alignment())
                .max()
                .unwrap() as usize;
            if desc.image_row_pitch % (pitch_alignment * elem_size) != 0 {
                return Err(err);
            }
        }
    } else {
        if desc.image_row_pitch == 0 {
            desc.image_row_pitch = desc.image_width * elem_size;
        } else if desc.image_row_pitch % elem_size != 0 {
            return Err(err);
        }

        if dims == 3 || array {
            let valid_slice_pitch = desc.image_row_pitch * cmp::max(1, desc.image_height);
            if desc.image_slice_pitch == 0 {
                desc.image_slice_pitch = valid_slice_pitch;
            } else if desc.image_slice_pitch < valid_slice_pitch
                || desc.image_slice_pitch % desc.image_row_pitch != 0
            {
                return Err(err);
            }
        }
    }

    Ok((desc, parent))
}
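// Worked example (illustrative, not in the upstream file): for a 2D image
// array of 64×64 RGBA8 pixels (elem_size == 4) with host_ptr != NULL and both
// pitches left at 0, the code above computes image_row_pitch = 64 * 4 = 256
// and image_slice_pitch = 256 * 64 = 16384, i.e. tightly packed rows and
// slices.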

fn validate_image_bounds(i: &Image, origin: CLVec<usize>, region: CLVec<usize>) -> CLResult<()> {
    let dims = i.image_desc.dims_with_array();
    let bound = region + origin;
    if bound > i.image_desc.size() {
        return Err(CL_INVALID_VALUE);
    }

    // If image is a 2D image object, origin[2] must be 0. If image is a 1D image or 1D image buffer
    // object, origin[1] and origin[2] must be 0. If image is a 1D image array object, origin[2]
    // must be 0.
    if dims < 3 && origin[2] != 0 || dims < 2 && origin[1] != 0 {
        return Err(CL_INVALID_VALUE);
    }

    // If image is a 2D image object, region[2] must be 1. If image is a 1D image or 1D image buffer
    // object, region[1] and region[2] must be 1. If image is a 1D image array object, region[2]
    // must be 1. The values in region cannot be 0.
    if dims < 3 && region[2] != 1 || dims < 2 && region[1] != 1 || region.contains(&0) {
        return Err(CL_INVALID_VALUE);
    }

    Ok(())
}
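// Worked example (illustrative, not in the upstream file): for a 2D image of
// 128×128, origin = (96, 96, 0) with region = (64, 64, 1) is rejected because
// bound = (160, 160, 1) exceeds the image size, and region = (32, 32, 2)
// would be rejected because region[2] must be 1 for a 2D image.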

fn desc_eq_no_buffer(a: &cl_image_desc, b: &cl_image_desc) -> bool {
    a.image_type == b.image_type
        && a.image_width == b.image_width
        && a.image_height == b.image_height
        && a.image_depth == b.image_depth
        && a.image_array_size == b.image_array_size
        && a.image_row_pitch == b.image_row_pitch
        && a.image_slice_pitch == b.image_slice_pitch
        && a.num_mip_levels == b.num_mip_levels
        && a.num_samples == b.num_samples
}

fn validate_buffer(
    desc: &cl_image_desc,
    mut flags: cl_mem_flags,
    format: &cl_image_format,
    host_ptr: *mut ::std::os::raw::c_void,
    elem_size: usize,
) -> CLResult<cl_mem_flags> {
    // CL_INVALID_IMAGE_DESCRIPTOR if values specified in image_desc are not valid
    const err: cl_int = CL_INVALID_IMAGE_DESCRIPTOR;
    let mem_object = unsafe { desc.anon_1.mem_object };

    // mem_object may refer to a valid buffer or image memory object. mem_object can be a buffer
    // memory object if image_type is CL_MEM_OBJECT_IMAGE1D_BUFFER or CL_MEM_OBJECT_IMAGE2D
    // mem_object can be an image object if image_type is CL_MEM_OBJECT_IMAGE2D. Otherwise it must
    // be NULL. The image pixels are taken from the memory objects data store. When the contents of
    // the specified memory objects data store are modified, those changes are reflected in the
    // contents of the image object and vice-versa at corresponding synchronization points.
    if !mem_object.is_null() {
        let mem = MemBase::ref_from_raw(mem_object)?;

        match mem.mem_type {
            CL_MEM_OBJECT_BUFFER => {
                match desc.image_type {
                    // For a 1D image buffer created from a buffer object, the image_width × size of
                    // element in bytes must be ≤ size of the buffer object.
                    CL_MEM_OBJECT_IMAGE1D_BUFFER => {
                        if desc.image_width * elem_size > mem.size {
                            return Err(err);
                        }
                    }
                    // For a 2D image created from a buffer object, the image_row_pitch × image_height
                    // must be ≤ size of the buffer object specified by mem_object.
                    CL_MEM_OBJECT_IMAGE2D => {
                        //TODO
                        //• CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if a 2D image is created from a buffer and the row pitch and base address alignment does not follow the rules described for creating a 2D image from a buffer.
                        if desc.image_row_pitch * desc.image_height > mem.size {
                            return Err(err);
                        }

                        // If the buffer object specified by mem_object was created with
                        // CL_MEM_USE_HOST_PTR, the host_ptr specified to clCreateBuffer or
                        // clCreateBufferWithProperties must be aligned to the maximum of the
                        // CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT value for all devices in the
                        // context associated with the buffer specified by mem_object that support
                        // images.
                        if mem.flags & CL_MEM_USE_HOST_PTR as cl_mem_flags != 0 {
                            for dev in &mem.context.devs {
                                // CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT is only relevant for 2D
                                // images created from a buffer object.
                                let addr_alignment = dev.image_base_address_alignment();
                                if addr_alignment == 0 {
                                    return Err(CL_INVALID_OPERATION);
                                } else if !is_alligned(host_ptr, addr_alignment as usize) {
                                    return Err(err);
                                }
                            }
                        }
                    }
                    _ => return Err(err),
                }
            }
            // For an image object created from another image object, the values specified in the
            // image descriptor except for mem_object must match the image descriptor information
            // associated with mem_object.
            CL_MEM_OBJECT_IMAGE2D => {
                let image = Image::ref_from_raw(mem_object).unwrap();
                if desc.image_type != mem.mem_type || !desc_eq_no_buffer(desc, &image.image_desc) {
                    return Err(err);
                }

                // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if a 2D image is created from a 2D image object
                // and the rules described above are not followed.

                // Creating a 2D image object from another 2D image object creates a new 2D image
                // object that shares the image data store with mem_object but views the pixels in the
                // image with a different image channel order. Restrictions are:
                //
                // The image channel data type specified in image_format must match the image channel
                // data type associated with mem_object.
                if format.image_channel_data_type != image.image_format.image_channel_data_type {
                    return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
                }

                // The image channel order specified in image_format must be compatible with the image
                // channel order associated with mem_object. Compatible image channel orders are:
                if format.image_channel_order != image.image_format.image_channel_order {
                    // in image_format | in  mem_object:
                    // CL_sBGRA | CL_BGRA
                    // CL_BGRA  | CL_sBGRA
                    // CL_sRGBA | CL_RGBA
                    // CL_RGBA  | CL_sRGBA
                    // CL_sRGB  | CL_RGB
                    // CL_RGB   | CL_sRGB
                    // CL_sRGBx | CL_RGBx
                    // CL_RGBx  | CL_sRGBx
                    // CL_DEPTH | CL_R
                    match (
                        format.image_channel_order,
                        image.image_format.image_channel_order,
                    ) {
                        (CL_sBGRA, CL_BGRA)
                        | (CL_BGRA, CL_sBGRA)
                        | (CL_sRGBA, CL_RGBA)
                        | (CL_RGBA, CL_sRGBA)
                        | (CL_sRGB, CL_RGB)
                        | (CL_RGB, CL_sRGB)
                        | (CL_sRGBx, CL_RGBx)
                        | (CL_RGBx, CL_sRGBx)
                        | (CL_DEPTH, CL_R) => (),
                        _ => return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR),
                    }
                }
            }
            _ => return Err(err),
        }

        validate_matching_buffer_flags(mem, flags)?;

        flags = inherit_mem_flags(flags, mem);
    // implied by spec
    } else if desc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER {
        return Err(err);
    }

    Ok(flags)
}

#[cl_info_entrypoint(clGetImageInfo)]
impl CLInfo<cl_image_info> for cl_mem {
    fn query(&self, q: cl_image_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let mem = Image::ref_from_raw(*self)?;
        Ok(match *q {
            CL_IMAGE_ARRAY_SIZE => cl_prop::<usize>(mem.image_desc.image_array_size),
            CL_IMAGE_BUFFER => cl_prop::<cl_mem>(unsafe { mem.image_desc.anon_1.buffer }),
            CL_IMAGE_DEPTH => cl_prop::<usize>(mem.image_desc.image_depth),
            CL_IMAGE_ELEMENT_SIZE => cl_prop::<usize>(mem.image_elem_size.into()),
            CL_IMAGE_FORMAT => cl_prop::<cl_image_format>(mem.image_format),
            CL_IMAGE_HEIGHT => cl_prop::<usize>(mem.image_desc.image_height),
            CL_IMAGE_NUM_MIP_LEVELS => cl_prop::<cl_uint>(mem.image_desc.num_mip_levels),
            CL_IMAGE_NUM_SAMPLES => cl_prop::<cl_uint>(mem.image_desc.num_samples),
            CL_IMAGE_ROW_PITCH => cl_prop::<usize>(mem.image_desc.image_row_pitch),
            CL_IMAGE_SLICE_PITCH => cl_prop::<usize>(if mem.image_desc.dims() == 1 {
                0
            } else {
                mem.image_desc.image_slice_pitch
            }),
            CL_IMAGE_WIDTH => cl_prop::<usize>(mem.image_desc.image_width),
            _ => return Err(CL_INVALID_VALUE),
        })
    }
}

#[cl_entrypoint(clCreateImageWithProperties)]
fn create_image_with_properties(
    context: cl_context,
    properties: *const cl_mem_properties,
    mut flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let c = Context::arc_from_raw(context)?;

    // CL_INVALID_OPERATION if there are no devices in context that support images (i.e.
    // CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
    c.devs
        .iter()
        .find(|d| d.caps.has_images)
        .ok_or(CL_INVALID_OPERATION)?;

    let (format, elem_size) = validate_image_format(image_format)?;
    let (desc, parent) = validate_image_desc(image_desc, host_ptr, elem_size.into(), &c.devs)?;

    // validate host_ptr before merging flags
    validate_host_ptr(host_ptr, flags)?;

    flags = validate_buffer(&desc, flags, format, host_ptr, elem_size.into())?;

    // For all image types except CL_MEM_OBJECT_IMAGE1D_BUFFER, if the value specified for flags is 0, the
    // default is used which is CL_MEM_READ_WRITE.
    if flags == 0 && desc.image_type != CL_MEM_OBJECT_IMAGE1D_BUFFER {
        flags = CL_MEM_READ_WRITE.into();
    }

    validate_mem_flags(flags, false)?;

    let filtered_flags = filter_image_access_flags(flags);
    // CL_IMAGE_FORMAT_NOT_SUPPORTED if there are no devices in context that support image_format.
    c.devs
        .iter()
        .filter_map(|d| d.formats.get(format))
        .filter_map(|f| f.get(&desc.image_type))
        .find(|f| *f & filtered_flags == filtered_flags)
        .ok_or(CL_IMAGE_FORMAT_NOT_SUPPORTED)?;

    let props = Properties::from_ptr_raw(properties);
    // CL_INVALID_PROPERTY if a property name in properties is not a supported property name, if
    // the value specified for a supported property name is not valid, or if the same property name
    // is specified more than once.
    if props.len() > 1 {
        // we don't support any properties besides the 0 property
        return Err(CL_INVALID_PROPERTY);
    }

    Ok(MemBase::new_image(
        c,
        parent,
        desc.image_type,
        flags,
        format,
        desc,
        elem_size,
        host_ptr,
        props,
    )?
    .into_cl())
}

#[cl_entrypoint(clCreateImage)]
fn create_image(
    context: cl_context,
    flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    create_image_with_properties(
        context,
        ptr::null(),
        flags,
        image_format,
        image_desc,
        host_ptr,
    )
}

#[cl_entrypoint(clCreateImage2D)]
fn create_image_2d(
    context: cl_context,
    flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_width: usize,
    image_height: usize,
    image_row_pitch: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let image_desc = cl_image_desc {
        image_type: CL_MEM_OBJECT_IMAGE2D,
        image_width: image_width,
        image_height: image_height,
        image_row_pitch: image_row_pitch,
        ..Default::default()
    };

    create_image(context, flags, image_format, &image_desc, host_ptr)
}

#[cl_entrypoint(clCreateImage3D)]
fn create_image_3d(
    context: cl_context,
    flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_width: usize,
    image_height: usize,
    image_depth: usize,
    image_row_pitch: usize,
    image_slice_pitch: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let image_desc = cl_image_desc {
        image_type: CL_MEM_OBJECT_IMAGE3D,
        image_width: image_width,
        image_height: image_height,
        image_depth: image_depth,
        image_row_pitch: image_row_pitch,
        image_slice_pitch: image_slice_pitch,
        ..Default::default()
    };

    create_image(context, flags, image_format, &image_desc, host_ptr)
}

#[cl_entrypoint(clGetSupportedImageFormats)]
fn get_supported_image_formats(
    context: cl_context,
    flags: cl_mem_flags,
    image_type: cl_mem_object_type,
    num_entries: cl_uint,
    image_formats: *mut cl_image_format,
    num_image_formats: *mut cl_uint,
) -> CLResult<()> {
    let c = Context::ref_from_raw(context)?;

    // CL_INVALID_VALUE if flags
    validate_mem_flags(flags, true)?;

    // or image_type are not valid
    if !image_type_valid(image_type) {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE ... if num_entries is 0 and image_formats is not NULL.
    if num_entries == 0 && !image_formats.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let mut res = Vec::<cl_image_format>::new();
    let filtered_flags = filter_image_access_flags(flags);
    for dev in &c.devs {
        for f in &dev.formats {
            let s = f.1.get(&image_type).unwrap_or(&0);

            if filtered_flags & s == filtered_flags {
                res.push(*f.0);
            }
        }
    }

    res.sort();
    res.dedup();

    num_image_formats.write_checked(res.len() as cl_uint);
    unsafe { image_formats.copy_checked(res.as_ptr(), res.len()) };

    Ok(())
}

#[cl_info_entrypoint(clGetSamplerInfo)]
impl CLInfo<cl_sampler_info> for cl_sampler {
    fn query(&self, q: cl_sampler_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let sampler = Sampler::ref_from_raw(*self)?;
        Ok(match q {
            CL_SAMPLER_ADDRESSING_MODE => cl_prop::<cl_addressing_mode>(sampler.addressing_mode),
            CL_SAMPLER_CONTEXT => {
                // Note we use as_ptr here which doesn't increase the reference count.
                let ptr = Arc::as_ptr(&sampler.context);
                cl_prop::<cl_context>(cl_context::from_ptr(ptr))
            }
            CL_SAMPLER_FILTER_MODE => cl_prop::<cl_filter_mode>(sampler.filter_mode),
            CL_SAMPLER_NORMALIZED_COORDS => cl_prop::<bool>(sampler.normalized_coords),
            CL_SAMPLER_REFERENCE_COUNT => cl_prop::<cl_uint>(Sampler::refcnt(*self)?),
            CL_SAMPLER_PROPERTIES => {
                cl_prop::<&Option<Properties<cl_sampler_properties>>>(&sampler.props)
            }
            // CL_INVALID_VALUE if param_name is not one of the supported values
            _ => return Err(CL_INVALID_VALUE),
        })
    }
}

fn create_sampler_impl(
    context: cl_context,
    normalized_coords: cl_bool,
    addressing_mode: cl_addressing_mode,
    filter_mode: cl_filter_mode,
    props: Option<Properties<cl_sampler_properties>>,
) -> CLResult<cl_sampler> {
    let c = Context::arc_from_raw(context)?;

    // CL_INVALID_OPERATION if images are not supported by any device associated with context (i.e.
    // CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
    c.devs
        .iter()
        .find(|d| d.caps.has_images)
        .ok_or(CL_INVALID_OPERATION)?;

    // CL_INVALID_VALUE if addressing_mode, filter_mode, normalized_coords or a combination of these
    // arguments are not valid.
    validate_addressing_mode(addressing_mode)?;
    validate_filter_mode(filter_mode)?;

    let sampler = Sampler::new(
        c,
        check_cl_bool(normalized_coords).ok_or(CL_INVALID_VALUE)?,
        addressing_mode,
        filter_mode,
        props,
    );
    Ok(sampler.into_cl())
}

#[cl_entrypoint(clCreateSampler)]
fn create_sampler(
    context: cl_context,
    normalized_coords: cl_bool,
    addressing_mode: cl_addressing_mode,
    filter_mode: cl_filter_mode,
) -> CLResult<cl_sampler> {
    create_sampler_impl(
        context,
        normalized_coords,
        addressing_mode,
        filter_mode,
        None,
    )
}

#[cl_entrypoint(clCreateSamplerWithProperties)]
fn create_sampler_with_properties(
    context: cl_context,
    sampler_properties: *const cl_sampler_properties,
) -> CLResult<cl_sampler> {
    let mut normalized_coords = CL_TRUE;
    let mut addressing_mode = CL_ADDRESS_CLAMP;
    let mut filter_mode = CL_FILTER_NEAREST;

    // CL_INVALID_VALUE if the same property name is specified more than once.
    let sampler_properties = if sampler_properties.is_null() {
        None
    } else {
        let sampler_properties =
            Properties::from_ptr(sampler_properties).ok_or(CL_INVALID_VALUE)?;
        for p in &sampler_properties.props {
            match p.0 as u32 {
                CL_SAMPLER_ADDRESSING_MODE => addressing_mode = p.1 as u32,
                CL_SAMPLER_FILTER_MODE => filter_mode = p.1 as u32,
                CL_SAMPLER_NORMALIZED_COORDS => normalized_coords = p.1 as u32,
                // CL_INVALID_VALUE if the property name in sampler_properties is not a supported
                // property name
                _ => return Err(CL_INVALID_VALUE),
            }
        }
        Some(sampler_properties)
    };

    create_sampler_impl(
        context,
        normalized_coords,
        addressing_mode,
        filter_mode,
        sampler_properties,
    )
}

#[cl_entrypoint(clRetainSampler)]
fn retain_sampler(sampler: cl_sampler) -> CLResult<()> {
    Sampler::retain(sampler)
}

#[cl_entrypoint(clReleaseSampler)]
fn release_sampler(sampler: cl_sampler) -> CLResult<()> {
    Sampler::release(sampler)
}

#[cl_entrypoint(clEnqueueReadBuffer)]
fn enqueue_read_buffer(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_read: cl_bool,
    offset: usize,
    cb: usize,
    ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let b = Buffer::arc_from_raw(buffer)?;
    let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if the region being read or written specified by (offset, size) is out of
    // bounds or if ptr is a NULL value.
    if offset + cb > b.size || ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if b.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_OPERATION if clEnqueueReadBuffer is called on buffer which has been created with
    // CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(b.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { MutMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_READ_BUFFER,
        evs,
        event,
        block,
        Box::new(move |q, ctx| b.read(q, ctx, offset, ptr, cb)),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueWriteBuffer)]
fn enqueue_write_buffer(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_write: cl_bool,
    offset: usize,
    cb: usize,
    ptr: *const ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let b = Buffer::arc_from_raw(buffer)?;
    let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if the region being read or written specified by (offset, size) is out of
    // bounds or if ptr is a NULL value.
    if offset + cb > b.size || ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if b.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_OPERATION if clEnqueueWriteBuffer is called on buffer which has been created with
    // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(b.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { ConstMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_WRITE_BUFFER,
        evs,
        event,
        block,
        Box::new(move |q, ctx| b.write(q, ctx, offset, ptr, cb)),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueCopyBuffer)]
fn enqueue_copy_buffer(
    command_queue: cl_command_queue,
    src_buffer: cl_mem,
    dst_buffer: cl_mem,
    src_offset: usize,
    dst_offset: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let src = Buffer::arc_from_raw(src_buffer)?;
    let dst = Buffer::arc_from_raw(dst_buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_buffer
    // are not the same
    if q.context != src.context || q.context != dst.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_VALUE if src_offset, dst_offset, size, src_offset + size or dst_offset + size
    // require accessing elements outside the src_buffer and dst_buffer buffer objects respectively.
    if src_offset + size > src.size || dst_offset + size > dst.size {
        return Err(CL_INVALID_VALUE);
    }

    // CL_MEM_COPY_OVERLAP if src_buffer and dst_buffer are the same buffer or sub-buffer object
    // and the source and destination regions overlap or if src_buffer and dst_buffer are different
    // sub-buffers of the same associated buffer object and they overlap. The regions overlap if
    // src_offset ≤ dst_offset ≤ src_offset + size - 1 or if dst_offset ≤ src_offset ≤ dst_offset + size - 1.
    if src.has_same_parent(&dst) {
        let src_offset = src_offset + src.offset;
        let dst_offset = dst_offset + dst.offset;

        if (src_offset <= dst_offset && dst_offset < src_offset + size)
            || (dst_offset <= src_offset && src_offset < dst_offset + size)
        {
            return Err(CL_MEM_COPY_OVERLAP);
        }
    }
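    // Worked example (illustrative): with src_offset 0, dst_offset 64 and
    // size 128, the copied ranges [0, 128) and [64, 192) share [64, 128), so
    // src_offset <= dst_offset < src_offset + size holds and the overlap is
    // reported. With size 64 the ranges [0, 64) and [64, 128) merely touch
    // and the copy is allowed.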
1185 
1186     create_and_queue(
1187         q,
1188         CL_COMMAND_COPY_BUFFER,
1189         evs,
1190         event,
1191         false,
1192         Box::new(move |q, ctx| src.copy_to_buffer(q, ctx, &dst, src_offset, dst_offset, size)),
1193     )
1194 
1195     // TODO
1196     //• CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1197     //• CL_MISALIGNED_SUB_BUFFER_OFFSET if dst_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1198     //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_buffer or dst_buffer.
1199 }
1200 
1201 #[cl_entrypoint(clEnqueueReadBufferRect)]
1202 fn enqueue_read_buffer_rect(
1203     command_queue: cl_command_queue,
1204     buffer: cl_mem,
1205     blocking_read: cl_bool,
1206     buffer_origin: *const usize,
1207     host_origin: *const usize,
1208     region: *const usize,
1209     mut buffer_row_pitch: usize,
1210     mut buffer_slice_pitch: usize,
1211     mut host_row_pitch: usize,
1212     mut host_slice_pitch: usize,
1213     ptr: *mut ::std::os::raw::c_void,
1214     num_events_in_wait_list: cl_uint,
1215     event_wait_list: *const cl_event,
1216     event: *mut cl_event,
1217 ) -> CLResult<()> {
1218     let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
1219     let q = Queue::arc_from_raw(command_queue)?;
1220     let buf = Buffer::arc_from_raw(buffer)?;
1221     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1222 
1223     // CL_INVALID_OPERATION if clEnqueueReadBufferRect is called on buffer which has been created
1224     // with CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
1225     if bit_check(buf.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
1226         return Err(CL_INVALID_OPERATION);
1227     }
1228 
1229     // CL_INVALID_VALUE if buffer_origin, host_origin, or region is NULL.
1230     if buffer_origin.is_null() ||
1231       host_origin.is_null() ||
1232       region.is_null() ||
1233       // CL_INVALID_VALUE if ptr is NULL.
1234       ptr.is_null()
1235     {
1236         return Err(CL_INVALID_VALUE);
1237     }
1238 
1239     let r = unsafe { CLVec::from_raw(region) };
1240     let buf_ori = unsafe { CLVec::from_raw(buffer_origin) };
1241     let host_ori = unsafe { CLVec::from_raw(host_origin) };
1242 
1243     // CL_INVALID_VALUE if any region array element is 0.
1244     if r.contains(&0) ||
1245       // CL_INVALID_VALUE if buffer_row_pitch is not 0 and is less than region[0].
1246       buffer_row_pitch != 0 && buffer_row_pitch < r[0] ||
1247       // CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
1248       host_row_pitch != 0 && host_row_pitch < r[0]
1249     {
1250         return Err(CL_INVALID_VALUE);
1251     }
1252 
1253     // If buffer_row_pitch is 0, buffer_row_pitch is computed as region[0].
1254     if buffer_row_pitch == 0 {
1255         buffer_row_pitch = r[0];
1256     }
1257 
1258     // If host_row_pitch is 0, host_row_pitch is computed as region[0].
1259     if host_row_pitch == 0 {
1260         host_row_pitch = r[0];
1261     }
1262 
1263     // CL_INVALID_VALUE if buffer_slice_pitch is not 0 and is less than region[1] × buffer_row_pitch or is not a multiple of buffer_row_pitch.
1264     if buffer_slice_pitch != 0 && (buffer_slice_pitch < r[1] * buffer_row_pitch || buffer_slice_pitch % buffer_row_pitch != 0) ||
1265       // CL_INVALID_VALUE if host_slice_pitch is not 0 and is less than region[1] × host_row_pitch or is not a multiple of host_row_pitch.
1266       host_slice_pitch != 0 && (host_slice_pitch < r[1] * host_row_pitch || host_slice_pitch % host_row_pitch != 0)
1267     {
1268         return Err(CL_INVALID_VALUE);
1269     }
1270 
1271     // If buffer_slice_pitch is 0, buffer_slice_pitch is computed as region[1] × buffer_row_pitch.
1272     if buffer_slice_pitch == 0 {
1273         buffer_slice_pitch = r[1] * buffer_row_pitch;
1274     }
1275 
1276     // If host_slice_pitch is 0, host_slice_pitch is computed as region[1] × host_row_pitch.
1277     if host_slice_pitch == 0 {
1278         host_slice_pitch = r[1] * host_row_pitch;
1279     }
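    // Worked example (editorial): for region = [16, 4, 2] with all four
    // pitches passed as 0, the defaults above yield buffer_row_pitch =
    // host_row_pitch = 16 and buffer_slice_pitch = host_slice_pitch =
    // 4 * 16 = 64, i.e. a tightly packed 16 x 4 x 2 byte region.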
1280 
1281     // CL_INVALID_VALUE if the region being read or written specified by (buffer_origin, region,
1282     // buffer_row_pitch, buffer_slice_pitch) is out of bounds.
1283     if CLVec::calc_size(r + buf_ori, [1, buffer_row_pitch, buffer_slice_pitch]) > buf.size {
1284         return Err(CL_INVALID_VALUE);
1285     }
1286 
1287     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1288     if q.context != buf.context {
1289         return Err(CL_INVALID_CONTEXT);
1290     }
1291 
1292     // SAFETY: it's required that applications do not cause data races
1293     let ptr = unsafe { MutMemoryPtr::from_ptr(ptr) };
1294     create_and_queue(
1295         q,
1296         CL_COMMAND_READ_BUFFER_RECT,
1297         evs,
1298         event,
1299         block,
1300         Box::new(move |q, ctx| {
1301             buf.read_rect(
1302                 ptr,
1303                 q,
1304                 ctx,
1305                 &r,
1306                 &buf_ori,
1307                 buffer_row_pitch,
1308                 buffer_slice_pitch,
1309                 &host_ori,
1310                 host_row_pitch,
1311                 host_slice_pitch,
1312             )
1313         }),
1314     )
1315 
1316     // TODO
1317     // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1318 }
1319 
1320 #[cl_entrypoint(clEnqueueWriteBufferRect)]
1321 fn enqueue_write_buffer_rect(
1322     command_queue: cl_command_queue,
1323     buffer: cl_mem,
1324     blocking_write: cl_bool,
1325     buffer_origin: *const usize,
1326     host_origin: *const usize,
1327     region: *const usize,
1328     mut buffer_row_pitch: usize,
1329     mut buffer_slice_pitch: usize,
1330     mut host_row_pitch: usize,
1331     mut host_slice_pitch: usize,
1332     ptr: *const ::std::os::raw::c_void,
1333     num_events_in_wait_list: cl_uint,
1334     event_wait_list: *const cl_event,
1335     event: *mut cl_event,
1336 ) -> CLResult<()> {
1337     let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
1338     let q = Queue::arc_from_raw(command_queue)?;
1339     let buf = Buffer::arc_from_raw(buffer)?;
1340     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1341 
1342     // CL_INVALID_OPERATION if clEnqueueWriteBufferRect is called on buffer which has been created
1343     // with CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
1344     if bit_check(buf.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
1345         return Err(CL_INVALID_OPERATION);
1346     }
1347 
1348     // CL_INVALID_VALUE if buffer_origin, host_origin, or region is NULL.
1349     if buffer_origin.is_null() ||
1350       host_origin.is_null() ||
1351       region.is_null() ||
1352       // CL_INVALID_VALUE if ptr is NULL.
1353       ptr.is_null()
1354     {
1355         return Err(CL_INVALID_VALUE);
1356     }
1357 
1358     let r = unsafe { CLVec::from_raw(region) };
1359     let buf_ori = unsafe { CLVec::from_raw(buffer_origin) };
1360     let host_ori = unsafe { CLVec::from_raw(host_origin) };
1361 
1362     // CL_INVALID_VALUE if any region array element is 0.
1363     if r.contains(&0) ||
1364       // CL_INVALID_VALUE if buffer_row_pitch is not 0 and is less than region[0].
1365       buffer_row_pitch != 0 && buffer_row_pitch < r[0] ||
1366       // CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
1367       host_row_pitch != 0 && host_row_pitch < r[0]
1368     {
1369         return Err(CL_INVALID_VALUE);
1370     }
1371 
1372     // If buffer_row_pitch is 0, buffer_row_pitch is computed as region[0].
1373     if buffer_row_pitch == 0 {
1374         buffer_row_pitch = r[0];
1375     }
1376 
1377     // If host_row_pitch is 0, host_row_pitch is computed as region[0].
1378     if host_row_pitch == 0 {
1379         host_row_pitch = r[0];
1380     }
1381 
1382     // CL_INVALID_VALUE if buffer_slice_pitch is not 0 and is less than region[1] × buffer_row_pitch or is not a multiple of buffer_row_pitch.
1383     if buffer_slice_pitch != 0 && (buffer_slice_pitch < r[1] * buffer_row_pitch || buffer_slice_pitch % buffer_row_pitch != 0) ||
1384       // CL_INVALID_VALUE if host_slice_pitch is not 0 and is less than region[1] × host_row_pitch or is not a multiple of host_row_pitch.
1385       host_slice_pitch != 0 && (host_slice_pitch < r[1] * host_row_pitch || host_slice_pitch % host_row_pitch != 0)
1386     {
1387         return Err(CL_INVALID_VALUE);
1388     }
1389 
1390     // If buffer_slice_pitch is 0, buffer_slice_pitch is computed as region[1] × buffer_row_pitch.
1391     if buffer_slice_pitch == 0 {
1392         buffer_slice_pitch = r[1] * buffer_row_pitch;
1393     }
1394 
1395     // If host_slice_pitch is 0, host_slice_pitch is computed as region[1] × host_row_pitch.
1396     if host_slice_pitch == 0 {
1397         host_slice_pitch = r[1] * host_row_pitch;
1398     }
1399 
1400     // CL_INVALID_VALUE if the region being read or written specified by (buffer_origin, region,
1401     // buffer_row_pitch, buffer_slice_pitch) is out of bounds.
1402     if CLVec::calc_size(r + buf_ori, [1, buffer_row_pitch, buffer_slice_pitch]) > buf.size {
1403         return Err(CL_INVALID_VALUE);
1404     }
1405 
1406     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1407     if q.context != buf.context {
1408         return Err(CL_INVALID_CONTEXT);
1409     }
1410 
1411     // SAFETY: it's required that applications do not cause data races
1412     let ptr = unsafe { ConstMemoryPtr::from_ptr(ptr) };
1413     create_and_queue(
1414         q,
1415         CL_COMMAND_WRITE_BUFFER_RECT,
1416         evs,
1417         event,
1418         block,
1419         Box::new(move |q, ctx| {
1420             buf.write_rect(
1421                 ptr,
1422                 q,
1423                 ctx,
1424                 &r,
1425                 &host_ori,
1426                 host_row_pitch,
1427                 host_slice_pitch,
1428                 &buf_ori,
1429                 buffer_row_pitch,
1430                 buffer_slice_pitch,
1431             )
1432         }),
1433     )
1434 
1435     // TODO
1436     // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1437 }
1438 
1439 #[cl_entrypoint(clEnqueueCopyBufferRect)]
1440 fn enqueue_copy_buffer_rect(
1441     command_queue: cl_command_queue,
1442     src_buffer: cl_mem,
1443     dst_buffer: cl_mem,
1444     src_origin: *const usize,
1445     dst_origin: *const usize,
1446     region: *const usize,
1447     mut src_row_pitch: usize,
1448     mut src_slice_pitch: usize,
1449     mut dst_row_pitch: usize,
1450     mut dst_slice_pitch: usize,
1451     num_events_in_wait_list: cl_uint,
1452     event_wait_list: *const cl_event,
1453     event: *mut cl_event,
1454 ) -> CLResult<()> {
1455     let q = Queue::arc_from_raw(command_queue)?;
1456     let src = Buffer::arc_from_raw(src_buffer)?;
1457     let dst = Buffer::arc_from_raw(dst_buffer)?;
1458     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1459 
1460     // CL_INVALID_VALUE if src_origin, dst_origin, or region is NULL.
1461     if src_origin.is_null() || dst_origin.is_null() || region.is_null() {
1462         return Err(CL_INVALID_VALUE);
1463     }
1464 
1465     let r = unsafe { CLVec::from_raw(region) };
1466     let src_ori = unsafe { CLVec::from_raw(src_origin) };
1467     let dst_ori = unsafe { CLVec::from_raw(dst_origin) };
1468 
1469     // CL_INVALID_VALUE if any region array element is 0.
1470     if r.contains(&0) ||
1471       // CL_INVALID_VALUE if src_row_pitch is not 0 and is less than region[0].
1472       src_row_pitch != 0 && src_row_pitch < r[0] ||
1473       // CL_INVALID_VALUE if dst_row_pitch is not 0 and is less than region[0].
1474       dst_row_pitch != 0 && dst_row_pitch < r[0]
1475     {
1476         return Err(CL_INVALID_VALUE);
1477     }
1478 
1479     // If src_row_pitch is 0, src_row_pitch is computed as region[0].
1480     if src_row_pitch == 0 {
1481         src_row_pitch = r[0];
1482     }
1483 
1484     // If dst_row_pitch is 0, dst_row_pitch is computed as region[0].
1485     if dst_row_pitch == 0 {
1486         dst_row_pitch = r[0];
1487     }
1488 
1489     // CL_INVALID_VALUE if src_slice_pitch is not 0 and is less than region[1] × src_row_pitch
1490     if src_slice_pitch != 0 && src_slice_pitch < r[1] * src_row_pitch ||
1491       // CL_INVALID_VALUE if dst_slice_pitch is not 0 and is less than region[1] × dst_row_pitch
1492       dst_slice_pitch != 0 && dst_slice_pitch < r[1] * dst_row_pitch ||
1493       // if src_slice_pitch is not 0 and is not a multiple of src_row_pitch.
1494       src_slice_pitch != 0 && src_slice_pitch % src_row_pitch != 0 ||
1495       // if dst_slice_pitch is not 0 and is not a multiple of dst_row_pitch.
1496       dst_slice_pitch != 0 && dst_slice_pitch % dst_row_pitch != 0
1497     {
1498         return Err(CL_INVALID_VALUE);
1499     }
1500 
1501     // If src_slice_pitch is 0, src_slice_pitch is computed as region[1] × src_row_pitch.
1502     if src_slice_pitch == 0 {
1503         src_slice_pitch = r[1] * src_row_pitch;
1504     }
1505 
1506     // If dst_slice_pitch is 0, dst_slice_pitch is computed as region[1] × dst_row_pitch.
1507     if dst_slice_pitch == 0 {
1508         dst_slice_pitch = r[1] * dst_row_pitch;
1509     }
1510 
1511     // CL_INVALID_VALUE if src_buffer and dst_buffer are the same buffer object and src_slice_pitch
1512     // is not equal to dst_slice_pitch and src_row_pitch is not equal to dst_row_pitch.
1513     if src_buffer == dst_buffer
1514         && src_slice_pitch != dst_slice_pitch
1515         && src_row_pitch != dst_row_pitch
1516     {
1517         return Err(CL_INVALID_VALUE);
1518     }
1519 
1520     // CL_INVALID_VALUE if (src_origin, region, src_row_pitch, src_slice_pitch) or (dst_origin,
1521     // region, dst_row_pitch, dst_slice_pitch) require accessing elements outside the src_buffer
1522     // and dst_buffer buffer objects respectively.
1523     if CLVec::calc_size(r + src_ori, [1, src_row_pitch, src_slice_pitch]) > src.size
1524         || CLVec::calc_size(r + dst_ori, [1, dst_row_pitch, dst_slice_pitch]) > dst.size
1525     {
1526         return Err(CL_INVALID_VALUE);
1527     }
1528 
1529     // CL_MEM_COPY_OVERLAP if src_buffer and dst_buffer are the same buffer or sub-buffer object and
1530     // the source and destination regions overlap or if src_buffer and dst_buffer are different
1531     // sub-buffers of the same associated buffer object and they overlap.
1532     if src.has_same_parent(&dst)
1533         && check_copy_overlap(
1534             &src_ori,
1535             src.offset,
1536             &dst_ori,
1537             dst.offset,
1538             &r,
1539             src_row_pitch,
1540             src_slice_pitch,
1541         )
1542     {
1543         return Err(CL_MEM_COPY_OVERLAP);
1544     }
1545 
1546     // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_buffer
1547     // are not the same
1548     if src.context != q.context || dst.context != q.context {
1549         return Err(CL_INVALID_CONTEXT);
1550     }
1551 
1552     create_and_queue(
1553         q,
1554         CL_COMMAND_COPY_BUFFER_RECT,
1555         evs,
1556         event,
1557         false,
1558         Box::new(move |q, ctx| {
1559             src.copy_rect(
1560                 &dst,
1561                 q,
1562                 ctx,
1563                 &r,
1564                 &src_ori,
1565                 src_row_pitch,
1566                 src_slice_pitch,
1567                 &dst_ori,
1568                 dst_row_pitch,
1569                 dst_slice_pitch,
1570             )
1571         }),
1572     )
1573 
1574     // TODO
1575     // CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1576 }
1577 
1578 #[cl_entrypoint(clEnqueueFillBuffer)]
1579 fn enqueue_fill_buffer(
1580     command_queue: cl_command_queue,
1581     buffer: cl_mem,
1582     pattern: *const ::std::os::raw::c_void,
1583     pattern_size: usize,
1584     offset: usize,
1585     size: usize,
1586     num_events_in_wait_list: cl_uint,
1587     event_wait_list: *const cl_event,
1588     event: *mut cl_event,
1589 ) -> CLResult<()> {
1590     let q = Queue::arc_from_raw(command_queue)?;
1591     let b = Buffer::arc_from_raw(buffer)?;
1592     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1593 
1594     // CL_INVALID_VALUE if offset or offset + size require accessing elements outside the buffer
1595     // buffer object respectively.
1596     if offset + size > b.size {
1597         return Err(CL_INVALID_VALUE);
1598     }
1599 
1600     // CL_INVALID_VALUE if pattern is NULL or if pattern_size is 0 or if pattern_size is not one of
1601     // { 1, 2, 4, 8, 16, 32, 64, 128 }.
1602     if pattern.is_null() || pattern_size.count_ones() != 1 || pattern_size > 128 {
1603         return Err(CL_INVALID_VALUE);
1604     }
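    // A usize with exactly one bit set is a power of two, so the check above
    // accepts exactly { 1, 2, 4, ..., 128 }. Editorial sketch:
    //
    //   assert_eq!(16usize.count_ones(), 1); // valid pattern_size
    //   assert_ne!(24usize.count_ones(), 1); // rejected: two bits set
    //   assert_ne!(0usize.count_ones(), 1);  // rejected: pattern_size of 0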
1605 
1606     // CL_INVALID_VALUE if offset and size are not a multiple of pattern_size.
1607     if offset % pattern_size != 0 || size % pattern_size != 0 {
1608         return Err(CL_INVALID_VALUE);
1609     }
1610 
1611     // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
1612     if b.context != q.context {
1613         return Err(CL_INVALID_CONTEXT);
1614     }
1615 
1616     // we have to copy memory
1617     let pattern = unsafe { slice::from_raw_parts(pattern.cast(), pattern_size).to_vec() };
1618     create_and_queue(
1619         q,
1620         CL_COMMAND_FILL_BUFFER,
1621         evs,
1622         event,
1623         false,
1624         Box::new(move |q, ctx| b.fill(q, ctx, &pattern, offset, size)),
1625     )
1626 
1627     // TODO
1628     //• CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
1629     //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with buffer.
1630 }
1631 
1632 #[cl_entrypoint(clEnqueueMapBuffer)]
1633 fn enqueue_map_buffer(
1634     command_queue: cl_command_queue,
1635     buffer: cl_mem,
1636     blocking_map: cl_bool,
1637     map_flags: cl_map_flags,
1638     offset: usize,
1639     size: usize,
1640     num_events_in_wait_list: cl_uint,
1641     event_wait_list: *const cl_event,
1642     event: *mut cl_event,
1643 ) -> CLResult<*mut c_void> {
1644     let q = Queue::arc_from_raw(command_queue)?;
1645     let b = Buffer::arc_from_raw(buffer)?;
1646     let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
1647     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1648 
1649     validate_map_flags(&b, map_flags)?;
1650 
1651     // CL_INVALID_VALUE if region being mapped given by (offset, size) is out of bounds or if size
1652     // is 0
1653     if offset >= b.size || size > b.size - offset || size == 0 {
1654         return Err(CL_INVALID_VALUE);
1655     }
1656 
1657     // CL_INVALID_CONTEXT if context associated with command_queue and buffer are not the same
1658     if b.context != q.context {
1659         return Err(CL_INVALID_CONTEXT);
1660     }
1661 
1662     let ptr = b.map(size, offset, map_flags != CL_MAP_READ.into())?;
1663     create_and_queue(
1664         q,
1665         CL_COMMAND_MAP_BUFFER,
1666         evs,
1667         event,
1668         block,
1669         Box::new(move |q, ctx| {
1670             if map_flags != CL_MAP_WRITE_INVALIDATE_REGION.into() {
1671                 b.sync_map(q, ctx, ptr)
1672             } else {
1673                 Ok(())
1674             }
1675         }),
1676     )?;
1677 
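    // Note that the mapping pointer is returned right away even for
    // non-blocking maps: only the content synchronization (sync_map) is
    // deferred to the queued command, and it is skipped entirely for
    // CL_MAP_WRITE_INVALIDATE_REGION because the previous content may be
    // discarded.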
1678     Ok(ptr.as_ptr())
1679 
1680     // TODO
1681     // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for the device associated with queue. This error code is missing before version 1.1.
1682     // CL_MAP_FAILURE if there is a failure to map the requested region into the host address space. This error cannot occur for buffer objects created with CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR.
1683     // CL_INVALID_OPERATION if mapping would lead to overlapping regions being mapped for writing.
1684 }
1685 
1686 #[cl_entrypoint(clEnqueueReadImage)]
1687 fn enqueue_read_image(
1688     command_queue: cl_command_queue,
1689     image: cl_mem,
1690     blocking_read: cl_bool,
1691     origin: *const usize,
1692     region: *const usize,
1693     mut row_pitch: usize,
1694     mut slice_pitch: usize,
1695     ptr: *mut ::std::os::raw::c_void,
1696     num_events_in_wait_list: cl_uint,
1697     event_wait_list: *const cl_event,
1698     event: *mut cl_event,
1699 ) -> CLResult<()> {
1700     let q = Queue::arc_from_raw(command_queue)?;
1701     let i = Image::arc_from_raw(image)?;
1702     let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
1703     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1704     let pixel_size = i.image_format.pixel_size().unwrap() as usize;
1705 
1706     // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
1707     if i.context != q.context {
1708         return Err(CL_INVALID_CONTEXT);
1709     }
1710 
1711     // CL_INVALID_OPERATION if clEnqueueReadImage is called on image which has been created with
1712     // CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
1713     if bit_check(i.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
1714         return Err(CL_INVALID_OPERATION);
1715     }
1716 
1717     // CL_INVALID_VALUE if origin or region is NULL.
1718     // CL_INVALID_VALUE if ptr is NULL.
1719     if origin.is_null() || region.is_null() || ptr.is_null() {
1720         return Err(CL_INVALID_VALUE);
1721     }
1722 
1723     // CL_INVALID_VALUE if image is a 1D or 2D image and slice_pitch or input_slice_pitch is not 0.
1724     if !i.image_desc.has_slice() && slice_pitch != 0 {
1725         return Err(CL_INVALID_VALUE);
1726     }
1727 
1728     let r = unsafe { CLVec::from_raw(region) };
1729     let o = unsafe { CLVec::from_raw(origin) };
1730 
1731     // CL_INVALID_VALUE if the region being read or written specified by origin and region is out of
1732     // bounds.
1733     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
1734     // description for origin and region.
1735     validate_image_bounds(&i, o, r)?;
1736 
1737     // If row_pitch (or input_row_pitch) is set to 0, the appropriate row pitch is calculated based
1738     // on the size of each element in bytes multiplied by width.
1739     if row_pitch == 0 {
1740         row_pitch = r[0] * pixel_size;
1741     }
1742 
1743     // If slice_pitch (or input_slice_pitch) is set to 0, the appropriate slice pitch is calculated
1744     // based on the row_pitch × height.
1745     if slice_pitch == 0 {
1746         slice_pitch = row_pitch * r[1];
1747     }
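    // Worked example (editorial): for a CL_RGBA + CL_UNORM_INT8 image
    // (pixel_size = 4) and region = [64, 32, 2] with both pitches passed as 0,
    // this yields row_pitch = 64 * 4 = 256 and slice_pitch = 256 * 32 = 8192
    // bytes.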
1748 
1749     // SAFETY: it's required that applications do not cause data races
1750     let ptr = unsafe { MutMemoryPtr::from_ptr(ptr) };
1751     create_and_queue(
1752         q,
1753         CL_COMMAND_READ_IMAGE,
1754         evs,
1755         event,
1756         block,
1757         Box::new(move |q, ctx| i.read(ptr, q, ctx, &r, &o, row_pitch, slice_pitch)),
1758     )
1759 
1760     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
1761     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
1762     //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
1763 }
1764 
1765 #[cl_entrypoint(clEnqueueWriteImage)]
1766 fn enqueue_write_image(
1767     command_queue: cl_command_queue,
1768     image: cl_mem,
1769     blocking_write: cl_bool,
1770     origin: *const usize,
1771     region: *const usize,
1772     mut row_pitch: usize,
1773     mut slice_pitch: usize,
1774     ptr: *const ::std::os::raw::c_void,
1775     num_events_in_wait_list: cl_uint,
1776     event_wait_list: *const cl_event,
1777     event: *mut cl_event,
1778 ) -> CLResult<()> {
1779     let q = Queue::arc_from_raw(command_queue)?;
1780     let i = Image::arc_from_raw(image)?;
1781     let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
1782     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1783     let pixel_size = i.image_format.pixel_size().unwrap() as usize;
1784 
1785     // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
1786     if i.context != q.context {
1787         return Err(CL_INVALID_CONTEXT);
1788     }
1789 
1790     // CL_INVALID_OPERATION if clEnqueueWriteImage is called on image which has been created with
1791     // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
1792     if bit_check(i.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
1793         return Err(CL_INVALID_OPERATION);
1794     }
1795 
1796     // CL_INVALID_VALUE if origin or region is NULL.
1797     // CL_INVALID_VALUE if ptr is NULL.
1798     if origin.is_null() || region.is_null() || ptr.is_null() {
1799         return Err(CL_INVALID_VALUE);
1800     }
1801 
1802     // CL_INVALID_VALUE if image is a 1D or 2D image and slice_pitch or input_slice_pitch is not 0.
1803     if !i.image_desc.has_slice() && slice_pitch != 0 {
1804         return Err(CL_INVALID_VALUE);
1805     }
1806 
1807     let r = unsafe { CLVec::from_raw(region) };
1808     let o = unsafe { CLVec::from_raw(origin) };
1809 
1810     // CL_INVALID_VALUE if the region being read or written specified by origin and region is out of
1811     // bounds.
1812     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
1813     // description for origin and region.
1814     validate_image_bounds(&i, o, r)?;
1815 
1816     // If row_pitch (or input_row_pitch) is set to 0, the appropriate row pitch is calculated based
1817     // on the size of each element in bytes multiplied by width.
1818     if row_pitch == 0 {
1819         row_pitch = r[0] * pixel_size;
1820     }
1821 
1822     // If slice_pitch (or input_slice_pitch) is set to 0, the appropriate slice pitch is calculated
1823     // based on the row_pitch × height.
1824     if slice_pitch == 0 {
1825         slice_pitch = row_pitch * r[1];
1826     }
1827 
1828     // SAFETY: it's required that applications do not cause data races
1829     let ptr = unsafe { ConstMemoryPtr::from_ptr(ptr) };
1830     create_and_queue(
1831         q,
1832         CL_COMMAND_WRITE_IMAGE,
1833         evs,
1834         event,
1835         block,
1836         Box::new(move |q, ctx| i.write(ptr, q, ctx, &r, row_pitch, slice_pitch, &o)),
1837     )
1838 
1839     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
1840     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
1841     //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
1842 }
1843 
1844 #[cl_entrypoint(clEnqueueCopyImage)]
1845 fn enqueue_copy_image(
1846     command_queue: cl_command_queue,
1847     src_image: cl_mem,
1848     dst_image: cl_mem,
1849     src_origin: *const usize,
1850     dst_origin: *const usize,
1851     region: *const usize,
1852     num_events_in_wait_list: cl_uint,
1853     event_wait_list: *const cl_event,
1854     event: *mut cl_event,
1855 ) -> CLResult<()> {
1856     let q = Queue::arc_from_raw(command_queue)?;
1857     let src_image = Image::arc_from_raw(src_image)?;
1858     let dst_image = Image::arc_from_raw(dst_image)?;
1859     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1860 
1861     // CL_INVALID_CONTEXT if the context associated with command_queue, src_image and dst_image are not the same
1862     if src_image.context != q.context || dst_image.context != q.context {
1863         return Err(CL_INVALID_CONTEXT);
1864     }
1865 
1866     // CL_IMAGE_FORMAT_MISMATCH if src_image and dst_image do not use the same image format.
1867     if src_image.image_format != dst_image.image_format {
1868         return Err(CL_IMAGE_FORMAT_MISMATCH);
1869     }
1870 
1871     // CL_INVALID_VALUE if src_origin, dst_origin, or region is NULL.
1872     if src_origin.is_null() || dst_origin.is_null() || region.is_null() {
1873         return Err(CL_INVALID_VALUE);
1874     }
1875 
1876     let region = unsafe { CLVec::from_raw(region) };
1877     let dst_origin = unsafe { CLVec::from_raw(dst_origin) };
1878     let src_origin = unsafe { CLVec::from_raw(src_origin) };
1879 
1880     // CL_INVALID_VALUE if the 2D or 3D rectangular region specified by src_origin and
1881     // src_origin + region refers to a region outside src_image, or if the 2D or 3D rectangular
1882     // region specified by dst_origin and dst_origin + region refers to a region outside dst_image.
1883     // CL_INVALID_VALUE if values in src_origin, dst_origin and region do not follow rules described
1884     // in the argument description for src_origin, dst_origin and region.
1885     validate_image_bounds(&src_image, src_origin, region)?;
1886     validate_image_bounds(&dst_image, dst_origin, region)?;
1887 
1888     create_and_queue(
1889         q,
1890         CL_COMMAND_COPY_IMAGE,
1891         evs,
1892         event,
1893         false,
1894         Box::new(move |q, ctx| {
1895             src_image.copy_to_image(q, ctx, &dst_image, src_origin, dst_origin, &region)
1896         }),
1897     )
1898 
1899     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for src_image or dst_image are not supported by device associated with queue.
1900     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for src_image or dst_image are not supported by device associated with queue.
1901     //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
1902     //• CL_MEM_COPY_OVERLAP if src_image and dst_image are the same image object and the source and destination regions overlap.
1903 }
1904 
1905 #[cl_entrypoint(clEnqueueFillImage)]
1906 fn enqueue_fill_image(
1907     command_queue: cl_command_queue,
1908     image: cl_mem,
1909     fill_color: *const ::std::os::raw::c_void,
1910     origin: *const [usize; 3],
1911     region: *const [usize; 3],
1912     num_events_in_wait_list: cl_uint,
1913     event_wait_list: *const cl_event,
1914     event: *mut cl_event,
1915 ) -> CLResult<()> {
1916     let q = Queue::arc_from_raw(command_queue)?;
1917     let i = Image::arc_from_raw(image)?;
1918     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1919 
1920     // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
1921     if i.context != q.context {
1922         return Err(CL_INVALID_CONTEXT);
1923     }
1924 
1925     // CL_INVALID_VALUE if fill_color is NULL.
1926     // CL_INVALID_VALUE if origin or region is NULL.
1927     if fill_color.is_null() || origin.is_null() || region.is_null() {
1928         return Err(CL_INVALID_VALUE);
1929     }
1930 
1931     let region = unsafe { CLVec::from_raw(region.cast()) };
1932     let origin = unsafe { CLVec::from_raw(origin.cast()) };
1933 
1934     // CL_INVALID_VALUE if the region being filled as specified by origin and region is out of
1935     // bounds.
1936     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
1937     // description for origin and region.
1938     validate_image_bounds(&i, origin, region)?;
1939 
1940     // we have to copy memory and it's always a 4 component int value
1941     // TODO but not for CL_DEPTH
1942     let fill_color = unsafe { slice::from_raw_parts(fill_color.cast(), 4).to_vec() };
1943     create_and_queue(
1944         q,
1945         CL_COMMAND_FILL_IMAGE,
1946         evs,
1947         event,
1948         false,
1949         Box::new(move |q, ctx| i.fill(q, ctx, &fill_color, &origin, &region)),
1950     )
1951 
1952     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
1953     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for
1954     // image are not supported by device associated with queue.
1955 }
1956 
1957 #[cl_entrypoint(clEnqueueCopyBufferToImage)]
1958 fn enqueue_copy_buffer_to_image(
1959     command_queue: cl_command_queue,
1960     src_buffer: cl_mem,
1961     dst_image: cl_mem,
1962     src_offset: usize,
1963     dst_origin: *const usize,
1964     region: *const usize,
1965     num_events_in_wait_list: cl_uint,
1966     event_wait_list: *const cl_event,
1967     event: *mut cl_event,
1968 ) -> CLResult<()> {
1969     let q = Queue::arc_from_raw(command_queue)?;
1970     let src = Buffer::arc_from_raw(src_buffer)?;
1971     let dst = Image::arc_from_raw(dst_image)?;
1972     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
1973 
1974     // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_image
1975     // are not the same
1976     if q.context != src.context || q.context != dst.context {
1977         return Err(CL_INVALID_CONTEXT);
1978     }
1979 
1980     // CL_INVALID_VALUE if dst_origin or region is NULL.
1981     if dst_origin.is_null() || region.is_null() {
1982         return Err(CL_INVALID_VALUE);
1983     }
1984 
1985     let region = unsafe { CLVec::from_raw(region) };
1986     let dst_origin = unsafe { CLVec::from_raw(dst_origin) };
1987 
1988     // CL_INVALID_VALUE if values in dst_origin and region do not follow rules described in the
1989     // argument description for dst_origin and region.
1990     // CL_INVALID_VALUE if the 1D, 2D or 3D rectangular region specified by dst_origin and
1991     // dst_origin + region refers to a region outside dst_image.
1992     validate_image_bounds(&dst, dst_origin, region)?;
1993 
1994     create_and_queue(
1995         q,
1996         CL_COMMAND_COPY_BUFFER_TO_IMAGE,
1997         evs,
1998         event,
1999         false,
2000         Box::new(move |q, ctx| src.copy_to_image(q, ctx, &dst, src_offset, dst_origin, &region)),
2001     )
2002 
2003     //• CL_INVALID_MEM_OBJECT if src_buffer is not a valid buffer object or dst_image is not a valid image object or if dst_image is a 1D image buffer object created from src_buffer.
2004     //• CL_INVALID_VALUE ... if the region specified by src_offset and src_offset + src_cb refers to a region outside src_buffer.
2005     //• CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
2006     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for dst_image are not supported by device associated with queue.
2007     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for dst_image are not supported by device associated with queue.
2008     //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_buffer or dst_image.
2009     //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
2010 }
2011 
2012 #[cl_entrypoint(clEnqueueCopyImageToBuffer)]
2013 fn enqueue_copy_image_to_buffer(
2014     command_queue: cl_command_queue,
2015     src_image: cl_mem,
2016     dst_buffer: cl_mem,
2017     src_origin: *const usize,
2018     region: *const usize,
2019     dst_offset: usize,
2020     num_events_in_wait_list: cl_uint,
2021     event_wait_list: *const cl_event,
2022     event: *mut cl_event,
2023 ) -> CLResult<()> {
2024     let q = Queue::arc_from_raw(command_queue)?;
2025     let src = Image::arc_from_raw(src_image)?;
2026     let dst = Buffer::arc_from_raw(dst_buffer)?;
2027     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2028 
2029     // CL_INVALID_CONTEXT if the context associated with command_queue, src_image and dst_buffer
2030     // are not the same
2031     if q.context != src.context || q.context != dst.context {
2032         return Err(CL_INVALID_CONTEXT);
2033     }
2034 
2035     // CL_INVALID_VALUE if src_origin or region is NULL.
2036     if src_origin.is_null() || region.is_null() {
2037         return Err(CL_INVALID_VALUE);
2038     }
2039 
2040     let region = unsafe { CLVec::from_raw(region) };
2041     let src_origin = unsafe { CLVec::from_raw(src_origin) };
2042 
2043     // CL_INVALID_VALUE if values in src_origin and region do not follow rules described in the
2044     // argument description for src_origin and region.
2045     // CL_INVALID_VALUE if the 1D, 2D or 3D rectangular region specified by src_origin and
2046     // src_origin + region refers to a region outside src_image, or if the region specified by
2047     // dst_offset and dst_offset + dst_cb refers to a region outside dst_buffer.
2048     validate_image_bounds(&src, src_origin, region)?;
2049 
2050     create_and_queue(
2051         q,
2052         CL_COMMAND_COPY_IMAGE_TO_BUFFER,
2053         evs,
2054         event,
2055         false,
2056         Box::new(move |q, ctx| src.copy_to_buffer(q, ctx, &dst, src_origin, dst_offset, &region)),
2057     )
2058 
2059     //• CL_INVALID_MEM_OBJECT if src_image is not a valid image object or dst_buffer is not a valid buffer object or if src_image is a 1D image buffer object created from dst_buffer.
2060     //• CL_INVALID_VALUE ... if the region specified by dst_offset and dst_offset + dst_cb refers to a region outside dst_buffer.
2061     //• CL_MISALIGNED_SUB_BUFFER_OFFSET if dst_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue. This error code is missing before version 1.1.
2062     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for src_image are not supported by device associated with queue.
2063     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for src_image are not supported by device associated with queue.
2064     //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_image or dst_buffer.
2065     //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
2066 }
2067 
2068 #[cl_entrypoint(clEnqueueMapImage)]
2069 fn enqueue_map_image(
2070     command_queue: cl_command_queue,
2071     image: cl_mem,
2072     blocking_map: cl_bool,
2073     map_flags: cl_map_flags,
2074     origin: *const usize,
2075     region: *const usize,
2076     image_row_pitch: *mut usize,
2077     image_slice_pitch: *mut usize,
2078     num_events_in_wait_list: cl_uint,
2079     event_wait_list: *const cl_event,
2080     event: *mut cl_event,
2081 ) -> CLResult<*mut ::std::os::raw::c_void> {
2082     let q = Queue::arc_from_raw(command_queue)?;
2083     let i = Image::arc_from_raw(image)?;
2084     let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
2085     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2086 
2087     // CL_INVALID_VALUE ... or if values specified in map_flags are not valid.
2088     validate_map_flags(&i, map_flags)?;
2089 
2090     // CL_INVALID_CONTEXT if context associated with command_queue and image are not the same
2091     if i.context != q.context {
2092         return Err(CL_INVALID_CONTEXT);
2093     }
2094 
2095     // CL_INVALID_VALUE if origin or region is NULL.
2096     // CL_INVALID_VALUE if image_row_pitch is NULL.
2097     if origin.is_null() || region.is_null() || image_row_pitch.is_null() {
2098         return Err(CL_INVALID_VALUE);
2099     }
2100 
2101     let region = unsafe { CLVec::from_raw(region) };
2102     let origin = unsafe { CLVec::from_raw(origin) };
2103 
2104     // CL_INVALID_VALUE if region being mapped given by (origin, origin + region) is out of bounds
2105     // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
2106     // description for origin and region.
2107     validate_image_bounds(&i, origin, region)?;
2108 
2109     let mut dummy_slice_pitch: usize = 0;
2110     let image_slice_pitch = if image_slice_pitch.is_null() {
2111         // CL_INVALID_VALUE if image is a 3D image, 1D or 2D image array object and
2112         // image_slice_pitch is NULL.
2113         if i.image_desc.is_array() || i.image_desc.image_type == CL_MEM_OBJECT_IMAGE3D {
2114             return Err(CL_INVALID_VALUE);
2115         }
2116         &mut dummy_slice_pitch
2117     } else {
2118         unsafe { image_slice_pitch.as_mut().unwrap() }
2119     };
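    // For a plain 2D image the application may legally pass NULL here, but the
    // map below still needs somewhere to store the slice pitch it computes, so
    // the local dummy_slice_pitch acts as a scratch output that is simply
    // dropped.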
2120 
2121     let ptr = i.map(
2122         origin,
2123         region,
2124         unsafe { image_row_pitch.as_mut().unwrap() },
2125         image_slice_pitch,
2126         map_flags != CL_MAP_READ.into(),
2127     )?;
2128 
2129     create_and_queue(
2130         q,
2131         CL_COMMAND_MAP_IMAGE,
2132         evs,
2133         event,
2134         block,
2135         Box::new(move |q, ctx| {
2136             if map_flags != CL_MAP_WRITE_INVALIDATE_REGION.into() {
2137                 i.sync_map(q, ctx, ptr)
2138             } else {
2139                 Ok(())
2140             }
2141         }),
2142     )?;
2143 
2144     Ok(ptr.as_ptr())
2145 
2146     //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
2147     //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
2148     //• CL_MAP_FAILURE if there is a failure to map the requested region into the host address space. This error cannot occur for image objects created with CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR.
2149     //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
2150     //• CL_INVALID_OPERATION if mapping would lead to overlapping regions being mapped for writing.
2151 }
2152 
2153 #[cl_entrypoint(clRetainMemObject)]
2154 fn retain_mem_object(mem: cl_mem) -> CLResult<()> {
2155     let m = MemBase::ref_from_raw(mem)?;
2156     match m.base.get_type()? {
2157         RusticlTypes::Buffer => Buffer::retain(mem),
2158         RusticlTypes::Image => Image::retain(mem),
2159         _ => Err(CL_INVALID_MEM_OBJECT),
2160     }
2161 }
2162 
2163 #[cl_entrypoint(clReleaseMemObject)]
2164 fn release_mem_object(mem: cl_mem) -> CLResult<()> {
2165     let m = MemBase::ref_from_raw(mem)?;
2166     match m.base.get_type()? {
2167         RusticlTypes::Buffer => Buffer::release(mem),
2168         RusticlTypes::Image => Image::release(mem),
2169         _ => Err(CL_INVALID_MEM_OBJECT),
2170     }
2171 }
2172 
2173 #[cl_entrypoint(clEnqueueUnmapMemObject)]
2174 fn enqueue_unmap_mem_object(
2175     command_queue: cl_command_queue,
2176     memobj: cl_mem,
2177     mapped_ptr: *mut ::std::os::raw::c_void,
2178     num_events_in_wait_list: cl_uint,
2179     event_wait_list: *const cl_event,
2180     event: *mut cl_event,
2181 ) -> CLResult<()> {
2182     let q = Queue::arc_from_raw(command_queue)?;
2183     let m = MemBase::arc_from_raw(memobj)?;
2184     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2185 
2186     // CL_INVALID_CONTEXT if context associated with command_queue and memobj are not the same
2187     if q.context != m.context {
2188         return Err(CL_INVALID_CONTEXT);
2189     }
2190 
2191     // CL_INVALID_VALUE if mapped_ptr is not a valid pointer returned by clEnqueueMapBuffer or
2192     // clEnqueueMapImage for memobj.
2193     if !m.is_mapped_ptr(mapped_ptr) {
2194         return Err(CL_INVALID_VALUE);
2195     }
2196 
2197     // SAFETY: it's required that applications do not cause data races
2198     let mapped_ptr = unsafe { MutMemoryPtr::from_ptr(mapped_ptr) };
2199     let needs_sync = m.unmap(mapped_ptr)?;
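    // unmap updates the mapping bookkeeping immediately and reports whether
    // the mapped contents still have to be synchronized back; the actual
    // synchronization happens in the queued command below.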
2200     create_and_queue(
2201         q,
2202         CL_COMMAND_UNMAP_MEM_OBJECT,
2203         evs,
2204         event,
2205         false,
2206         Box::new(move |q, ctx| {
2207             if needs_sync {
2208                 m.sync_unmap(q, ctx, mapped_ptr)
2209             } else {
2210                 Ok(())
2211             }
2212         }),
2213     )
2214 }
2215 
2216 #[cl_entrypoint(clEnqueueMigrateMemObjects)]
2217 fn enqueue_migrate_mem_objects(
2218     command_queue: cl_command_queue,
2219     num_mem_objects: cl_uint,
2220     mem_objects: *const cl_mem,
2221     flags: cl_mem_migration_flags,
2222     num_events_in_wait_list: cl_uint,
2223     event_wait_list: *const cl_event,
2224     event: *mut cl_event,
2225 ) -> CLResult<()> {
2226     let q = Queue::arc_from_raw(command_queue)?;
2227     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2228     let bufs = MemBase::refs_from_arr(mem_objects, num_mem_objects)?;
2229 
2230     // CL_INVALID_VALUE if num_mem_objects is zero or if mem_objects is NULL.
2231     if bufs.is_empty() {
2232         return Err(CL_INVALID_VALUE);
2233     }
2234 
2235     // CL_INVALID_CONTEXT if the context associated with command_queue and memory objects in
2236     // mem_objects are not the same
2237     if bufs.iter().any(|b| b.context != q.context) {
2238         return Err(CL_INVALID_CONTEXT);
2239     }
2240 
2241     // CL_INVALID_VALUE if flags is not 0 or is not any of the values described in the table above.
2242     if flags != 0
2243         && bit_check(
2244             flags,
2245             !(CL_MIGRATE_MEM_OBJECT_HOST | CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED),
2246         )
2247     {
2248         return Err(CL_INVALID_VALUE);
2249     }
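    // E.g. flags of CL_MIGRATE_MEM_OBJECT_HOST,
    // CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED, or both OR'ed together pass
    // this check; any flags value with a bit outside those two set is rejected
    // with CL_INVALID_VALUE.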
2250 
2251     // we should do something, but it's legal to not do anything at all
2252     create_and_queue(
2253         q,
2254         CL_COMMAND_MIGRATE_MEM_OBJECTS,
2255         evs,
2256         event,
2257         false,
2258         Box::new(|_, _| Ok(())),
2259     )
2260 
2261     //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for the specified set of memory objects in mem_objects.
2262 }
2263 
2264 #[cl_info_entrypoint(clGetPipeInfo)]
2265 impl CLInfo<cl_pipe_info> for cl_mem {
2266     fn query(&self, _q: cl_pipe_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
2267         // CL_INVALID_MEM_OBJECT if pipe is not a valid pipe object.
2268         Err(CL_INVALID_MEM_OBJECT)
2269     }
2270 }
2271 
2272 pub fn svm_alloc(
2273     context: cl_context,
2274     flags: cl_svm_mem_flags,
2275     size: usize,
2276     mut alignment: cl_uint,
2277 ) -> CLResult<*mut c_void> {
2278     // clSVMAlloc will fail if
2279 
2280     // context is not a valid context
2281     let c = Context::ref_from_raw(context)?;
2282 
2283     // or no devices in context support SVM.
2284     if !c.has_svm_devs() {
2285         return Err(CL_INVALID_OPERATION);
2286     }
2287 
2288     // flags does not contain CL_MEM_SVM_FINE_GRAIN_BUFFER but does contain CL_MEM_SVM_ATOMICS.
2289     if !bit_check(flags, CL_MEM_SVM_FINE_GRAIN_BUFFER) && bit_check(flags, CL_MEM_SVM_ATOMICS) {
2290         return Err(CL_INVALID_VALUE);
2291     }
2292 
2293     // size is 0 or > CL_DEVICE_MAX_MEM_ALLOC_SIZE value for any device in context.
2294     if size == 0 || checked_compare(size, Ordering::Greater, c.max_mem_alloc()) {
2295         return Err(CL_INVALID_VALUE);
2296     }
2297 
2298     if alignment == 0 {
2299         alignment = mem::size_of::<[u64; 16]>() as cl_uint;
2300     }
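    // The default of mem::size_of::<[u64; 16]>() is 128 bytes, i.e. the size
    // of the largest built-in vector type (long16), matching the clSVMAlloc
    // rule that a 0 alignment selects the largest supported data type.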
2301 
2302     // alignment is not a power of two
2303     if !alignment.is_power_of_two() {
2304         return Err(CL_INVALID_VALUE);
2305     }
2306 
2307     let layout;
2308     let ptr;
2309 
2310     // SAFETY: we already verified the parameters for from_size_align above and the layout is of
2311     // non-zero size
2312     unsafe {
2313         layout = Layout::from_size_align_unchecked(size, alignment as usize);
2314         ptr = alloc::alloc(layout);
2315     }
2316 
2317     if ptr.is_null() {
2318         return Err(CL_OUT_OF_HOST_MEMORY);
2319     }
2320 
2321     c.add_svm_ptr(ptr as usize, layout);
2322     Ok(ptr.cast())
2323 
2324     // Values specified in flags do not follow rules described for supported values in the SVM Memory Flags table.
2325     // CL_MEM_SVM_FINE_GRAIN_BUFFER or CL_MEM_SVM_ATOMICS is specified in flags and these are not supported by at least one device in context.
2326     // The values specified in flags are not valid, i.e. don’t match those defined in the SVM Memory Flags table.
2327     // the OpenCL implementation cannot support the specified alignment for at least one device in context.
2328     // There was a failure to allocate resources.
2329 }
2330 
2331 fn svm_free_impl(c: &Context, svm_pointer: usize) {
2332     if let Some(layout) = c.remove_svm_ptr(svm_pointer) {
2333         // SAFETY: we make sure that svm_pointer is a valid allocation and reuse the same layout
2334         // from the allocation
2335         unsafe {
2336             alloc::dealloc(svm_pointer as *mut u8, layout);
2337         }
2338     }
2339 }
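// Editorial note: remove_svm_ptr only yields a layout for pointers that are
// currently tracked by the context, so freeing an unknown or already-freed
// pointer above is a silent no-op rather than a double free.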
2340 
2341 pub fn svm_free(context: cl_context, svm_pointer: usize) -> CLResult<()> {
2342     let c = Context::ref_from_raw(context)?;
2343     svm_free_impl(c, svm_pointer);
2344     Ok(())
2345 }
2346 
2347 fn enqueue_svm_free_impl(
2348     command_queue: cl_command_queue,
2349     num_svm_pointers: cl_uint,
2350     svm_pointers: *mut *mut c_void,
2351     pfn_free_func: Option<FuncSVMFreeCb>,
2352     user_data: *mut c_void,
2353     num_events_in_wait_list: cl_uint,
2354     event_wait_list: *const cl_event,
2355     event: *mut cl_event,
2356     cmd_type: cl_command_type,
2357 ) -> CLResult<()> {
2358     let q = Queue::arc_from_raw(command_queue)?;
2359     let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
2360 
2361     // CL_INVALID_VALUE if num_svm_pointers is 0 and svm_pointers is non-NULL, or if svm_pointers is
2362     // NULL and num_svm_pointers is not 0.
2363     if num_svm_pointers == 0 && !svm_pointers.is_null()
2364         || num_svm_pointers != 0 && svm_pointers.is_null()
2365     {
2366         return Err(CL_INVALID_VALUE);
2367     }
2368 
2369     // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
2370     if !q.device.svm_supported() {
2371         return Err(CL_INVALID_OPERATION);
2372     }
2373 
2374     // The application is allowed to reuse or free the memory referenced by `svm_pointers` after this
2375     // function returns so we have to make a copy.
2376     // SAFETY: num_svm_pointers specifies the amount of elements in svm_pointers
2377     let mut svm_pointers =
2378         unsafe { slice::from_raw_parts(svm_pointers.cast(), num_svm_pointers as usize) }.to_vec();
2379     // SAFETY: The requirements on `SVMFreeCb::new` match the requirements
2380     // imposed by the OpenCL specification. It is the caller's duty to uphold them.
2381     let cb_opt = unsafe { SVMFreeCb::new(pfn_free_func, user_data) }.ok();
2382 
2383     create_and_queue(
2384         q,
2385         cmd_type,
2386         evs,
2387         event,
2388         false,
2389         Box::new(move |q, _| {
2390             if let Some(cb) = cb_opt {
2391                 cb.call(q, &mut svm_pointers);
2392             } else {
2393                 for ptr in svm_pointers {
2394                     svm_free_impl(&q.context, ptr);
2395                 }
2396             }
2397 
2398             Ok(())
2399         }),
2400     )
2401 }
2402 
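// Illustrative sketch, not part of the original source: why the copy above
// matters. The queued closure runs after this function has returned, so it
// must own its data. Under the assumption of a queue that takes boxed
// closures (`queue_free` and `submit` are hypothetical names):
//
//     use std::slice;
//
//     fn queue_free(ptrs: *const usize, n: usize, submit: impl FnOnce(Box<dyn FnOnce()>)) {
//         // Copy now; the caller may free `ptrs` as soon as we return.
//         let owned = unsafe { slice::from_raw_parts(ptrs, n) }.to_vec();
//         submit(Box::new(move || {
//             for _p in owned { /* free each allocation */ }
//         }));
//     }
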
#[cl_entrypoint(clEnqueueSVMFree)]
fn enqueue_svm_free(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *mut c_void,
    pfn_free_func: Option<FuncSVMFreeCb>,
    user_data: *mut c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_free_impl(
        command_queue,
        num_svm_pointers,
        svm_pointers,
        pfn_free_func,
        user_data,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_FREE,
    )
}

#[cl_entrypoint(clEnqueueSVMFreeARM)]
fn enqueue_svm_free_arm(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *mut c_void,
    pfn_free_func: Option<FuncSVMFreeCb>,
    user_data: *mut c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_free_impl(
        command_queue,
        num_svm_pointers,
        svm_pointers,
        pfn_free_func,
        user_data,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_FREE_ARM,
    )
}

fn enqueue_svm_memcpy_impl(
    command_queue: cl_command_queue,
    blocking_copy: cl_bool,
    dst_ptr: *mut c_void,
    src_ptr: *const c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let block = check_cl_bool(blocking_copy).ok_or(CL_INVALID_VALUE)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_MEM_COPY_OVERLAP if the values specified for dst_ptr, src_ptr and size result in an
    // overlapping copy.
    let dst_ptr_addr = dst_ptr as usize;
    let src_ptr_addr = src_ptr as usize;
    if (src_ptr_addr <= dst_ptr_addr && dst_ptr_addr < src_ptr_addr + size)
        || (dst_ptr_addr <= src_ptr_addr && src_ptr_addr < dst_ptr_addr + size)
    {
        return Err(CL_MEM_COPY_OVERLAP);
    }

    // CAST: We have no idea about the type or initialization status of these bytes.
    // MaybeUninit<u8> is the safe bet.
    let src_ptr = src_ptr.cast::<MaybeUninit<u8>>();

    // CAST: We have no idea about the type or initialization status of these bytes.
    // MaybeUninit<u8> is the safe bet.
    let dst_ptr = dst_ptr.cast::<MaybeUninit<u8>>();

    // SAFETY: It is up to the application to ensure the memory is valid to read for `size` bytes
    // and that it doesn't modify it until the command has completed.
    let src = unsafe { cl_slice::from_raw_parts(src_ptr, size)? };

    // SAFETY: We've ensured there's no aliasing between src and dst. It is up to the application
    // to ensure the memory is valid to read and write for `size` bytes and that it doesn't modify
    // or read from it until the command has completed.
    let dst = unsafe { cl_slice::from_raw_parts_mut(dst_ptr, size)? };

    create_and_queue(
        q,
        cmd_type,
        evs,
        event,
        block,
        Box::new(move |_, _| {
            dst.copy_from_slice(src);
            Ok(())
        }),
    )
}

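// Illustrative sketch, not part of the original source: the overlap test
// above, isolated. Two ranges of `size` bytes overlap iff either base lands
// strictly inside the other range; ranges that merely touch are fine:
//
//     fn ranges_overlap(a: usize, b: usize, size: usize) -> bool {
//         (a <= b && b < a + size) || (b <= a && a < b + size)
//     }
//
//     fn overlap_demo() {
//         assert!(ranges_overlap(0, 4, 8));  // [0, 8) and [4, 12) share bytes
//         assert!(!ranges_overlap(0, 8, 8)); // [0, 8) and [8, 16) only touch
//     }
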
#[cl_entrypoint(clEnqueueSVMMemcpy)]
fn enqueue_svm_memcpy(
    command_queue: cl_command_queue,
    blocking_copy: cl_bool,
    dst_ptr: *mut c_void,
    src_ptr: *const c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_memcpy_impl(
        command_queue,
        blocking_copy,
        dst_ptr,
        src_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMCPY,
    )
}

#[cl_entrypoint(clEnqueueSVMMemcpyARM)]
fn enqueue_svm_memcpy_arm(
    command_queue: cl_command_queue,
    blocking_copy: cl_bool,
    dst_ptr: *mut c_void,
    src_ptr: *const c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_memcpy_impl(
        command_queue,
        blocking_copy,
        dst_ptr,
        src_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMCPY_ARM,
    )
}

fn enqueue_svm_mem_fill_impl(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if pattern is NULL [...]
    if pattern.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if size is not a multiple of pattern_size.
    // The explicit zero check keeps the modulo from panicking on a division by zero; a
    // pattern_size of 0 is CL_INVALID_VALUE per the spec anyway.
    if pattern_size == 0 || size % pattern_size != 0 {
        return Err(CL_INVALID_VALUE);
    }

    // The provided `$bytesize` must equal `pattern_size`.
    macro_rules! generate_fill_closure {
        ($bytesize:literal) => {{
            // We need the value of `$bytesize` at compile time, so we need to pass it in, but it
            // should always match `pattern_size`.
            assert!($bytesize == pattern_size);

            // Three reasons we define our own bag-of-bytes type here:
            //
            // We'd otherwise have to pass a type to this macro. Verifying that the type we passed
            // upholds all the properties we need or want is more trouble than defining our own.
            //
            // The primitive Rust types only go up to `u128` anyway and their alignments are
            // platform defined. E.g. at the time of this writing `u128` only has an alignment of 8
            // on x86-64, even though its size is 16. Defining our own type with an alignment of 16
            // allows the compiler to generate better code.
            //
            // The alignment of OpenCL types is currently what we need on x86-64, but the spec
            // explicitly states that's just a recommendation and ultimately it's up to the
            // cl_platform.h header. The very descriptive names of the CL types don't make
            // verifying the match when calling this macro any easier at a glance.
            // "Was `cl_uint` 4 byte or 8 byte? Eh, I'm sure nobody got it wrong by accident."
            #[repr(C)]
            #[repr(align($bytesize))]
            #[derive(Copy, Clone)]
            struct Pattern([u8; $bytesize]);

            // Just to make sure the compiler didn't generate anything weird.
            static_assert!($bytesize == mem::size_of::<Pattern>());
            static_assert!($bytesize == mem::align_of::<Pattern>());

            // CAST: We don't know exactly which type `pattern` points to, but we know it's an
            // Application Scalar Data Type (cl_char, cl_ulong, etc.) or an Application Vector Data
            // Type (cl_double4, etc.). All of them are `Copy`, do not contain padding bytes, and
            // have no invalid bit patterns. AKA they are POD data types.
            // Since we only copy it around, we can cast to any POD type as long as its size
            // matches `pattern_size`.
            let pattern_ptr = pattern.cast::<Pattern>();

            // The application is allowed to reuse or free the memory referenced by `pattern_ptr`
            // after this function returns, so we need to create a copy.
            //
            // There's no explicit alignment guarantee and we don't rely on `Pattern` matching the
            // alignment of whichever Application Data Type we're actually presented with. Thus, do
            // an unaligned read.
            //
            // SAFETY: We've checked that `pattern_ptr` is not NULL above. It is otherwise the
            // calling application's responsibility to ensure that it is valid for reads of
            // `pattern_size` bytes and properly initialized.
            // Creating a bitwise copy can't create memory safety issues, since `Pattern` is `Copy`.
            let pattern = unsafe { pattern_ptr.read_unaligned() };

            // CAST: Same as with `pattern`, we don't know the exact type of `svm_ptr`, but we do
            // know it's fine if we choose the same type here. The application might reasonably
            // give us uninitialized memory though, so cast to a `MaybeUninit<Pattern>`, which has
            // the same layout as `Pattern`.
            let svm_ptr = svm_ptr.cast::<MaybeUninit<Pattern>>();

            // SAFETY: It is the calling application's responsibility to ensure that `svm_ptr` is
            // valid for reads and writes up to `size` bytes.
            // Since `pattern_size == mem::size_of::<Pattern>()` and `MaybeUninit<Pattern>` has the
            // same layout as `Pattern`, we know that
            // `size / pattern_size * mem::size_of::<MaybeUninit<Pattern>>()` equals `size`.
            //
            // Since we're creating a `&[MaybeUninit<Pattern>]` the initialization status does not
            // matter.
            //
            // From here on out we only access the referenced memory through this slice. In
            // particular, since we've made a copy of `pattern`, it doesn't matter if the memory
            // region referenced by `pattern` aliases the one referenced by this slice. It is up to
            // the application not to access it at all until this command has been completed.
            let svm_slice = unsafe { cl_slice::from_raw_parts_mut(svm_ptr, size / pattern_size)? };

            Box::new(move |_, _| {
                for x in svm_slice {
                    x.write(pattern);
                }

                Ok(())
            })
        }};
    }

    // Generate optimized code paths for each of the possible pattern sizes.
    let work: EventSig = match pattern_size {
        1 => generate_fill_closure!(1),
        2 => generate_fill_closure!(2),
        4 => generate_fill_closure!(4),
        8 => generate_fill_closure!(8),
        16 => generate_fill_closure!(16),
        32 => generate_fill_closure!(32),
        64 => generate_fill_closure!(64),
        128 => generate_fill_closure!(128),
        _ => {
            // CL_INVALID_VALUE if [...] pattern_size is 0 or if pattern_size is not one of
            // {1, 2, 4, 8, 16, 32, 64, 128}.
            return Err(CL_INVALID_VALUE);
        }
    };

    create_and_queue(q, cmd_type, evs, event, false, work)
}

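// Illustrative sketch, not part of the original source: the bag-of-bytes
// trick from the macro above, condensed for pattern_size == 16. `Pattern16`
// and `fill_demo` are hypothetical names; the point is that a repr(align)
// newtype gets the full 16-byte alignment that `u128` historically lacked on
// x86-64:
//
//     use std::mem::{align_of, size_of, MaybeUninit};
//
//     #[repr(C, align(16))]
//     #[derive(Copy, Clone)]
//     struct Pattern16([u8; 16]);
//
//     fn fill_demo(src: *const u8, dst: &mut [MaybeUninit<Pattern16>]) {
//         assert_eq!(size_of::<Pattern16>(), 16);
//         assert_eq!(align_of::<Pattern16>(), 16); // u128 was only 8-aligned here
//         // SAFETY: caller guarantees `src` points at 16 readable, initialized
//         // bytes; no alignment is assumed thanks to read_unaligned.
//         let pattern = unsafe { src.cast::<Pattern16>().read_unaligned() };
//         for slot in dst {
//             slot.write(pattern);
//         }
//     }
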
#[cl_entrypoint(clEnqueueSVMMemFill)]
fn enqueue_svm_mem_fill(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_mem_fill_impl(
        command_queue,
        svm_ptr,
        pattern,
        pattern_size,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMFILL,
    )
}

#[cl_entrypoint(clEnqueueSVMMemFillARM)]
fn enqueue_svm_mem_fill_arm(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_mem_fill_impl(
        command_queue,
        svm_ptr,
        pattern,
        pattern_size,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMFILL_ARM,
    )
}

fn enqueue_svm_map_impl(
    command_queue: cl_command_queue,
    blocking_map: cl_bool,
    flags: cl_map_flags,
    svm_ptr: *mut ::std::os::raw::c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if svm_ptr is NULL.
    if svm_ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if size is 0 ...
    if size == 0 {
        return Err(CL_INVALID_VALUE);
    }

    // ... or if values specified in map_flags are not valid.
    validate_map_flags_common(flags)?;

    create_and_queue(q, cmd_type, evs, event, block, Box::new(|_, _| Ok(())))
}

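// Illustrative sketch, not part of the original source: since this SVM is
// backed by ordinary host memory, a map has nothing to migrate or flush; it
// only has to order the host access against queued commands, hence the no-op
// closure above. Roughly, an application-side sequence (types and casts
// simplified; this is an assumption about typical usage, not a test):
//
//     // blocking map for write access over `size` bytes at `ptr`
//     enqueue_svm_map(queue, CL_TRUE, CL_MAP_WRITE as cl_map_flags, ptr, size, 0, ptr::null(), ptr::null_mut())?;
//     // ... the host mutates the first `size` bytes of `ptr` ...
//     enqueue_svm_unmap(queue, ptr, 0, ptr::null(), ptr::null_mut())?;
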
#[cl_entrypoint(clEnqueueSVMMap)]
fn enqueue_svm_map(
    command_queue: cl_command_queue,
    blocking_map: cl_bool,
    flags: cl_map_flags,
    svm_ptr: *mut ::std::os::raw::c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_map_impl(
        command_queue,
        blocking_map,
        flags,
        svm_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MAP,
    )
}

#[cl_entrypoint(clEnqueueSVMMapARM)]
fn enqueue_svm_map_arm(
    command_queue: cl_command_queue,
    blocking_map: cl_bool,
    flags: cl_map_flags,
    svm_ptr: *mut ::std::os::raw::c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_map_impl(
        command_queue,
        blocking_map,
        flags,
        svm_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MAP_ARM,
    )
}

fn enqueue_svm_unmap_impl(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if svm_ptr is NULL.
    if svm_ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    create_and_queue(q, cmd_type, evs, event, false, Box::new(|_, _| Ok(())))
}

#[cl_entrypoint(clEnqueueSVMUnmap)]
fn enqueue_svm_unmap(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_unmap_impl(
        command_queue,
        svm_ptr,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_UNMAP,
    )
}

#[cl_entrypoint(clEnqueueSVMUnmapARM)]
fn enqueue_svm_unmap_arm(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_unmap_impl(
        command_queue,
        svm_ptr,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_UNMAP_ARM,
    )
}

#[cl_entrypoint(clEnqueueSVMMigrateMem)]
fn enqueue_svm_migrate_mem(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *const ::std::os::raw::c_void,
    sizes: *const usize,
    flags: cl_mem_migration_flags,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if num_svm_pointers is zero.
    if num_svm_pointers == 0 {
        return Err(CL_INVALID_VALUE);
    }

    let num_svm_pointers = num_svm_pointers as usize;
    // SAFETY: It is the application's responsibility to ensure that `svm_pointers` is valid for
    // reads of `num_svm_pointers` elements.
    let mut svm_pointers: Vec<usize> =
        unsafe { cl_slice::from_raw_parts(svm_pointers.cast(), num_svm_pointers)? }.to_owned();
    // If sizes is NULL, every allocation containing the pointers needs to be migrated in its
    // entirety.
    let mut sizes = if sizes.is_null() {
        vec![0; num_svm_pointers]
    } else {
        // SAFETY: As above, the application guarantees `sizes` holds `num_svm_pointers` elements.
        unsafe { cl_slice::from_raw_parts(sizes, num_svm_pointers)? }.to_owned()
    };

    // CL_INVALID_VALUE if sizes[i] is non-zero and the range
    // [svm_pointers[i], svm_pointers[i]+sizes[i]) is not contained within an existing clSVMAlloc
    // allocation.
    for (ptr, size) in svm_pointers.iter_mut().zip(&mut sizes) {
        if let Some((alloc, alloc_size)) = q.context.find_svm_alloc(*ptr) {
            let ptr_addr = *ptr;
            let alloc_addr = alloc as usize;

            // only proceed if the offset + size stays within the allocation; otherwise we are out
            // of bounds
            if (ptr_addr - alloc_addr) + *size <= alloc_size {
                // if the size is 0, the entire allocation should be migrated
                if *size == 0 {
                    *ptr = alloc as usize;
                    *size = alloc_size;
                }
                continue;
            }
        }

        return Err(CL_INVALID_VALUE);
    }

    let to_device = !bit_check(flags, CL_MIGRATE_MEM_OBJECT_HOST);
    let content_undefined = bit_check(flags, CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED);

    create_and_queue(
        q,
        CL_COMMAND_SVM_MIGRATE_MEM,
        evs,
        event,
        false,
        Box::new(move |_, ctx| {
            ctx.svm_migrate(&svm_pointers, &sizes, to_device, content_undefined);
            Ok(())
        }),
    )
}

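// Illustrative sketch, not part of the original source: the containment and
// size-expansion logic from the loop above, isolated. Given an allocation
// [base, base + alloc_size) that was found to contain `ptr`, a request
// (ptr, size) is valid iff the offset plus size stays in bounds, and size == 0
// expands to the whole allocation (`resolve` is a hypothetical name):
//
//     fn resolve(base: usize, alloc_size: usize, ptr: usize, size: usize)
//         -> Option<(usize, usize)>
//     {
//         let offset = ptr - base; // ptr lies inside this allocation
//         if offset + size > alloc_size {
//             return None; // out of bounds -> CL_INVALID_VALUE
//         }
//         if size == 0 {
//             Some((base, alloc_size)) // migrate the entire allocation
//         } else {
//             Some((ptr, size))
//         }
//     }
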
#[cl_entrypoint(clCreatePipe)]
fn create_pipe(
    _context: cl_context,
    _flags: cl_mem_flags,
    _pipe_packet_size: cl_uint,
    _pipe_max_packets: cl_uint,
    _properties: *const cl_pipe_properties,
) -> CLResult<cl_mem> {
    // rusticl does not support pipes, so creating one is always an invalid operation.
    Err(CL_INVALID_OPERATION)
}

#[cl_info_entrypoint(clGetGLTextureInfo)]
impl CLInfo<cl_gl_texture_info> for cl_mem {
    fn query(&self, q: cl_gl_texture_info, _: &[u8]) -> CLResult<Vec<MaybeUninit<u8>>> {
        let mem = MemBase::ref_from_raw(*self)?;
        Ok(match *q {
            CL_GL_MIPMAP_LEVEL => cl_prop::<cl_GLint>(0),
            CL_GL_TEXTURE_TARGET => cl_prop::<cl_GLenum>(
                mem.gl_obj
                    .as_ref()
                    .ok_or(CL_INVALID_GL_OBJECT)?
                    .gl_object_target,
            ),
            _ => return Err(CL_INVALID_VALUE),
        })
    }
}

fn create_from_gl(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    let c = Context::arc_from_raw(context)?;
    let gl_ctx_manager = &c.gl_ctx_manager;

    // CL_INVALID_CONTEXT if context associated with command_queue was not created from an OpenGL context
    if gl_ctx_manager.is_none() {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    validate_mem_flags(flags, target == GL_ARRAY_BUFFER)?;

    // CL_INVALID_MIP_LEVEL if miplevel is greater than zero and the OpenGL
    // implementation does not support creating from non-zero mipmap levels.
    if miplevel > 0 {
        return Err(CL_INVALID_MIP_LEVEL);
    }

    // CL_INVALID_CONTEXT if context [..] was not created from a GL context.
    if let Some(gl_ctx_manager) = gl_ctx_manager {
        let gl_export_manager =
            gl_ctx_manager.export_object(&c, target, flags as u32, miplevel, texture)?;

        Ok(MemBase::from_gl(c, flags, &gl_export_manager)?)
    } else {
        Err(CL_INVALID_CONTEXT)
    }
}

#[cl_entrypoint(clCreateFromGLTexture)]
fn create_from_gl_texture(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    if !is_valid_gl_texture(target) {
        return Err(CL_INVALID_VALUE);
    }

    create_from_gl(context, flags, target, miplevel, texture)
}

#[cl_entrypoint(clCreateFromGLTexture2D)]
fn create_from_gl_texture_2d(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    if !is_valid_gl_texture_2d(target) {
        return Err(CL_INVALID_VALUE);
    }

    create_from_gl(context, flags, target, miplevel, texture)
}

#[cl_entrypoint(clCreateFromGLTexture3D)]
fn create_from_gl_texture_3d(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    if target != GL_TEXTURE_3D {
        return Err(CL_INVALID_VALUE);
    }

    create_from_gl(context, flags, target, miplevel, texture)
}

#[cl_entrypoint(clCreateFromGLBuffer)]
fn create_from_gl_buffer(
    context: cl_context,
    flags: cl_mem_flags,
    bufobj: cl_GLuint,
) -> CLResult<cl_mem> {
    create_from_gl(context, flags, GL_ARRAY_BUFFER, 0, bufobj)
}

#[cl_entrypoint(clCreateFromGLRenderbuffer)]
fn create_from_gl_renderbuffer(
    context: cl_context,
    flags: cl_mem_flags,
    renderbuffer: cl_GLuint,
) -> CLResult<cl_mem> {
    create_from_gl(context, flags, GL_RENDERBUFFER, 0, renderbuffer)
}

#[cl_entrypoint(clGetGLObjectInfo)]
fn get_gl_object_info(
    memobj: cl_mem,
    gl_object_type: *mut cl_gl_object_type,
    gl_object_name: *mut cl_GLuint,
) -> CLResult<()> {
    let m = MemBase::ref_from_raw(memobj)?;

    match &m.gl_obj {
        Some(gl_obj) => {
            gl_object_type.write_checked(gl_obj.gl_object_type);
            gl_object_name.write_checked(gl_obj.gl_object_name);
        }
        None => {
            // CL_INVALID_GL_OBJECT if there is no GL object associated with memobj.
            return Err(CL_INVALID_GL_OBJECT);
        }
    }

    Ok(())
}

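// Illustrative sketch, not part of the original source: `write_checked` comes
// from mesa_rust_util::ptr and, judging by its use here with optional out
// parameters, writes through the pointer only when it is non-NULL. A minimal
// version under that assumption (`write_checked_demo` is a hypothetical name):
//
//     fn write_checked_demo<T>(ptr: *mut T, value: T) {
//         if !ptr.is_null() {
//             // SAFETY: the caller passed either NULL or a valid destination.
//             unsafe { ptr.write(value) };
//         }
//     }
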
#[cl_entrypoint(clEnqueueAcquireGLObjects)]
fn enqueue_acquire_gl_objects(
    command_queue: cl_command_queue,
    num_objects: cl_uint,
    mem_objects: *const cl_mem,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let objs = MemBase::arcs_from_arr(mem_objects, num_objects)?;
    let gl_ctx_manager = &q.context.gl_ctx_manager;

    // CL_INVALID_CONTEXT if context associated with command_queue was not created from an OpenGL context
    if gl_ctx_manager.is_none() {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_GL_OBJECT if memory objects in mem_objects have not been created from GL objects.
    if objs.iter().any(|o| o.gl_obj.is_none()) {
        return Err(CL_INVALID_GL_OBJECT);
    }

    create_and_queue(
        q,
        CL_COMMAND_ACQUIRE_GL_OBJECTS,
        evs,
        event,
        false,
        Box::new(move |q, ctx| copy_cube_to_slice(q, ctx, &objs)),
    )
}

#[cl_entrypoint(clEnqueueReleaseGLObjects)]
fn enqueue_release_gl_objects(
    command_queue: cl_command_queue,
    num_objects: cl_uint,
    mem_objects: *const cl_mem,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let objs = MemBase::arcs_from_arr(mem_objects, num_objects)?;
    let gl_ctx_manager = &q.context.gl_ctx_manager;

    // CL_INVALID_CONTEXT if context associated with command_queue was not created from an OpenGL context
    if gl_ctx_manager.is_none() {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_GL_OBJECT if memory objects in mem_objects have not been created from GL objects.
    if objs.iter().any(|o| o.gl_obj.is_none()) {
        return Err(CL_INVALID_GL_OBJECT);
    }

    create_and_queue(
        q,
        CL_COMMAND_RELEASE_GL_OBJECTS,
        evs,
        event,
        false,
        Box::new(move |q, ctx| copy_slice_to_cube(q, ctx, &objs)),
    )
}
3177