use crate::api::icd::*;
use crate::api::types::*;
use crate::core::context::*;
use crate::core::device::*;
use crate::core::format::*;
use crate::core::memory::*;
use crate::core::queue::*;
use crate::core::util::*;

use libc_rust_gen::{close, dlsym};
use rusticl_opencl_gen::*;

use mesa_rust::pipe::context::*;
use mesa_rust::pipe::fence::*;
use mesa_rust::pipe::resource::*;
use mesa_rust::pipe::screen::*;

use std::collections::HashMap;
use std::ffi::CStr;
use std::ffi::CString;
use std::mem;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;

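/// Maps a CL-side (shadow) resource of a GL-shared image to the imported GL
/// resource it mirrors. `None` when no shadow copies are needed.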
type CLGLMappings = Option<HashMap<Arc<PipeResource>, Arc<PipeResource>>>;

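/// Looks up GL interop entry points at runtime. `glXGetProcAddress` and
/// `eglGetProcAddress` are themselves resolved through `dlsym`, so there is
/// no hard link-time dependency on GLX or EGL.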
pub struct XPlatManager {
    #[cfg(glx)]
    glx_get_proc_addr: PFNGLXGETPROCADDRESSPROC,
    egl_get_proc_addr: PFNEGLGETPROCADDRESSPROC,
}

impl Default for XPlatManager {
    fn default() -> Self {
        Self::new()
    }
}

impl XPlatManager {
    pub fn new() -> Self {
        Self {
            #[cfg(glx)]
            glx_get_proc_addr: Self::get_proc_address_func("glXGetProcAddress"),
            egl_get_proc_addr: Self::get_proc_address_func("eglGetProcAddress"),
        }
    }

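    // A null handle makes dlsym() search the global symbol scope (on glibc
    // RTLD_DEFAULT is a null pointer), so this finds glXGetProcAddress or
    // eglGetProcAddress if the corresponding library is loaded.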
    fn get_proc_address_func<T>(name: &str) -> T {
        let cname = CString::new(name).unwrap();
        unsafe {
            let pfn = dlsym(ptr::null_mut(), cname.as_ptr());
            mem::transmute_copy(&pfn)
        }
    }

    #[cfg(glx)]
    unsafe fn get_func_glx(&self, cname: &CStr) -> CLResult<__GLXextFuncPtr> {
        unsafe {
            Ok(self
                .glx_get_proc_addr
                .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?(
                cname.as_ptr().cast(),
            ))
        }
    }

    // In theory this should return CLResult<__GLXextFuncPtr>, but luckily the
    // two function pointer types are identical.
    #[cfg(not(glx))]
    unsafe fn get_func_glx(&self, _: &CStr) -> CLResult<__eglMustCastToProperFunctionPointerType> {
        Err(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)
    }

    fn get_func<T>(&self, name: &str) -> CLResult<T> {
        let cname = CString::new(name).unwrap();
        unsafe {
            let raw_func = if name.starts_with("glX") {
                self.get_func_glx(&cname)?
            } else if name.starts_with("egl") {
                self.egl_get_proc_addr
                    .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?(
                    cname.as_ptr().cast()
                )
            } else {
                panic!("unexpected GL function prefix: {name}");
            };

            Ok(mem::transmute_copy(&raw_func))
        }
    }

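    // Typed lookups of the MESA_GL_interop entry points, resolved through the
    // matching GetProcAddress function. The names mirror the C entry points,
    // hence the non_snake_case allowances.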
    #[allow(non_snake_case)]
    pub fn MesaGLInteropEGLQueryDeviceInfo(
        &self,
    ) -> CLResult<PFNMESAGLINTEROPEGLQUERYDEVICEINFOPROC> {
        self.get_func::<PFNMESAGLINTEROPEGLQUERYDEVICEINFOPROC>("eglGLInteropQueryDeviceInfoMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropGLXQueryDeviceInfo(
        &self,
    ) -> CLResult<PFNMESAGLINTEROPGLXQUERYDEVICEINFOPROC> {
        self.get_func::<PFNMESAGLINTEROPGLXQUERYDEVICEINFOPROC>("glXGLInteropQueryDeviceInfoMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropEGLExportObject(&self) -> CLResult<PFNMESAGLINTEROPEGLEXPORTOBJECTPROC> {
        self.get_func::<PFNMESAGLINTEROPEGLEXPORTOBJECTPROC>("eglGLInteropExportObjectMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropGLXExportObject(&self) -> CLResult<PFNMESAGLINTEROPGLXEXPORTOBJECTPROC> {
        self.get_func::<PFNMESAGLINTEROPGLXEXPORTOBJECTPROC>("glXGLInteropExportObjectMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropEGLFlushObjects(&self) -> CLResult<PFNMESAGLINTEROPEGLFLUSHOBJECTSPROC> {
        self.get_func::<PFNMESAGLINTEROPEGLFLUSHOBJECTSPROC>("eglGLInteropFlushObjectsMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropGLXFlushObjects(&self) -> CLResult<PFNMESAGLINTEROPGLXFLUSHOBJECTSPROC> {
        self.get_func::<PFNMESAGLINTEROPGLXFLUSHOBJECTSPROC>("glXGLInteropFlushObjectsMESA")
    }
}

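/// A GL context handle together with the display it belongs to.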
#[allow(clippy::upper_case_acronyms)]
#[derive(PartialEq, Eq)]
enum GLCtx {
    EGL(EGLDisplay, EGLContext),
    GLX(*mut _XDisplay, *mut __GLXcontextRec),
}

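/// GL sharing state attached to a CL context: the GL context handle, the
/// interop device info queried from it and the entry-point lookup helper.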
pub struct GLCtxManager {
    pub interop_dev_info: mesa_glinterop_device_info,
    pub xplat_manager: XPlatManager,
    gl_ctx: GLCtx,
}

// SAFETY: [GLCtxManager] does contain a few raw pointers (the GLX/EGL context
// and the _XDisplay/EGLDisplay), but we don't do much with them besides
// calling into our mesa internal GL sharing extension, which properly locks
// its data.
unsafe impl Send for GLCtxManager {}
unsafe impl Sync for GLCtxManager {}

impl GLCtxManager {
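    /// Queries `mesa_glinterop_device_info` for the given GL context and
    /// display. Returns `Ok(None)` when no GL context is supplied; it is an
    /// error to supply both an EGL and a GLX display.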
    pub fn new(
        gl_context: *mut c_void,
        glx_display: *mut _XDisplay,
        egl_display: EGLDisplay,
    ) -> CLResult<Option<Self>> {
        let mut info = mesa_glinterop_device_info {
            version: 4,
            ..Default::default()
        };
        let xplat_manager = XPlatManager::new();

        // CL_INVALID_OPERATION if more than one of the attributes
        // CL_CGL_SHAREGROUP_KHR, CL_EGL_DISPLAY_KHR, CL_GLX_DISPLAY_KHR, and
        // CL_WGL_HDC_KHR is set to a non-default value.
        if !egl_display.is_null() && !glx_display.is_null() {
            return Err(CL_INVALID_OPERATION);
        }

        if gl_context.is_null() {
            return Ok(None);
        }

        if !egl_display.is_null() {
            let egl_query_device_info_func = xplat_manager
                .MesaGLInteropEGLQueryDeviceInfo()?
                .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

            let err = unsafe {
                egl_query_device_info_func(egl_display.cast(), gl_context.cast(), &mut info)
            };

            if err != MESA_GLINTEROP_SUCCESS as i32 {
                return Err(interop_to_cl_error(err));
            }

            Ok(Some(GLCtxManager {
                gl_ctx: GLCtx::EGL(egl_display.cast(), gl_context),
                interop_dev_info: info,
                xplat_manager,
            }))
        } else if !glx_display.is_null() && cfg!(glx) {
            let glx_query_device_info_func = xplat_manager
                .MesaGLInteropGLXQueryDeviceInfo()?
                .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

            let err = unsafe {
                glx_query_device_info_func(glx_display.cast(), gl_context.cast(), &mut info)
            };

            if err != MESA_GLINTEROP_SUCCESS as i32 {
                return Err(interop_to_cl_error(err));
            }

            Ok(Some(GLCtxManager {
                gl_ctx: GLCtx::GLX(glx_display.cast(), gl_context.cast()),
                interop_dev_info: info,
                xplat_manager,
            }))
        } else {
            Err(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)
        }
    }

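    /// Flushes the GL object, waits on the returned fence fd on every
    /// device's helper context and then exports the object via the
    /// MESA_GL_interop extension (as a dma-buf, see [GLExportManager]).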
    pub fn export_object(
        &self,
        cl_ctx: &Arc<Context>,
        target: cl_GLenum,
        flags: u32,
        miplevel: cl_GLint,
        texture: cl_GLuint,
    ) -> CLResult<GLExportManager> {
        let xplat_manager = &self.xplat_manager;
        let mut export_in = mesa_glinterop_export_in {
            version: 2,
            target,
            obj: texture,
            miplevel: miplevel as u32,
            access: cl_to_interop_flags(flags),
            ..Default::default()
        };

        let mut export_out = mesa_glinterop_export_out {
            version: 2,
            ..Default::default()
        };

        let mut fd = -1;

        let mut flush_out = mesa_glinterop_flush_out {
            version: 1,
            fence_fd: &mut fd,
            ..Default::default()
        };

        let err = unsafe {
            match &self.gl_ctx {
                GLCtx::EGL(disp, ctx) => {
                    let egl_export_object_func = xplat_manager
                        .MesaGLInteropEGLExportObject()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let egl_flush_objects_func = xplat_manager
                        .MesaGLInteropEGLFlushObjects()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let err_flush = egl_flush_objects_func(
                        disp.cast(),
                        ctx.cast(),
                        1,
                        &mut export_in,
                        &mut flush_out,
                    );
                    // TODO: use fence_server_sync in ctx inside the queue thread
                    let fence_fd = FenceFd { fd };
                    cl_ctx.devs.iter().for_each(|dev| {
                        let fence = dev.helper_ctx().import_fence(&fence_fd);
                        fence.wait();
                    });

                    if err_flush != 0 {
                        err_flush
                    } else {
                        egl_export_object_func(
                            disp.cast(),
                            ctx.cast(),
                            &mut export_in,
                            &mut export_out,
                        )
                    }
                }
                GLCtx::GLX(disp, ctx) => {
                    let glx_export_object_func = xplat_manager
                        .MesaGLInteropGLXExportObject()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let glx_flush_objects_func = xplat_manager
                        .MesaGLInteropGLXFlushObjects()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let err_flush = glx_flush_objects_func(
                        disp.cast(),
                        ctx.cast(),
                        1,
                        &mut export_in,
                        &mut flush_out,
                    );
                    // TODO: use fence_server_sync in ctx inside the queue thread
                    let fence_fd = FenceFd { fd };
                    cl_ctx.devs.iter().for_each(|dev| {
                        let fence = dev.helper_ctx().import_fence(&fence_fd);
                        fence.wait();
                    });

                    if err_flush != 0 {
                        err_flush
                    } else {
                        glx_export_object_func(
                            disp.cast(),
                            ctx.cast(),
                            &mut export_in,
                            &mut export_out,
                        )
                    }
                }
            }
        };

        if err != MESA_GLINTEROP_SUCCESS as i32 {
            return Err(interop_to_cl_error(err));
        }

        // CL_INVALID_GL_OBJECT if bufobj is not a GL buffer object or is a GL
        // buffer object but does not have an existing data store or the size
        // of the buffer is 0.
        if [GL_ARRAY_BUFFER, GL_TEXTURE_BUFFER].contains(&target) && export_out.buf_size == 0 {
            return Err(CL_INVALID_GL_OBJECT);
        }

        Ok(GLExportManager {
            export_in,
            export_out,
        })
    }
}

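/// Dimensions and layout of an exported GL object, normalized to the layout
/// the CL image code expects (see [GLExportManager::get_gl_mem_props]).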
#[derive(Clone)]
pub struct GLMemProps {
    pub height: u16,
    pub depth: u16,
    pub width: u32,
    pub offset: u32,
    pub array_size: u16,
    pub pixel_size: u8,
    pub stride: u32,
}

impl GLMemProps {
    pub fn size(&self) -> usize {
        self.height as usize
            * self.depth as usize
            * self.array_size as usize
            * self.width as usize
            * self.pixel_size as usize
    }
}

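/// Result of a MESA_GL_interop export. Owns the exported dma-buf fd and
/// closes it on drop.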
pub struct GLExportManager {
    pub export_in: mesa_glinterop_export_in,
    pub export_out: mesa_glinterop_export_out,
}

impl GLExportManager {
    pub fn get_gl_mem_props(&self) -> CLResult<GLMemProps> {
        let pixel_size = if self.is_gl_buffer() {
            1
        } else {
            format_from_gl(self.export_out.internal_format)
                .ok_or(CL_OUT_OF_HOST_MEMORY)?
                .pixel_size()
                .unwrap()
        };

        let mut height = self.export_out.height as u16;
        let mut depth = self.export_out.depth as u16;
        let mut width = self.export_out.width;
        let mut array_size = 1;
        let mut offset = 0;

        // Fix up the GL-reported dimensions to match the CL image layout.
        match self.export_in.target {
            GL_TEXTURE_1D_ARRAY => {
                array_size = height;
                height = 1;
                depth = 1;
            }
            GL_TEXTURE_2D_ARRAY => {
                array_size = depth;
                depth = 1;
            }
            GL_ARRAY_BUFFER | GL_TEXTURE_BUFFER => {
                array_size = 1;
                width = self.export_out.buf_size as u32;
                offset = self.export_out.buf_offset as u32;
                height = 1;
                depth = 1;
            }
            _ => {}
        }
        // A cube map has six faces, each being one array layer.
        if is_cube_map_face(self.export_in.target) {
            array_size = 6;
        }

        Ok(GLMemProps {
            height,
            depth,
            width,
            offset,
            array_size,
            pixel_size,
            stride: self.export_out.stride,
        })
    }

    pub fn is_gl_buffer(&self) -> bool {
        self.export_out.internal_format == GL_NONE
    }
}

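// GLExportManager is not Clone, so the exported fd is closed exactly once.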
impl Drop for GLExportManager {
    fn drop(&mut self) {
        unsafe {
            close(self.export_out.dmabuf_fd);
        }
    }
}

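/// GL-specific state carried by a GL-shared CL memory object.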
pub struct GLObject {
    pub gl_object_target: cl_GLenum,
    pub gl_object_type: cl_gl_object_type,
    pub gl_object_name: cl_GLuint,
    pub shadow_map: CLGLMappings,
}

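/// Creates a per-device 2D shadow texture matching the size of each imported
/// GL resource. Used for shared cube-map faces, which CL treats as plain 2D
/// images: the face is copied between the GL texture and this shadow slice.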
pub fn create_shadow_slice(
    cube_map: &HashMap<&'static Device, Arc<PipeResource>>,
    image_format: cl_image_format,
) -> CLResult<HashMap<&'static Device, Arc<PipeResource>>> {
    let mut slice = HashMap::new();

    for (dev, imported_gl_res) in cube_map {
        let width = imported_gl_res.width();
        let height = imported_gl_res.height();

        let shadow = dev
            .screen()
            .resource_create_texture(
                width,
                height,
                1,
                1,
                cl_mem_type_to_texture_target(CL_MEM_OBJECT_IMAGE2D),
                image_format.to_pipe_format().unwrap(),
                ResourceType::Normal,
                false,
            )
            .ok_or(CL_OUT_OF_HOST_MEMORY)?;

        slice.insert(*dev, Arc::new(shadow));
    }

    Ok(slice)
}

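/// Copies the shared face (an array layer of the GL cube map) into the
/// matching CL shadow slice.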
pub fn copy_cube_to_slice(q: &Arc<Queue>, ctx: &PipeContext, mem_objects: &[Mem]) -> CLResult<()> {
    for mem in mem_objects {
        let Mem::Image(image) = mem else {
            continue;
        };
        let gl_obj = image.gl_obj.as_ref().unwrap();
        if !is_cube_map_face(gl_obj.gl_object_target) {
            continue;
        }
        let width = image.image_desc.image_width;
        let height = image.image_desc.image_height;

        // Fill in values for doing the copy
        let idx = get_array_slice_idx(gl_obj.gl_object_target);
        let src_origin = CLVec::<usize>::new([0, 0, idx]);
        let dst_offset: [u32; 3] = [0, 0, 0];
        let region = CLVec::<usize>::new([width, height, 1]);
        let src_bx = create_pipe_box(src_origin, region, CL_MEM_OBJECT_IMAGE2D_ARRAY)?;

        let cl_res = image.get_res_of_dev(q.device)?;
        let gl_res = gl_obj.shadow_map.as_ref().unwrap().get(cl_res).unwrap();

        ctx.resource_copy_region(gl_res.as_ref(), cl_res.as_ref(), &dst_offset, &src_bx);
    }

    Ok(())
}

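/// Copies the CL shadow slice back into the shared face of the GL cube map;
/// the inverse of [copy_cube_to_slice].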
pub fn copy_slice_to_cube(q: &Arc<Queue>, ctx: &PipeContext, mem_objects: &[Mem]) -> CLResult<()> {
    for mem in mem_objects {
        let Mem::Image(image) = mem else {
            continue;
        };
        let gl_obj = image.gl_obj.as_ref().unwrap();
        if !is_cube_map_face(gl_obj.gl_object_target) {
            continue;
        }
        let width = image.image_desc.image_width;
        let height = image.image_desc.image_height;

        // Fill in values for doing the copy
        let idx = get_array_slice_idx(gl_obj.gl_object_target) as u32;
        let src_origin = CLVec::<usize>::new([0, 0, 0]);
        let dst_offset: [u32; 3] = [0, 0, idx];
        let region = CLVec::<usize>::new([width, height, 1]);
        let src_bx = create_pipe_box(src_origin, region, CL_MEM_OBJECT_IMAGE2D_ARRAY)?;

        let cl_res = image.get_res_of_dev(q.device)?;
        let gl_res = gl_obj.shadow_map.as_ref().unwrap().get(cl_res).unwrap();

        ctx.resource_copy_region(cl_res.as_ref(), gl_res.as_ref(), &dst_offset, &src_bx);
    }

    Ok(())
}

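/// Maps a MESA_GLINTEROP_* error onto the closest matching CL error code.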
pub fn interop_to_cl_error(error: i32) -> CLError {
    match error.try_into().unwrap() {
        MESA_GLINTEROP_OUT_OF_RESOURCES => CL_OUT_OF_RESOURCES,
        MESA_GLINTEROP_OUT_OF_HOST_MEMORY => CL_OUT_OF_HOST_MEMORY,
        MESA_GLINTEROP_INVALID_OPERATION => CL_INVALID_OPERATION,
        MESA_GLINTEROP_INVALID_CONTEXT | MESA_GLINTEROP_INVALID_DISPLAY => {
            CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR
        }
        MESA_GLINTEROP_INVALID_TARGET | MESA_GLINTEROP_INVALID_OBJECT => CL_INVALID_GL_OBJECT,
        MESA_GLINTEROP_INVALID_MIP_LEVEL => CL_INVALID_MIP_LEVEL,
        _ => CL_OUT_OF_HOST_MEMORY,
    }
}

pub fn cl_to_interop_flags(flags: u32) -> u32 {
    match flags {
        CL_MEM_READ_WRITE => MESA_GLINTEROP_ACCESS_READ_WRITE,
        CL_MEM_READ_ONLY => MESA_GLINTEROP_ACCESS_READ_ONLY,
        CL_MEM_WRITE_ONLY => MESA_GLINTEROP_ACCESS_WRITE_ONLY,
        _ => 0,
    }
}

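/// Maps a GL object target to the corresponding
/// `(cl_mem_object_type, cl_gl_object_type)` pair, or `CL_INVALID_VALUE` for
/// unsupported targets.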
pub fn target_from_gl(target: u32) -> CLResult<(u32, u32)> {
    // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if the OpenGL texture
    // internal format does not map to a supported OpenCL image format.
    Ok(match target {
        GL_ARRAY_BUFFER => (CL_MEM_OBJECT_BUFFER, CL_GL_OBJECT_BUFFER),
        GL_TEXTURE_BUFFER => (CL_MEM_OBJECT_IMAGE1D_BUFFER, CL_GL_OBJECT_TEXTURE_BUFFER),
        GL_RENDERBUFFER => (CL_MEM_OBJECT_IMAGE2D, CL_GL_OBJECT_RENDERBUFFER),
        GL_TEXTURE_1D => (CL_MEM_OBJECT_IMAGE1D, CL_GL_OBJECT_TEXTURE1D),
        GL_TEXTURE_1D_ARRAY => (CL_MEM_OBJECT_IMAGE1D_ARRAY, CL_GL_OBJECT_TEXTURE1D_ARRAY),
        GL_TEXTURE_CUBE_MAP_NEGATIVE_X
        | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
        | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
        | GL_TEXTURE_CUBE_MAP_POSITIVE_X
        | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
        | GL_TEXTURE_CUBE_MAP_POSITIVE_Z
        | GL_TEXTURE_2D
        | GL_TEXTURE_RECTANGLE => (CL_MEM_OBJECT_IMAGE2D, CL_GL_OBJECT_TEXTURE2D),
        GL_TEXTURE_2D_ARRAY => (CL_MEM_OBJECT_IMAGE2D_ARRAY, CL_GL_OBJECT_TEXTURE2D_ARRAY),
        GL_TEXTURE_3D => (CL_MEM_OBJECT_IMAGE3D, CL_GL_OBJECT_TEXTURE3D),
        _ => return Err(CL_INVALID_VALUE),
    })
}

pub fn is_valid_gl_texture(target: u32) -> bool {
    matches!(
        target,
        GL_TEXTURE_1D
            | GL_TEXTURE_1D_ARRAY
            | GL_TEXTURE_BUFFER
            | GL_TEXTURE_2D_ARRAY
            | GL_TEXTURE_3D
    ) || is_valid_gl_texture_2d(target)
}

pub fn is_valid_gl_texture_2d(target: u32) -> bool {
    matches!(
        target,
        GL_TEXTURE_2D
            | GL_TEXTURE_RECTANGLE
            | GL_TEXTURE_CUBE_MAP_NEGATIVE_X
            | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
            | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
            | GL_TEXTURE_CUBE_MAP_POSITIVE_X
            | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
            | GL_TEXTURE_CUBE_MAP_POSITIVE_Z
    )
}

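/// Returns the array layer a GL target maps to. The cube-map face enums are
/// consecutive starting at `GL_TEXTURE_CUBE_MAP_POSITIVE_X`, so the offset
/// from that value is the face's layer index (0..=5); other targets use
/// layer 0.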
pub fn get_array_slice_idx(target: u32) -> usize {
    match target {
        GL_TEXTURE_CUBE_MAP_NEGATIVE_X
        | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
        | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
        | GL_TEXTURE_CUBE_MAP_POSITIVE_X
        | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
        | GL_TEXTURE_CUBE_MAP_POSITIVE_Z => (target - GL_TEXTURE_CUBE_MAP_POSITIVE_X) as usize,
        _ => 0,
    }
}

pub fn is_cube_map_face(target: u32) -> bool {
    matches!(
        target,
        GL_TEXTURE_CUBE_MAP_NEGATIVE_X
            | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
            | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
            | GL_TEXTURE_CUBE_MAP_POSITIVE_X
            | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
            | GL_TEXTURE_CUBE_MAP_POSITIVE_Z
    )
}