/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) The Asahi Linux Contributors
 *
 * Based on asahi_drm.h which is
 *
 * Copyright © 2014-2018 Broadcom
 * Copyright © 2019 Collabora ltd.
 */
#ifndef _ASAHI_DRM_H_
#define _ASAHI_DRM_H_

#include "drm-uapi/drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * The UAPI defined in this file MUST NOT BE USED. End users, DO NOT attempt to
 * use upstream Mesa with asahi kernels, it will blow up. Distro packagers, DO
 * NOT patch upstream Mesa to do the same.
 */
#define DRM_ASAHI_UNSTABLE_UABI_VERSION (0xDEADBEEF)

/* Driver ioctl numbers, relative to DRM_COMMAND_BASE (see enum at the end) */
#define DRM_ASAHI_GET_PARAMS 0x00
#define DRM_ASAHI_VM_CREATE 0x01
#define DRM_ASAHI_VM_DESTROY 0x02
#define DRM_ASAHI_GEM_CREATE 0x03
#define DRM_ASAHI_GEM_MMAP_OFFSET 0x04
#define DRM_ASAHI_GEM_BIND 0x05
#define DRM_ASAHI_QUEUE_CREATE 0x06
#define DRM_ASAHI_QUEUE_DESTROY 0x07
#define DRM_ASAHI_SUBMIT 0x08
#define DRM_ASAHI_GET_TIME 0x09

/* Upper bound on the number of clusters described by @core_masks below */
#define DRM_ASAHI_MAX_CLUSTERS 32

/**
 * struct drm_asahi_params_global - Global GPU/driver parameters.
 *
 * Presumably returned through the user buffer of DRM_ASAHI_GET_PARAMS
 * (param_group 0) — confirm against the kernel side.
 */
struct drm_asahi_params_global {
	/** @unstable_uabi_version: UABI version (cf. DRM_ASAHI_UNSTABLE_UABI_VERSION) */
	__u32 unstable_uabi_version;
	/** @pad0: MBZ */
	__u32 pad0;

	/** @feat_compat: Bitmask of enum drm_asahi_feat_compat features */
	__u64 feat_compat;
	/** @feat_incompat: Bitmask of enum drm_asahi_feat_incompat features */
	__u64 feat_incompat;

	/* GPU identification */
	__u32 gpu_generation;
	__u32 gpu_variant;
	__u32 gpu_revision;
	__u32 chip_id;

	/* GPU topology */
	__u32 num_dies;
	__u32 num_clusters_total;
	__u32 num_cores_per_cluster;
	__u32 num_frags_per_cluster;
	__u32 num_gps_per_cluster;
	__u32 num_cores_total_active;
	/** @core_masks: One mask per cluster; presumably active-core bits per cluster */
	__u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];

	/* GPU VM address-space layout */
	__u32 vm_page_size;
	/** @pad1: MBZ */
	__u32 pad1;
	__u64 vm_user_start;
	__u64 vm_user_end;
	__u64 vm_usc_start;
	__u64 vm_usc_end;
	__u64 vm_kernel_min_size;

	/* Submission limits enforced by the kernel */
	__u32 max_syncs_per_submission;
	__u32 max_commands_per_submission;
	__u32 max_commands_in_flight;
	__u32 max_attachments;

	/* Clock and power information */
	__u32 timer_frequency_hz;
	__u32 min_frequency_khz;
	__u32 max_frequency_khz;
	__u32 max_power_mw;

	/*
	 * Sizes of struct drm_asahi_result_render / drm_asahi_result_compute
	 * as understood by this kernel (cf. result_offset/result_size in
	 * struct drm_asahi_command).
	 */
	__u32 result_render_size;
	__u32 result_compute_size;

	__u32 firmware_version[4];
};

enum drm_asahi_feat_compat {
	DRM_ASAHI_FEAT_SOFT_FAULTS = (1UL) << 0,
};

enum drm_asahi_feat_incompat {
	DRM_ASAHI_FEAT_MANDATORY_ZS_COMPRESSION = (1UL) << 0,
};

struct drm_asahi_get_params {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @param_group: Parameter group to fetch (MBZ) */
	__u32 param_group;

	/** @pad: MBZ */
	__u32 pad;

	/** @pointer: User pointer to write parameter struct */
	__u64 pointer;

	/** @size: Size of user buffer, max size supported on return */
	__u64 size;
};

struct drm_asahi_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @kernel_start: Start of the kernel-reserved address range */
	__u64 kernel_start;

	/** @kernel_end: End of the kernel-reserved address range */
	__u64 kernel_end;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;
};

struct drm_asahi_vm_destroy {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: VM ID to be destroyed */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;
};

/* BO creation flags for drm_asahi_gem_create.flags */
#define ASAHI_GEM_WRITEBACK (1L << 0)
#define ASAHI_GEM_VM_PRIVATE (1L << 1)

struct drm_asahi_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @size: Size of the BO */
	__u64 size;

	/** @flags: BO creation flags (ASAHI_GEM_*) */
	__u32 flags;

	/** @vm_id: VM ID to assign to the BO, if ASAHI_GEM_VM_PRIVATE is set. */
	__u32 vm_id;

	/** @handle: Returned GEM handle for the BO */
	__u32 handle;
	/*
	 * NOTE(review): struct size (28 bytes) is not a multiple of 8; the
	 * kernel side presumably accounts for the trailing padding — confirm
	 * before relying on sizeof() in copy_to/from_user paths.
	 */
};

struct drm_asahi_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;
};

enum drm_asahi_bind_op {
	ASAHI_BIND_OP_BIND = 0,
	ASAHI_BIND_OP_UNBIND = 1,
	ASAHI_BIND_OP_UNBIND_ALL = 2,
};

/* Mapping permission flags for drm_asahi_gem_bind.flags */
#define ASAHI_BIND_READ (1L << 0)
#define ASAHI_BIND_WRITE (1L << 1)

struct drm_asahi_gem_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @op: Bind operation (one of enum drm_asahi_bind_op) */
	__u32 op;

	/** @flags: One or more of ASAHI_BIND_* */
	__u32 flags;

	/** @handle: GEM object to bind */
	__u32 handle;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/** @offset: Offset into the object */
	__u64 offset;

	/** @range: Number of bytes from the object to bind to addr */
	__u64 range;

	/** @addr: Address to bind to */
	__u64 addr;
};

enum drm_asahi_cmd_type {
	DRM_ASAHI_CMD_RENDER = 0,
	DRM_ASAHI_CMD_BLIT = 1,
	DRM_ASAHI_CMD_COMPUTE = 2,
};

/* Note: this is an enum so that it can be resolved by Rust bindgen. */
enum drm_asahi_queue_cap {
	DRM_ASAHI_QUEUE_CAP_RENDER = (1UL << DRM_ASAHI_CMD_RENDER),
	DRM_ASAHI_QUEUE_CAP_BLIT = (1UL << DRM_ASAHI_CMD_BLIT),
	DRM_ASAHI_QUEUE_CAP_COMPUTE = (1UL << DRM_ASAHI_CMD_COMPUTE),
};

struct drm_asahi_queue_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @flags: MBZ */
	__u32 flags;

	/** @vm_id: The ID of the VM this queue is bound to */
	__u32 vm_id;

	/** @queue_caps: Bitmask of DRM_ASAHI_QUEUE_CAP_* */
	__u32 queue_caps;

	/** @priority: Queue priority, 0-3 */
	__u32 priority;

	/** @queue_id: The returned queue ID */
	__u32 queue_id;
};

struct drm_asahi_queue_destroy {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @queue_id: The queue ID to be destroyed */
	__u32 queue_id;
};

enum drm_asahi_sync_type {
	DRM_ASAHI_SYNC_SYNCOBJ = 0,
	DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
};

struct drm_asahi_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @sync_type: One of drm_asahi_sync_type */
	__u32 sync_type;

	/** @handle: The sync object handle */
	__u32 handle;

	/** @timeline_value: Timeline value for timeline sync objects */
	__u64 timeline_value;
};

enum drm_asahi_subqueue {
	DRM_ASAHI_SUBQUEUE_RENDER = 0, /* Also blit */
	DRM_ASAHI_SUBQUEUE_COMPUTE = 1,
	DRM_ASAHI_SUBQUEUE_COUNT = 2,
};

/* Sentinel for drm_asahi_command.barriers[]: no barrier on that subqueue */
#define DRM_ASAHI_BARRIER_NONE ~(0U)

struct drm_asahi_command {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @cmd_type: One of drm_asahi_cmd_type */
	__u32 cmd_type;

	/** @flags: Flags for command submission */
	__u32 flags;

	/** @cmd_buffer: Pointer to the appropriate command buffer structure */
	__u64 cmd_buffer;

	/** @cmd_buffer_size: Size of the command buffer structure */
	__u64 cmd_buffer_size;

	/** @result_offset: Offset into the result BO to return information about this
	 * command */
	__u64 result_offset;

	/** @result_size: Size of the result data structure */
	__u64 result_size;

	/** @barriers: Array of command indices per subqueue to wait on */
	__u32 barriers[DRM_ASAHI_SUBQUEUE_COUNT];
};

struct drm_asahi_submit {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @in_syncs: An optional array of drm_asahi_sync to wait on before starting
	 * this job. */
	__u64 in_syncs;

	/** @out_syncs: An optional array of drm_asahi_sync objects to signal upon
	 * completion. */
	__u64 out_syncs;

	/** @commands: Pointer to the drm_asahi_command array of commands to submit. */
	__u64 commands;

	/** @flags: Flags for command submission (MBZ) */
	__u32 flags;

	/** @queue_id: The queue ID to be submitted to */
	__u32 queue_id;

	/** @result_handle: An optional BO handle to place result data in */
	__u32 result_handle;

	/** @in_sync_count: Number of sync objects to wait on before starting this
	 * job. */
	__u32 in_sync_count;

	/** @out_sync_count: Number of sync objects to signal upon completion of this
	 * job. */
	__u32 out_sync_count;

	/** @command_count: Number of commands to be submitted */
	__u32 command_count;
};

struct drm_asahi_attachment {
	/** @pointer: Base address of the attachment */
	__u64 pointer;
	/** @size: Size of the attachment in bytes */
	__u64 size;
	/** @order: Power of 2 exponent related to attachment size (?) */
	__u32 order;
	/** @flags: MBZ */
	__u32 flags;
};

/* Flags for drm_asahi_cmd_render.flags */
#define ASAHI_RENDER_NO_CLEAR_PIPELINE_TEXTURES (1UL << 0)
#define ASAHI_RENDER_SET_WHEN_RELOADING_Z_OR_S (1UL << 1)
#define ASAHI_RENDER_VERTEX_SPILLS (1UL << 2)
#define ASAHI_RENDER_PROCESS_EMPTY_TILES (1UL << 3)
#define ASAHI_RENDER_NO_VERTEX_CLUSTERING (1UL << 4)
#define ASAHI_RENDER_MSAA_ZS (1UL << 5)
/* XXX check */
#define ASAHI_RENDER_NO_PREEMPTION (1UL << 6)

/**
 * struct drm_asahi_cmd_render - Command buffer for DRM_ASAHI_CMD_RENDER.
 *
 * Largely raw hardware/firmware state; field semantics beyond the names are
 * hardware-dependent and intentionally undocumented here.
 */
struct drm_asahi_cmd_render {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @flags: Zero or more of ASAHI_RENDER_* */
	__u64 flags;

	__u64 encoder_ptr;
	__u64 vertex_usc_base;
	__u64 fragment_usc_base;

	/* Pointers to drm_asahi_attachment arrays, with their counts */
	__u64 vertex_attachments;
	__u64 fragment_attachments;
	__u32 vertex_attachment_count;
	__u32 fragment_attachment_count;

	/* Helper (spill) program configuration per stage */
	__u32 vertex_helper_program;
	__u32 fragment_helper_program;
	__u32 vertex_helper_cfg;
	__u32 fragment_helper_cfg;
	__u64 vertex_helper_arg;
	__u64 fragment_helper_arg;

	/* Depth buffer load/store/partial addresses and strides */
	__u64 depth_buffer_load;
	__u64 depth_buffer_load_stride;
	__u64 depth_buffer_store;
	__u64 depth_buffer_store_stride;
	__u64 depth_buffer_partial;
	__u64 depth_buffer_partial_stride;
	__u64 depth_meta_buffer_load;
	__u64 depth_meta_buffer_load_stride;
	__u64 depth_meta_buffer_store;
	__u64 depth_meta_buffer_store_stride;
	__u64 depth_meta_buffer_partial;
	__u64 depth_meta_buffer_partial_stride;

	/* Stencil buffer load/store/partial addresses and strides */
	__u64 stencil_buffer_load;
	__u64 stencil_buffer_load_stride;
	__u64 stencil_buffer_store;
	__u64 stencil_buffer_store_stride;
	__u64 stencil_buffer_partial;
	__u64 stencil_buffer_partial_stride;
	__u64 stencil_meta_buffer_load;
	__u64 stencil_meta_buffer_load_stride;
	__u64 stencil_meta_buffer_store;
	__u64 stencil_meta_buffer_store_stride;
	__u64 stencil_meta_buffer_partial;
	__u64 stencil_meta_buffer_partial_stride;

	__u64 scissor_array;
	__u64 depth_bias_array;
	__u64 visibility_result_buffer;

	/* Sampler heaps per stage */
	__u64 vertex_sampler_array;
	__u32 vertex_sampler_count;
	__u32 vertex_sampler_max;

	__u64 fragment_sampler_array;
	__u32 fragment_sampler_count;
	__u32 fragment_sampler_max;

	/* Raw hardware register values (hardware-dependent) */
	__u64 zls_ctrl;
	__u64 ppp_multisamplectl;
	__u32 ppp_ctrl;

	/* Framebuffer and tile dimensions */
	__u32 fb_width;
	__u32 fb_height;

	__u32 utile_width;
	__u32 utile_height;

	__u32 samples;
	__u32 layers;

	/* Encoder/command IDs (TA = tiler, 3D = fragment pass) */
	__u32 encoder_id;
	__u32 cmd_ta_id;
	__u32 cmd_3d_id;

	__u32 sample_size;
	__u32 tib_blocks;
	__u32 iogpu_unk_214;

	__u32 merge_upper_x;
	__u32 merge_upper_y;

	/* Background/EOT pipelines: load/store plus partial-render variants */
	__u32 load_pipeline;
	__u32 load_pipeline_bind;

	__u32 store_pipeline;
	__u32 store_pipeline_bind;

	__u32 partial_reload_pipeline;
	__u32 partial_reload_pipeline_bind;

	__u32 partial_store_pipeline;
	__u32 partial_store_pipeline_bind;

	__u32 depth_dimensions;
	__u32 isp_bgobjdepth;
	__u32 isp_bgobjvals;
};

/*
 * Flags for drm_asahi_cmd_render_unknowns.flags: each bit enables one of the
 * correspondingly named override fields below.
 */
#define ASAHI_RENDER_UNK_UNK1 (1UL << 0)
#define ASAHI_RENDER_UNK_SET_TILE_CONFIG (1UL << 1)
#define ASAHI_RENDER_UNK_SET_UTILE_CONFIG (1UL << 2)
#define ASAHI_RENDER_UNK_SET_AUX_FB_UNK (1UL << 3)
#define ASAHI_RENDER_UNK_SET_G14_UNK (1UL << 4)

#define ASAHI_RENDER_UNK_SET_FRG_UNK_140 (1UL << 20)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_158 (1UL << 21)
#define ASAHI_RENDER_UNK_SET_FRG_TILECFG (1UL << 22)
#define ASAHI_RENDER_UNK_SET_LOAD_BGOBJVALS (1UL << 23)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_38 (1UL << 24)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_3C (1UL << 25)

#define ASAHI_RENDER_UNK_SET_RELOAD_ZLSCTRL (1UL << 27)
#define ASAHI_RENDER_UNK_SET_UNK_BUF_10 (1UL << 28)
#define ASAHI_RENDER_UNK_SET_FRG_UNK_MASK (1UL << 29)

#define ASAHI_RENDER_UNK_SET_IOGPU_UNK54 (1UL << 40)
#define ASAHI_RENDER_UNK_SET_IOGPU_UNK56 (1UL << 41)
#define ASAHI_RENDER_UNK_SET_TILING_CONTROL (1UL << 42)
#define ASAHI_RENDER_UNK_SET_TILING_CONTROL_2 (1UL << 43)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_F0 (1UL << 44)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_F8 (1UL << 45)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_118 (1UL << 46)
#define ASAHI_RENDER_UNK_SET_VTX_UNK_MASK (1UL << 47)

/* Extension type ID for struct drm_asahi_cmd_render_unknowns */
#define ASAHI_RENDER_EXT_UNKNOWNS 0xff00

/* XXX: Do not upstream this struct */
struct drm_asahi_cmd_render_unknowns {
	/** @type: Type ID of this extension */
	__u32 type;
	/** @pad: MBZ */
	__u32 pad;
	/** @next: Pointer to the next extension struct, if any */
	__u64 next;

	/** @flags: Zero or more of ASAHI_RENDER_UNK_*, gating the fields below */
	__u64 flags;

	__u64 tile_config;
	__u64 utile_config;

	__u64 aux_fb_unk;
	__u64 g14_unk;
	__u64 frg_unk_140;
	__u64 frg_unk_158;
	__u64 frg_tilecfg;
	__u64 load_bgobjvals;
	__u64 frg_unk_38;
	__u64 frg_unk_3c;
	__u64 reload_zlsctrl;
	__u64 unk_buf_10;
	__u64 frg_unk_mask;

	__u64 iogpu_unk54;
	__u64 iogpu_unk56;
	__u64 tiling_control;
	__u64 tiling_control_2;
	__u64 vtx_unk_f0;
	__u64 vtx_unk_f8;
	__u64 vtx_unk_118;
	__u64 vtx_unk_mask;
};

/* XXX check */
#define ASAHI_COMPUTE_NO_PREEMPTION (1UL << 0)

/**
 * struct drm_asahi_cmd_compute - Command buffer for DRM_ASAHI_CMD_COMPUTE.
 */
struct drm_asahi_cmd_compute {
	/** @flags: Zero or more of ASAHI_COMPUTE_* */
	__u64 flags;

	__u64 encoder_ptr;
	__u64 encoder_end;
	__u64 usc_base;

	/** @attachments: Pointer to a drm_asahi_attachment array */
	__u64 attachments;
	__u32 attachment_count;
	/** @pad: MBZ */
	__u32 pad;

	/* Helper (spill) program configuration */
	__u32 helper_program;
	__u32 helper_cfg;
	__u64 helper_arg;

	__u32 encoder_id;
	__u32 cmd_id;

	/* Sampler heap */
	__u64 sampler_array;
	__u32 sampler_count;
	__u32 sampler_max;

	__u32 iogpu_unk_40;
	__u32 unk_mask;
};

enum drm_asahi_status {
	DRM_ASAHI_STATUS_PENDING = 0,
	DRM_ASAHI_STATUS_COMPLETE,
	DRM_ASAHI_STATUS_UNKNOWN_ERROR,
	DRM_ASAHI_STATUS_TIMEOUT,
	DRM_ASAHI_STATUS_FAULT,
	DRM_ASAHI_STATUS_KILLED,
	DRM_ASAHI_STATUS_NO_DEVICE,
};

enum drm_asahi_fault {
	DRM_ASAHI_FAULT_NONE = 0,
	DRM_ASAHI_FAULT_UNKNOWN,
	DRM_ASAHI_FAULT_UNMAPPED,
	DRM_ASAHI_FAULT_AF_FAULT,
	DRM_ASAHI_FAULT_WRITE_ONLY,
	DRM_ASAHI_FAULT_READ_ONLY,
	DRM_ASAHI_FAULT_NO_ACCESS,
};

struct drm_asahi_result_info {
	/** @status: One of enum drm_asahi_status */
	__u32 status;

	/** @fault_type: One of enum drm_asahi_fault */
	__u32 fault_type;

	/** @unit: Unit number, hardware dependent */
	__u32 unit;

	/** @sideband: Sideband information, hardware dependent */
	__u32 sideband;

	/** @level: Page table level at which the fault occurred, hardware dependent */
	__u8 level;

	/** @is_read: Fault was a read */
	__u8 is_read;

	/** @pad: MBZ */
	__u16 pad;

	/** @extra: Extra bits, hardware dependent */
	__u32 extra;

	/** @address: Fault address, cache line aligned */
	__u64 address;
};

/* Flags for drm_asahi_result_render.flags */
#define DRM_ASAHI_RESULT_RENDER_TVB_GROW_OVF (1UL << 0)
#define DRM_ASAHI_RESULT_RENDER_TVB_GROW_MIN (1UL << 1)
#define DRM_ASAHI_RESULT_RENDER_TVB_OVERFLOWED (1UL << 2)

struct drm_asahi_result_render {
	/** @info: Common result information */
	struct drm_asahi_result_info info;

	/** @flags: Zero or more of DRM_ASAHI_RESULT_RENDER_* */
	__u64 flags;

	/** @vertex_ts_start: Timestamp of the start of vertex processing */
	__u64 vertex_ts_start;

	/** @vertex_ts_end: Timestamp of the end of vertex processing */
	__u64 vertex_ts_end;

	/** @fragment_ts_start: Timestamp of the start of fragment processing */
	__u64 fragment_ts_start;

	/** @fragment_ts_end: Timestamp of the end of fragment processing */
	__u64 fragment_ts_end;

	/** @tvb_size_bytes: TVB size at the start of this render */
	__u64 tvb_size_bytes;

	/** @tvb_usage_bytes: Total TVB usage in bytes for this render */
	__u64 tvb_usage_bytes;

	/** @num_tvb_overflows: Number of TVB overflows that occurred for this render
	 */
	__u32 num_tvb_overflows;
};

struct drm_asahi_result_compute {
	/** @info: Common result information */
	struct drm_asahi_result_info info;

	/** @flags: Zero or more of DRM_ASAHI_RESULT_COMPUTE_* */
	__u64 flags;

	/** @ts_start: Timestamp of the start of this compute command */
	__u64 ts_start;

	/** @ts_end: Timestamp of the end of this compute command */
	__u64 ts_end;
};

struct drm_asahi_get_time {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @flags: MBZ. */
	__u64 flags;

	/** @tv_sec: On return, seconds part of a point in time */
	__s64 tv_sec;

	/** @tv_nsec: On return, nanoseconds part of a point in time */
	__s64 tv_nsec;

	/** @gpu_timestamp: On return, the GPU timestamp at that point in time */
	__u64 gpu_timestamp;
};

/* Note: this is an enum so that it can be resolved by Rust bindgen. */
enum {
	DRM_IOCTL_ASAHI_GET_PARAMS = DRM_IOWR(
	    DRM_COMMAND_BASE + DRM_ASAHI_GET_PARAMS, struct drm_asahi_get_params),
	DRM_IOCTL_ASAHI_VM_CREATE = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_VM_CREATE,
	                                     struct drm_asahi_vm_create),
	DRM_IOCTL_ASAHI_VM_DESTROY = DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_VM_DESTROY,
	                                     struct drm_asahi_vm_destroy),
	DRM_IOCTL_ASAHI_GEM_CREATE = DRM_IOWR(
	    DRM_COMMAND_BASE + DRM_ASAHI_GEM_CREATE, struct drm_asahi_gem_create),
	DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET =
	    DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GEM_MMAP_OFFSET,
	             struct drm_asahi_gem_mmap_offset),
	DRM_IOCTL_ASAHI_GEM_BIND =
	    DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_GEM_BIND, struct drm_asahi_gem_bind),
	DRM_IOCTL_ASAHI_QUEUE_CREATE = DRM_IOWR(
	    DRM_COMMAND_BASE + DRM_ASAHI_QUEUE_CREATE, struct drm_asahi_queue_create),
	DRM_IOCTL_ASAHI_QUEUE_DESTROY =
	    DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_QUEUE_DESTROY,
	            struct drm_asahi_queue_destroy),
	DRM_IOCTL_ASAHI_SUBMIT =
	    DRM_IOW(DRM_COMMAND_BASE + DRM_ASAHI_SUBMIT, struct drm_asahi_submit),
	DRM_IOCTL_ASAHI_GET_TIME = DRM_IOWR(DRM_COMMAND_BASE + DRM_ASAHI_GET_TIME,
	                                    struct drm_asahi_get_time),
};

#if defined(__cplusplus)
}
#endif

#endif /* _ASAHI_DRM_H_ */