xref: /aosp_15_r20/external/libdrm/include/drm/drm.h (revision 7688df22e49036ff52a766b7101da3a49edadb8c)
1 /*
2  * Header for the Direct Rendering Manager
3  *
4  * Author: Rickard E. (Rik) Faith <[email protected]>
5  *
6  * Acknowledgments:
7  * Dec 1999, Richard Henderson <[email protected]>, move to generic cmpxchg.
8  */
9 
10 /*
11  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
12  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
13  * All rights reserved.
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a
16  * copy of this software and associated documentation files (the "Software"),
17  * to deal in the Software without restriction, including without limitation
18  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
19  * and/or sell copies of the Software, and to permit persons to whom the
20  * Software is furnished to do so, subject to the following conditions:
21  *
22  * The above copyright notice and this permission notice (including the next
23  * paragraph) shall be included in all copies or substantial portions of the
24  * Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
29  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
30  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
31  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
32  * OTHER DEALINGS IN THE SOFTWARE.
33  */
34 
35 #ifndef _DRM_H_
36 #define _DRM_H_
37 
38 #if defined(__linux__)
39 
40 #include <linux/types.h>
41 #include <asm/ioctl.h>
42 typedef unsigned int drm_handle_t;
43 
44 #else /* One of the BSDs */
45 
46 #include <stdint.h>
47 #include <sys/ioccom.h>
48 #include <sys/types.h>
49 typedef int8_t   __s8;
50 typedef uint8_t  __u8;
51 typedef int16_t  __s16;
52 typedef uint16_t __u16;
53 typedef int32_t  __s32;
54 typedef uint32_t __u32;
55 typedef int64_t  __s64;
56 typedef uint64_t __u64;
57 typedef size_t   __kernel_size_t;
58 typedef unsigned long drm_handle_t;
59 
60 #endif
61 
62 #if defined(__cplusplus)
63 extern "C" {
64 #endif
65 
66 #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
67 #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
68 #define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
69 #define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */
70 
71 #define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
72 #define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
73 #define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
74 #define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
75 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
76 
77 typedef unsigned int drm_context_t;
78 typedef unsigned int drm_drawable_t;
79 typedef unsigned int drm_magic_t;
80 
81 /*
82  * Cliprect.
83  *
84  * \warning: If you change this structure, make sure you change
85  * XF86DRIClipRectRec in the server as well
86  *
87  * \note KW: Actually it's illegal to change either for
88  * backwards-compatibility reasons.
89  */
90 struct drm_clip_rect {
91 	unsigned short x1;
92 	unsigned short y1;
93 	unsigned short x2;
94 	unsigned short y2;
95 };
96 
97 /*
98  * Drawable information.
99  */
100 struct drm_drawable_info {
101 	unsigned int num_rects;
102 	struct drm_clip_rect *rects;
103 };
104 
105 /*
106  * Texture region.
107  */
108 struct drm_tex_region {
109 	unsigned char next;
110 	unsigned char prev;
111 	unsigned char in_use;
112 	unsigned char padding;
113 	unsigned int age;
114 };
115 
116 /*
117  * Hardware lock.
118  *
119  * The lock structure is a simple cache-line aligned integer.  To avoid
120  * processor bus contention on a multiprocessor system, there should not be any
121  * other data stored in the same cache line.
122  */
123 struct drm_hw_lock {
124 	__volatile__ unsigned int lock;		/**< lock variable */
125 	char padding[60];			/**< Pad to cache line */
126 };
127 
128 /*
129  * DRM_IOCTL_VERSION ioctl argument type.
130  *
131  * \sa drmGetVersion().
132  */
133 struct drm_version {
134 	int version_major;	  /**< Major version */
135 	int version_minor;	  /**< Minor version */
136 	int version_patchlevel;	  /**< Patch level */
137 	__kernel_size_t name_len;	  /**< Length of name buffer */
138 	char *name;	  /**< Name of driver */
139 	__kernel_size_t date_len;	  /**< Length of date buffer */
140 	char *date;	  /**< User-space buffer to hold date */
141 	__kernel_size_t desc_len;	  /**< Length of desc buffer */
142 	char *desc;	  /**< User-space buffer to hold desc */
143 };
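
/*
 * Example (illustrative sketch, not part of the UAPI): DRM_IOCTL_VERSION is
 * typically issued twice -- once with zeroed lengths and NULL buffers so the
 * kernel reports the string lengths, then again after allocating buffers of
 * that size. Assumes an open DRM file descriptor "fd" plus <stdlib.h>,
 * <string.h> and <sys/ioctl.h>; error handling is omitted.
 *
 *	struct drm_version ver;
 *	memset(&ver, 0, sizeof(ver));
 *	ioctl(fd, DRM_IOCTL_VERSION, &ver);	// fills only the *_len fields
 *	ver.name = malloc(ver.name_len + 1);
 *	ver.date = malloc(ver.date_len + 1);
 *	ver.desc = malloc(ver.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &ver);	// fills the buffers
 *	ver.name[ver.name_len] = '\0';		// strings are not NUL-terminated
 */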
144 
145 /*
146  * DRM_IOCTL_GET_UNIQUE ioctl argument type.
147  *
148  * \sa drmGetBusid() and drmSetBusId().
149  */
150 struct drm_unique {
151 	__kernel_size_t unique_len;	  /**< Length of unique */
152 	char *unique;	  /**< Unique name for driver instantiation */
153 };
154 
155 struct drm_list {
156 	int count;		  /**< Length of user-space structures */
157 	struct drm_version *version;
158 };
159 
160 struct drm_block {
161 	int unused;
162 };
163 
164 /*
165  * DRM_IOCTL_CONTROL ioctl argument type.
166  *
167  * \sa drmCtlInstHandler() and drmCtlUninstHandler().
168  */
169 struct drm_control {
170 	enum {
171 		DRM_ADD_COMMAND,
172 		DRM_RM_COMMAND,
173 		DRM_INST_HANDLER,
174 		DRM_UNINST_HANDLER
175 	} func;
176 	int irq;
177 };
178 
179 /*
180  * Type of memory to map.
181  */
182 enum drm_map_type {
183 	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
184 	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
185 	_DRM_SHM = 2,		  /**< shared, cached */
186 	_DRM_AGP = 3,		  /**< AGP/GART */
187 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
188 	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
189 };
190 
191 /*
192  * Memory mapping flags.
193  */
194 enum drm_map_flags {
195 	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
196 	_DRM_READ_ONLY = 0x02,
197 	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
198 	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
199 	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
200 	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
201 	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
202 	_DRM_DRIVER = 0x80	     /**< Managed by driver */
203 };
204 
205 struct drm_ctx_priv_map {
206 	unsigned int ctx_id;	 /**< Context requesting private mapping */
207 	void *handle;		 /**< Handle of map */
208 };
209 
210 /*
211  * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
212  * argument type.
213  *
214  * \sa drmAddMap().
215  */
216 struct drm_map {
217 	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
218 	unsigned long size;	 /**< Requested physical size (bytes) */
219 	enum drm_map_type type;	 /**< Type of memory to map */
220 	enum drm_map_flags flags;	 /**< Flags */
221 	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
222 				 /**< Kernel-space: kernel-virtual address */
223 	int mtrr;		 /**< MTRR slot used */
224 	/*   Private data */
225 };
226 
227 /*
228  * DRM_IOCTL_GET_CLIENT ioctl argument type.
229  */
230 struct drm_client {
231 	int idx;		/**< Which client desired? */
232 	int auth;		/**< Is client authenticated? */
233 	unsigned long pid;	/**< Process ID */
234 	unsigned long uid;	/**< User ID */
235 	unsigned long magic;	/**< Magic */
236 	unsigned long iocs;	/**< Ioctl count */
237 };
238 
239 enum drm_stat_type {
240 	_DRM_STAT_LOCK,
241 	_DRM_STAT_OPENS,
242 	_DRM_STAT_CLOSES,
243 	_DRM_STAT_IOCTLS,
244 	_DRM_STAT_LOCKS,
245 	_DRM_STAT_UNLOCKS,
246 	_DRM_STAT_VALUE,	/**< Generic value */
247 	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
248 	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */
249 
250 	_DRM_STAT_IRQ,		/**< IRQ */
251 	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
252 	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
253 	_DRM_STAT_DMA,		/**< DMA */
254 	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
255 	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
256 	    /* Add to the *END* of the list */
257 };
258 
259 /*
260  * DRM_IOCTL_GET_STATS ioctl argument type.
261  */
262 struct drm_stats {
263 	unsigned long count;
264 	struct {
265 		unsigned long value;
266 		enum drm_stat_type type;
267 	} data[15];
268 };
269 
270 /*
271  * Hardware locking flags.
272  */
273 enum drm_lock_flags {
274 	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
275 	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
276 	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
277 	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
278 	/* These *HALT* flags aren't supported yet
279 	   -- they will be used to support the
280 	   full-screen DGA-like mode. */
281 	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
282 	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
283 };
284 
285 /*
286  * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
287  *
288  * \sa drmGetLock() and drmUnlock().
289  */
290 struct drm_lock {
291 	int context;
292 	enum drm_lock_flags flags;
293 };
294 
295 /*
296  * DMA flags
297  *
298  * \warning
299  * These values \e must match xf86drm.h.
300  *
301  * \sa drm_dma.
302  */
303 enum drm_dma_flags {
304 	/* Flags for DMA buffer dispatch */
305 	_DRM_DMA_BLOCK = 0x01,	      /**<
306 				       * Block until buffer dispatched.
307 				       *
308 				       * \note The buffer may not yet have
309 				       * been processed by the hardware --
310 				       * getting a hardware lock with the
311 				       * hardware quiescent will ensure
312 				       * that the buffer has been
313 				       * processed.
314 				       */
315 	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
316 	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */
317 
318 	/* Flags for DMA buffer request */
319 	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
320 	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
321 	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
322 };
323 
324 /*
325  * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
326  *
327  * \sa drmAddBufs().
328  */
329 struct drm_buf_desc {
330 	int count;		 /**< Number of buffers of this size */
331 	int size;		 /**< Size in bytes */
332 	int low_mark;		 /**< Low water mark */
333 	int high_mark;		 /**< High water mark */
334 	enum {
335 		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
336 		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
337 		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
338 		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
339 		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
340 	} flags;
341 	unsigned long agp_start; /**<
342 				  * Start address of where the AGP buffers are
343 				  * in the AGP aperture
344 				  */
345 };
346 
347 /*
348  * DRM_IOCTL_INFO_BUFS ioctl argument type.
349  */
350 struct drm_buf_info {
351 	int count;		/**< Entries in list */
352 	struct drm_buf_desc *list;
353 };
354 
355 /*
356  * DRM_IOCTL_FREE_BUFS ioctl argument type.
357  */
358 struct drm_buf_free {
359 	int count;
360 	int *list;
361 };
362 
363 /*
364  * Buffer information
365  *
366  * \sa drm_buf_map.
367  */
368 struct drm_buf_pub {
369 	int idx;		       /**< Index into the master buffer list */
370 	int total;		       /**< Buffer size */
371 	int used;		       /**< Amount of buffer in use (for DMA) */
372 	void *address;	       /**< Address of buffer */
373 };
374 
375 /*
376  * DRM_IOCTL_MAP_BUFS ioctl argument type.
377  */
378 struct drm_buf_map {
379 	int count;		/**< Length of the buffer list */
380 #ifdef __cplusplus
381 	void *virt;
382 #else
383 	void *virtual;		/**< Mmap'd area in user-virtual */
384 #endif
385 	struct drm_buf_pub *list;	/**< Buffer information */
386 };
387 
388 /*
389  * DRM_IOCTL_DMA ioctl argument type.
390  *
391  * Indices here refer to the offset into the buffer list in drm_buf_get.
392  *
393  * \sa drmDMA().
394  */
395 struct drm_dma {
396 	int context;			  /**< Context handle */
397 	int send_count;			  /**< Number of buffers to send */
398 	int *send_indices;	  /**< List of handles to buffers */
399 	int *send_sizes;		  /**< Lengths of data to send */
400 	enum drm_dma_flags flags;	  /**< Flags */
401 	int request_count;		  /**< Number of buffers requested */
402 	int request_size;		  /**< Desired size for buffers */
403 	int *request_indices;	  /**< Buffer information */
404 	int *request_sizes;
405 	int granted_count;		  /**< Number of buffers granted */
406 };
407 
408 enum drm_ctx_flags {
409 	_DRM_CONTEXT_PRESERVED = 0x01,
410 	_DRM_CONTEXT_2DONLY = 0x02
411 };
412 
413 /*
414  * DRM_IOCTL_ADD_CTX ioctl argument type.
415  *
416  * \sa drmCreateContext() and drmDestroyContext().
417  */
418 struct drm_ctx {
419 	drm_context_t handle;
420 	enum drm_ctx_flags flags;
421 };
422 
423 /*
424  * DRM_IOCTL_RES_CTX ioctl argument type.
425  */
426 struct drm_ctx_res {
427 	int count;
428 	struct drm_ctx *contexts;
429 };
430 
431 /*
432  * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
433  */
434 struct drm_draw {
435 	drm_drawable_t handle;
436 };
437 
438 /*
439  * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
440  */
441 typedef enum {
442 	DRM_DRAWABLE_CLIPRECTS
443 } drm_drawable_info_type_t;
444 
445 struct drm_update_draw {
446 	drm_drawable_t handle;
447 	unsigned int type;
448 	unsigned int num;
449 	unsigned long long data;
450 };
451 
452 /*
453  * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
454  */
455 struct drm_auth {
456 	drm_magic_t magic;
457 };
458 
459 /*
460  * DRM_IOCTL_IRQ_BUSID ioctl argument type.
461  *
462  * \sa drmGetInterruptFromBusID().
463  */
464 struct drm_irq_busid {
465 	int irq;	/**< IRQ number */
466 	int busnum;	/**< bus number */
467 	int devnum;	/**< device number */
468 	int funcnum;	/**< function number */
469 };
470 
471 enum drm_vblank_seq_type {
472 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
473 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
474 	/* bits 1-6 are reserved for high crtcs */
475 	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
476 	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
477 	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
478 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
479 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
480 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
481 };
482 #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
483 
484 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
485 #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
486 				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
487 
488 struct drm_wait_vblank_request {
489 	enum drm_vblank_seq_type type;
490 	unsigned int sequence;
491 	unsigned long signal;
492 };
493 
494 struct drm_wait_vblank_reply {
495 	enum drm_vblank_seq_type type;
496 	unsigned int sequence;
497 	long tval_sec;
498 	long tval_usec;
499 };
500 
501 /*
502  * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
503  *
504  * \sa drmWaitVBlank().
505  */
506 union drm_wait_vblank {
507 	struct drm_wait_vblank_request request;
508 	struct drm_wait_vblank_reply reply;
509 };
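
/*
 * Example (illustrative sketch, not part of the UAPI): waiting for the next
 * vblank on the first CRTC with a relative wait of one vblank. Assumes an
 * open DRM file descriptor "fd" and <sys/ioctl.h>; error handling is omitted.
 *
 *	union drm_wait_vblank vbl;
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *	// vbl.reply.sequence and vbl.reply.tval_sec/tval_usec now hold the
 *	// sequence number and timestamp of the vblank that ended the wait.
 *
 * For CRTCs other than the first, the CRTC index is encoded in the high bits
 * of the type field (see _DRM_VBLANK_HIGH_CRTC_MASK and
 * DRM_CAP_VBLANK_HIGH_CRTC).
 */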
510 
511 #define _DRM_PRE_MODESET 1
512 #define _DRM_POST_MODESET 2
513 
514 /*
515  * DRM_IOCTL_MODESET_CTL ioctl argument type
516  *
517  * \sa drmModesetCtl().
518  */
519 struct drm_modeset_ctl {
520 	__u32 crtc;
521 	__u32 cmd;
522 };
523 
524 /*
525  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
526  *
527  * \sa drmAgpEnable().
528  */
529 struct drm_agp_mode {
530 	unsigned long mode;	/**< AGP mode */
531 };
532 
533 /*
534  * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
535  *
536  * \sa drmAgpAlloc() and drmAgpFree().
537  */
538 struct drm_agp_buffer {
539 	unsigned long size;	/**< In bytes -- will round to page boundary */
540 	unsigned long handle;	/**< Used for binding / unbinding */
541 	unsigned long type;	/**< Type of memory to allocate */
542 	unsigned long physical;	/**< Physical used by i810 */
543 };
544 
545 /*
546  * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
547  *
548  * \sa drmAgpBind() and drmAgpUnbind().
549  */
550 struct drm_agp_binding {
551 	unsigned long handle;	/**< From drm_agp_buffer */
552 	unsigned long offset;	/**< In bytes -- will round to page boundary */
553 };
554 
555 /*
556  * DRM_IOCTL_AGP_INFO ioctl argument type.
557  *
558  * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
559  * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
560  * drmAgpVendorId() and drmAgpDeviceId().
561  */
562 struct drm_agp_info {
563 	int agp_version_major;
564 	int agp_version_minor;
565 	unsigned long mode;
566 	unsigned long aperture_base;	/* physical address */
567 	unsigned long aperture_size;	/* bytes */
568 	unsigned long memory_allowed;	/* bytes */
569 	unsigned long memory_used;
570 
571 	/* PCI information */
572 	unsigned short id_vendor;
573 	unsigned short id_device;
574 };
575 
576 /*
577  * DRM_IOCTL_SG_ALLOC ioctl argument type.
578  */
579 struct drm_scatter_gather {
580 	unsigned long size;	/**< In bytes -- will round to page boundary */
581 	unsigned long handle;	/**< Used for mapping / unmapping */
582 };
583 
584 /*
585  * DRM_IOCTL_SET_VERSION ioctl argument type.
586  */
587 struct drm_set_version {
588 	int drm_di_major;
589 	int drm_di_minor;
590 	int drm_dd_major;
591 	int drm_dd_minor;
592 };
593 
594 /* DRM_IOCTL_GEM_CLOSE ioctl argument type */
595 struct drm_gem_close {
596 	/** Handle of the object to be closed. */
597 	__u32 handle;
598 	__u32 pad;
599 };
600 
601 /* DRM_IOCTL_GEM_FLINK ioctl argument type */
602 struct drm_gem_flink {
603 	/** Handle for the object being named */
604 	__u32 handle;
605 
606 	/** Returned global name */
607 	__u32 name;
608 };
609 
610 /* DRM_IOCTL_GEM_OPEN ioctl argument type */
611 struct drm_gem_open {
612 	/** Name of object being opened */
613 	__u32 name;
614 
615 	/** Returned handle for the object */
616 	__u32 handle;
617 
618 	/** Returned size of the object */
619 	__u64 size;
620 };
621 
622 /**
623  * DRM_CAP_DUMB_BUFFER
624  *
625  * If set to 1, the driver supports creating dumb buffers via the
626  * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
627  */
628 #define DRM_CAP_DUMB_BUFFER		0x1
629 /**
630  * DRM_CAP_VBLANK_HIGH_CRTC
631  *
632  * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
633  * in the high bits of &drm_wait_vblank_request.type.
634  *
635  * Starting kernel version 2.6.39, this capability is always set to 1.
636  */
637 #define DRM_CAP_VBLANK_HIGH_CRTC	0x2
638 /**
639  * DRM_CAP_DUMB_PREFERRED_DEPTH
640  *
641  * The preferred bit depth for dumb buffers.
642  *
643  * The bit depth is the number of bits used to indicate the color of a single
644  * pixel excluding any padding. This is different from the number of bits per
645  * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
646  * pixel.
647  *
648  * Note that this preference only applies to dumb buffers, it's irrelevant for
649  * other types of buffers.
650  */
651 #define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
652 /**
653  * DRM_CAP_DUMB_PREFER_SHADOW
654  *
655  * If set to 1, the driver prefers userspace to render to a shadow buffer
656  * instead of directly rendering to a dumb buffer. For best speed, userspace
657  * should do streaming ordered memory copies into the dumb buffer and never
658  * read from it.
659  *
660  * Note that this preference only applies to dumb buffers, it's irrelevant for
661  * other types of buffers.
662  */
663 #define DRM_CAP_DUMB_PREFER_SHADOW	0x4
664 /**
665  * DRM_CAP_PRIME
666  *
667  * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
668  * and &DRM_PRIME_CAP_EXPORT.
669  *
670  * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
671  * &DRM_PRIME_CAP_EXPORT are always advertised.
672  *
673  * PRIME buffers are exposed as dma-buf file descriptors.
674  * See :ref:`prime_buffer_sharing`.
675  */
676 #define DRM_CAP_PRIME			0x5
677 /**
678  * DRM_PRIME_CAP_IMPORT
679  *
680  * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
681  * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
682  *
683  * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
684  */
685 #define  DRM_PRIME_CAP_IMPORT		0x1
686 /**
687  * DRM_PRIME_CAP_EXPORT
688  *
689  * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
690  * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
691  *
692  * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
693  */
694 #define  DRM_PRIME_CAP_EXPORT		0x2
695 /**
696  * DRM_CAP_TIMESTAMP_MONOTONIC
697  *
698  * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
699  * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
700  * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
701  * clocks.
702  *
703  * Starting from kernel version 2.6.39, the default value for this capability
704  * is 1. Starting kernel version 4.15, this capability is always set to 1.
705  */
706 #define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
707 /**
708  * DRM_CAP_ASYNC_PAGE_FLIP
709  *
710  * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
711  * page-flips.
712  */
713 #define DRM_CAP_ASYNC_PAGE_FLIP		0x7
714 /**
715  * DRM_CAP_CURSOR_WIDTH
716  *
717  * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
718  * width x height combination for the hardware cursor. The intention is that a
719  * hardware agnostic userspace can query a cursor plane size to use.
720  *
721  * Note that the cross-driver contract is to merely return a valid size;
722  * drivers are free to attach another meaning on top, e.g. i915 returns the
723  * maximum plane size.
724  */
725 #define DRM_CAP_CURSOR_WIDTH		0x8
726 /**
727  * DRM_CAP_CURSOR_HEIGHT
728  *
729  * See &DRM_CAP_CURSOR_WIDTH.
730  */
731 #define DRM_CAP_CURSOR_HEIGHT		0x9
732 /**
733  * DRM_CAP_ADDFB2_MODIFIERS
734  *
735  * If set to 1, the driver supports supplying modifiers in the
736  * &DRM_IOCTL_MODE_ADDFB2 ioctl.
737  */
738 #define DRM_CAP_ADDFB2_MODIFIERS	0x10
739 /**
740  * DRM_CAP_PAGE_FLIP_TARGET
741  *
742  * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
743  * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
744  * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
745  * ioctl.
746  */
747 #define DRM_CAP_PAGE_FLIP_TARGET	0x11
748 /**
749  * DRM_CAP_CRTC_IN_VBLANK_EVENT
750  *
751  * If set to 1, the kernel supports reporting the CRTC ID in
752  * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
753  * &DRM_EVENT_FLIP_COMPLETE events.
754  *
755  * Starting kernel version 4.12, this capability is always set to 1.
756  */
757 #define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
758 /**
759  * DRM_CAP_SYNCOBJ
760  *
761  * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
762  */
763 #define DRM_CAP_SYNCOBJ		0x13
764 /**
765  * DRM_CAP_SYNCOBJ_TIMELINE
766  *
767  * If set to 1, the driver supports timeline operations on sync objects. See
768  * :ref:`drm_sync_objects`.
769  */
770 #define DRM_CAP_SYNCOBJ_TIMELINE	0x14
771 /**
772  * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
773  *
774  * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
775  * commits.
776  */
777 #define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP	0x15
778 
779 /* DRM_IOCTL_GET_CAP ioctl argument type */
780 struct drm_get_cap {
781 	__u64 capability;
782 	__u64 value;
783 };
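
/*
 * Example (illustrative sketch, not part of the UAPI): querying whether the
 * driver supports dumb buffers. Assumes an open DRM file descriptor "fd" and
 * <sys/ioctl.h>; error handling is omitted.
 *
 *	struct drm_get_cap cap;
 *	memset(&cap, 0, sizeof(cap));
 *	cap.capability = DRM_CAP_DUMB_BUFFER;
 *	ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
 *	int has_dumb = cap.value != 0;
 */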
784 
785 /**
786  * DRM_CLIENT_CAP_STEREO_3D
787  *
788  * If set to 1, the DRM core will expose the stereo 3D capabilities of the
789  * monitor by advertising the supported 3D layouts in the flags of struct
790  * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
791  *
792  * This capability is always supported for all drivers starting from kernel
793  * version 3.13.
794  */
795 #define DRM_CLIENT_CAP_STEREO_3D	1
796 
797 /**
798  * DRM_CLIENT_CAP_UNIVERSAL_PLANES
799  *
800  * If set to 1, the DRM core will expose all planes (overlay, primary, and
801  * cursor) to userspace.
802  *
803  * This capability has been introduced in kernel version 3.15. Starting from
804  * kernel version 3.17, this capability is always supported for all drivers.
805  */
806 #define DRM_CLIENT_CAP_UNIVERSAL_PLANES  2
807 
808 /**
809  * DRM_CLIENT_CAP_ATOMIC
810  *
811  * If set to 1, the DRM core will expose atomic properties to userspace. This
812  * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
813  * &DRM_CLIENT_CAP_ASPECT_RATIO.
814  *
815  * If the driver doesn't support atomic mode-setting, enabling this capability
816  * will fail with -EOPNOTSUPP.
817  *
818  * This capability has been introduced in kernel version 4.0. Starting from
819  * kernel version 4.2, this capability is always supported for atomic-capable
820  * drivers.
821  */
822 #define DRM_CLIENT_CAP_ATOMIC	3
823 
824 /**
825  * DRM_CLIENT_CAP_ASPECT_RATIO
826  *
827  * If set to 1, the DRM core will provide aspect ratio information in modes.
828  * See ``DRM_MODE_FLAG_PIC_AR_*``.
829  *
830  * This capability is always supported for all drivers starting from kernel
831  * version 4.18.
832  */
833 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
834 
835 /**
836  * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
837  *
838  * If set to 1, the DRM core will expose special connectors to be used for
839  * writing back to memory the scene setup in the commit. The client must enable
840  * &DRM_CLIENT_CAP_ATOMIC first.
841  *
842  * This capability is always supported for atomic-capable drivers starting from
843  * kernel version 4.19.
844  */
845 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
846 
847 /**
848  * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
849  *
850  * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
851  * virtualbox) have additional restrictions for cursor planes (thus
852  * making cursor planes on those drivers not truly universal), e.g.
853  * they need cursor planes to act like one would expect from a mouse
854  * cursor and to have correctly set hotspot properties.
855  * If this client cap is not set, the DRM core will hide the cursor plane
856  * on those virtualized drivers, because not setting it implies that the
857  * client is not capable of dealing with those extra restrictions.
858  * Clients which do set the cursor hotspot and treat the cursor plane
859  * like a mouse cursor should set this property.
860  * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
861  *
862  * Setting this property on drivers which do not special case
863  * cursor planes (i.e. non-virtualized drivers) will return
864  * EOPNOTSUPP, which can be used by userspace to gauge
865  * requirements of the hardware/drivers they're running on.
866  *
867  * This capability is always supported for atomic-capable virtualized
868  * drivers starting from kernel version 6.6.
869  */
870 #define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT	6
871 
872 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
873 struct drm_set_client_cap {
874 	__u64 capability;
875 	__u64 value;
876 };
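
/*
 * Example (illustrative sketch, not part of the UAPI): opting in to atomic
 * mode-setting. Assumes an open DRM file descriptor "fd" and <sys/ioctl.h>.
 *
 *	struct drm_set_client_cap ccap;
 *	memset(&ccap, 0, sizeof(ccap));
 *	ccap.capability = DRM_CLIENT_CAP_ATOMIC;
 *	ccap.value = 1;
 *	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &ccap) != 0) {
 *		// not supported by this driver; fall back to legacy paths
 *	}
 */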
877 
878 #define DRM_RDWR O_RDWR
879 #define DRM_CLOEXEC O_CLOEXEC
880 struct drm_prime_handle {
881 	__u32 handle;
882 
883 	/** Flags; only applicable for handle->fd */
884 	__u32 flags;
885 
886 	/** Returned dmabuf file descriptor */
887 	__s32 fd;
888 };
889 
890 struct drm_syncobj_create {
891 	__u32 handle;
892 #define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
893 	__u32 flags;
894 };
895 
896 struct drm_syncobj_destroy {
897 	__u32 handle;
898 	__u32 pad;
899 };
900 
901 #define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
902 #define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
903 struct drm_syncobj_handle {
904 	__u32 handle;
905 	__u32 flags;
906 
907 	__s32 fd;
908 	__u32 pad;
909 };
910 
911 struct drm_syncobj_transfer {
912 	__u32 src_handle;
913 	__u32 dst_handle;
914 	__u64 src_point;
915 	__u64 dst_point;
916 	__u32 flags;
917 	__u32 pad;
918 };
919 
920 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
921 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
922 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
923 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
924 struct drm_syncobj_wait {
925 	__u64 handles;
926 	/* absolute timeout */
927 	__s64 timeout_nsec;
928 	__u32 count_handles;
929 	__u32 flags;
930 	__u32 first_signaled; /* only valid when not waiting all */
931 	__u32 pad;
932 	/**
933 	 * @deadline_nsec - fence deadline hint
934 	 *
935 	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
936 	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
937 	 * set.
938 	 */
939 	__u64 deadline_nsec;
940 };
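
/*
 * Example (illustrative sketch, not part of the UAPI): waiting until two
 * sync objects are signalled. "obj_a", "obj_b" and "deadline" are
 * placeholders; the handles field carries a user pointer cast to __u64, and
 * the timeout is an absolute CLOCK_MONOTONIC value in nanoseconds. Assumes an
 * open DRM file descriptor "fd" plus <stdint.h> and <sys/ioctl.h>.
 *
 *	__u32 handles[2] = { obj_a, obj_b };
 *	struct drm_syncobj_wait wait;
 *	memset(&wait, 0, sizeof(wait));
 *	wait.handles = (__u64)(uintptr_t)handles;
 *	wait.count_handles = 2;
 *	wait.timeout_nsec = deadline;
 *	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */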
941 
942 struct drm_syncobj_timeline_wait {
943 	__u64 handles;
944 	/* wait on a specific timeline point for every handle */
945 	__u64 points;
946 	/* absolute timeout */
947 	__s64 timeout_nsec;
948 	__u32 count_handles;
949 	__u32 flags;
950 	__u32 first_signaled; /* only valid when not waiting all */
951 	__u32 pad;
952 	/**
953 	 * @deadline_nsec - fence deadline hint
954 	 *
955 	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
956 	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
957 	 * set.
958 	 */
959 	__u64 deadline_nsec;
960 };
961 
962 /**
963  * struct drm_syncobj_eventfd
964  * @handle: syncobj handle.
965  * @flags: Zero to wait for the point to be signalled, or
966  *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
967  *         available for the point.
968  * @point: syncobj timeline point (set to zero for binary syncobjs).
969  * @fd: Existing eventfd to send events to.
970  * @pad: Must be zero.
971  *
972  * Register an eventfd to be signalled by a syncobj. The eventfd counter will
973  * be incremented by one.
974  */
975 struct drm_syncobj_eventfd {
976 	__u32 handle;
977 	__u32 flags;
978 	__u64 point;
979 	__s32 fd;
980 	__u32 pad;
981 };
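
/*
 * Example (illustrative sketch, not part of the UAPI): registering an eventfd
 * so it is signalled when a binary syncobj is signalled. "syncobj" is a
 * placeholder handle. Assumes an open DRM file descriptor "fd" plus
 * <sys/eventfd.h> and <sys/ioctl.h>; error handling is omitted.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct drm_syncobj_eventfd ev;
 *	memset(&ev, 0, sizeof(ev));
 *	ev.handle = syncobj;
 *	ev.point = 0;		// binary syncobj
 *	ev.fd = efd;
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &ev);
 *	// efd can now be polled; its counter is incremented by one when the
 *	// syncobj is signalled.
 */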
982 
983 
984 struct drm_syncobj_array {
985 	__u64 handles;
986 	__u32 count_handles;
987 	__u32 pad;
988 };
989 
990 #define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
991 struct drm_syncobj_timeline_array {
992 	__u64 handles;
993 	__u64 points;
994 	__u32 count_handles;
995 	__u32 flags;
996 };
997 
998 
999 /* Query current scanout sequence number */
1000 struct drm_crtc_get_sequence {
1001 	__u32 crtc_id;		/* requested crtc_id */
1002 	__u32 active;		/* return: crtc output is active */
1003 	__u64 sequence;		/* return: most recent vblank sequence */
1004 	__s64 sequence_ns;	/* return: most recent time of first pixel out */
1005 };
1006 
1007 /* Queue event to be delivered at specified sequence. Time stamp marks
1008  * when the first pixel of the refresh cycle leaves the display engine
1009  * for the display
1010  */
1011 #define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
1012 #define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */
1013 
1014 struct drm_crtc_queue_sequence {
1015 	__u32 crtc_id;
1016 	__u32 flags;
1017 	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
1018 	__u64 user_data;	/* user data passed to event */
1019 };
1020 
1021 #if defined(__cplusplus)
1022 }
1023 #endif
1024 
1025 #include "drm_mode.h"
1026 
1027 #if defined(__cplusplus)
1028 extern "C" {
1029 #endif
1030 
1031 #define DRM_IOCTL_BASE			'd'
1032 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
1033 #define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
1034 #define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
1035 #define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)
1036 
1037 #define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
1038 #define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
1039 #define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
1040 #define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
1041 #define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
1042 #define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
1043 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
1044 #define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
1045 #define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
1046 /**
1047  * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
1048  *
1049  * GEM handles are not reference-counted by the kernel. User-space is
1050  * responsible for managing their lifetime. For example, if user-space imports
1051  * the same memory object twice on the same DRM file description, the same GEM
1052  * handle is returned by both imports, and user-space needs to ensure
1053  * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
1054  * when a memory object is allocated, then exported and imported again on the
1055  * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
1056  * and always returns fresh new GEM handles even if an existing GEM handle
1057  * already refers to the same memory object before the IOCTL is performed.
1058  */
1059 #define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
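
/*
 * Example (illustrative sketch, not part of the UAPI): releasing a GEM handle
 * once user-space no longer needs it. Because handles are not
 * reference-counted by the kernel, callers that may hold duplicates of the
 * same handle must track them and close each handle exactly once. "handle" is
 * a placeholder; assumes an open DRM file descriptor "fd" and <sys/ioctl.h>.
 *
 *	struct drm_gem_close close_args;
 *	memset(&close_args, 0, sizeof(close_args));
 *	close_args.handle = handle;
 *	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 */
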
1060 #define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
1061 #define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
1062 #define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
1063 #define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)
1064 
1065 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
1066 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
1067 #define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
1068 #define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
1069 #define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
1070 #define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
1071 #define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
1072 #define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
1073 #define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
1074 #define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
1075 #define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)
1076 
1077 #define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)
1078 
1079 #define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
1080 #define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)
1081 
1082 #define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
1083 #define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)
1084 
1085 #define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
1086 #define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
1087 #define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
1088 #define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
1089 #define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
1090 #define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
1091 #define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
1092 #define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
1093 #define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
1094 #define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
1095 #define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
1096 #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
1097 #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
1098 
1099 /**
1100  * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
1101  *
1102  * User-space sets &drm_prime_handle.handle with the GEM handle to export and
1103  * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
1104  * &drm_prime_handle.fd.
1105  *
1106  * The export can fail for any driver-specific reason, e.g. because export is
1107  * not supported for this specific GEM handle (but might be for others).
1108  *
1109  * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
1110  */
1111 #define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
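
/*
 * Example (illustrative sketch, not part of the UAPI): exporting a GEM handle
 * as a dma-buf file descriptor. "handle" is a placeholder; DRM_RDWR can
 * additionally be requested where the driver supports writable exports.
 * Assumes an open DRM file descriptor "fd" and <sys/ioctl.h>.
 *
 *	struct drm_prime_handle args;
 *	memset(&args, 0, sizeof(args));
 *	args.handle = handle;
 *	args.flags = DRM_CLOEXEC;
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	int dmabuf_fd = args.fd;
 */
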
1112 /**
1113  * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
1114  *
1115  * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
1116  * import, and gets back a GEM handle in &drm_prime_handle.handle.
1117  * &drm_prime_handle.flags is unused.
1118  *
1119  * If an existing GEM handle refers to the memory object backing the DMA-BUF,
1120  * that GEM handle is returned. Therefore user-space which needs to handle
1121  * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
1122  * reference-count duplicated GEM handles. For more information see
1123  * &DRM_IOCTL_GEM_CLOSE.
1124  *
1125  * The import can fail for any driver-specific reason, e.g. because import is
1126  * only supported for DMA-BUFs allocated on this DRM device.
1127  *
1128  * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
1129  */
1130 #define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
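
/*
 * Example (illustrative sketch, not part of the UAPI): importing a dma-buf
 * file descriptor as a GEM handle. Note that the returned handle may be one
 * that already exists for this memory object on this DRM file description.
 * "dmabuf_fd" is a placeholder; assumes <sys/ioctl.h>.
 *
 *	struct drm_prime_handle args;
 *	memset(&args, 0, sizeof(args));
 *	args.fd = dmabuf_fd;
 *	ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	__u32 handle = args.handle;
 */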
1131 
1132 #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
1133 #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
1134 #define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
1135 #define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
1136 #define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
1137 #define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
1138 #define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
1139 #define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)
1140 
1141 #define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
1142 #define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)
1143 
1144 #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)
1145 
1146 #define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
1147 #define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
1148 
1149 #define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
1150 
1151 #define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
1152 #define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
1153 #define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
1154 #define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
1155 #define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
1156 #define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
1157 #define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
1158 #define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
1159 #define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
1160 #define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
1161 
1162 #define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
1163 #define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
1164 #define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
1165 #define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
1166 #define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
1167 /**
1168  * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
1169  *
1170  * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
1171  * argument is a framebuffer object ID.
1172  *
1173  * Warning: removing a framebuffer currently in-use on an enabled plane will
1174  * disable that plane. The CRTC the plane is linked to may also be disabled
1175  * (depending on driver capabilities).
1176  */
1177 #define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
1178 #define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
1179 #define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
1180 
1181 /**
1182  * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
1183  *
1184  * KMS dumb buffers provide a very primitive way to allocate a buffer object
1185  * suitable for scanout and map it for software rendering. KMS dumb buffers are
1186  * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
1187  * buffers are not suitable to be displayed on any other device than the KMS
1188  * device where they were allocated from. Also see
1189  * :ref:`kms_dumb_buffer_objects`.
1190  *
1191  * The IOCTL argument is a struct drm_mode_create_dumb.
1192  *
1193  * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
1194  * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
1195  * &DRM_IOCTL_MODE_MAP_DUMB.
1196  *
1197  * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
1198  * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
1199  * driver preferences for dumb buffers.
1200  */
1201 #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
1202 #define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
1203 #define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
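
/*
 * Example (illustrative sketch, not part of the UAPI): the usual dumb-buffer
 * workflow -- allocate, query the mmap offset, then map for software
 * rendering. The 1024x768, 32 bpp values are arbitrary. Assumes an open DRM
 * file descriptor "fd" plus <sys/ioctl.h> and <sys/mman.h>; error handling is
 * omitted.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	// create.handle, create.pitch and create.size are now filled in
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *	// render into ptr, then attach create.handle to a framebuffer via
 *	// DRM_IOCTL_MODE_ADDFB / DRM_IOCTL_MODE_ADDFB2 for scanout
 */
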
1204 #define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
1205 #define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
1206 #define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
1207 #define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
1208 #define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
1209 #define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
1210 #define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
1211 #define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
1212 #define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
1213 #define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
1214 
1215 #define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
1216 #define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
1217 #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
1218 #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
1219 #define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
1220 #define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
1221 #define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)
1222 
1223 #define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
1224 #define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
1225 #define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
1226 #define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
1227 
1228 #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
1229 #define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
1230 #define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
1231 #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
1232 
1233 /**
1234  * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
1235  *
1236  * This queries metadata about a framebuffer. User-space fills
1237  * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
1238  * struct as the output.
1239  *
1240  * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
1241  * will be filled with GEM buffer handles. Fresh new GEM handles are always
1242  * returned, even if another GEM handle referring to the same memory object
1243  * already exists on the DRM file description. The caller is responsible for
1244  * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
1245  * new handle will be returned for multiple planes in case they use the same
1246  * memory object. Planes are valid until one has a zero handle -- this can be
1247  * used to compute the number of planes.
1248  *
1249  * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
1250  * until one has a zero &drm_mode_fb_cmd2.pitches.
1251  *
1252  * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
1253  * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
1254  * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
1255  *
1256  * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
1257  * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
1258  * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
1259  * double-close handles which are specified multiple times in the array.
1260  */
1261 #define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
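
/*
 * Example (illustrative sketch, not part of the UAPI): turning a framebuffer
 * ID into per-plane dma-buf FDs as described above. "fb_id" and "plane_fd"
 * are placeholders; assumes DRM master or CAP_SYS_ADMIN, an open DRM file
 * descriptor "fd" and <sys/ioctl.h>; error handling is omitted.
 *
 *	struct drm_mode_fb_cmd2 info;
 *	memset(&info, 0, sizeof(info));
 *	info.fb_id = fb_id;
 *	ioctl(fd, DRM_IOCTL_MODE_GETFB2, &info);
 *	for (int i = 0; i < 4 && info.handles[i]; i++) {
 *		struct drm_prime_handle prime;
 *		memset(&prime, 0, sizeof(prime));
 *		prime.handle = info.handles[i];
 *		prime.flags = DRM_CLOEXEC;
 *		ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *		plane_fd[i] = prime.fd;
 *	}
 *	// afterwards, close each *unique* handle exactly once via
 *	// DRM_IOCTL_GEM_CLOSE; several planes may reuse the same handle.
 */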
1262 
1263 #define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
1264 
1265 /**
1266  * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
1267  *
1268  * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
1269  * argument is a framebuffer object ID.
1270  *
1271  * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
1272  * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
1273  * alive. When the plane no longer uses the framebuffer (because the
1274  * framebuffer is replaced with another one, or the plane is disabled), the
1275  * framebuffer is cleaned up.
1276  *
1277  * This is useful to implement flicker-free transitions between two processes.
1278  *
1279  * Depending on the threat model, user-space may want to ensure that the
1280  * framebuffer doesn't expose any sensitive user information: closed
1281  * framebuffers attached to a plane can be read back by the next DRM master.
1282  */
1283 #define DRM_IOCTL_MODE_CLOSEFB		DRM_IOWR(0xD0, struct drm_mode_closefb)
1284 
1285 /*
1286  * Device specific ioctls should only be in their respective headers
1287  * The device specific ioctl range is from 0x40 to 0x9f.
1288  * Generic IOCTLS restart at 0xA0.
1289  *
1290  * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
1291  * drmCommandReadWrite().
1292  */
1293 #define DRM_COMMAND_BASE                0x40
1294 #define DRM_COMMAND_END			0xA0
1295 
1296 /**
1297  * struct drm_event - Header for DRM events
1298  * @type: event type.
1299  * @length: total event size in bytes, including this header.
1300  *
1301  * This struct is a header for events written back to user-space on the DRM FD.
1302  * A read on the DRM FD will always only return complete events: e.g. if the
1303  * read buffer is 100 bytes large and there are two 64 byte events pending,
1304  * only one will be returned.
1305  *
1306  * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
1307  * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
1308  * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
1309  */
1310 struct drm_event {
1311 	__u32 type;
1312 	__u32 length;
1313 };
1314 
1315 /**
1316  * DRM_EVENT_VBLANK - vertical blanking event
1317  *
1318  * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
1319  * &_DRM_VBLANK_EVENT flag set.
1320  *
1321  * The event payload is a struct drm_event_vblank.
1322  */
1323 #define DRM_EVENT_VBLANK 0x01
1324 /**
1325  * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
1326  *
1327  * This event is sent in response to an atomic commit or legacy page-flip with
1328  * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
1329  *
1330  * The event payload is a struct drm_event_vblank.
1331  */
1332 #define DRM_EVENT_FLIP_COMPLETE 0x02
1333 /**
1334  * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
1335  *
1336  * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
1337  *
1338  * The event payload is a struct drm_event_crtc_sequence.
1339  */
1340 #define DRM_EVENT_CRTC_SEQUENCE	0x03
1341 
1342 struct drm_event_vblank {
1343 	struct drm_event base;
1344 	__u64 user_data;
1345 	__u32 tv_sec;
1346 	__u32 tv_usec;
1347 	__u32 sequence;
1348 	__u32 crtc_id; /* 0 on older kernels that do not support this */
1349 };
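
/*
 * Example (illustrative sketch, not part of the UAPI): draining events from
 * the DRM FD. read() only ever returns whole events, and each event's length
 * field covers the header plus payload, so the buffer can be walked event by
 * event. Assumes an open DRM file descriptor "fd" and <unistd.h>; error
 * handling is omitted.
 *
 *	__u64 buf[512];		// 4 KiB, aligned for the event payloads
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	char *p = (char *)buf;
 *	for (ssize_t off = 0; off + (ssize_t)sizeof(struct drm_event) <= len;) {
 *		struct drm_event *e = (struct drm_event *)(p + off);
 *		if (e->type == DRM_EVENT_VBLANK ||
 *		    e->type == DRM_EVENT_FLIP_COMPLETE) {
 *			struct drm_event_vblank *vb = (struct drm_event_vblank *)e;
 *			// vb->sequence, vb->tv_sec/tv_usec, vb->crtc_id, ...
 *		}
 *		off += e->length;
 *	}
 */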
1350 
1351 /* Event delivered at sequence. Time stamp marks when the first pixel
1352  * of the refresh cycle leaves the display engine for the display
1353  */
1354 struct drm_event_crtc_sequence {
1355 	struct drm_event	base;
1356 	__u64			user_data;
1357 	__s64			time_ns;
1358 	__u64			sequence;
1359 };
1360 
1361 /* typedef area */
1362 typedef struct drm_clip_rect drm_clip_rect_t;
1363 typedef struct drm_drawable_info drm_drawable_info_t;
1364 typedef struct drm_tex_region drm_tex_region_t;
1365 typedef struct drm_hw_lock drm_hw_lock_t;
1366 typedef struct drm_version drm_version_t;
1367 typedef struct drm_unique drm_unique_t;
1368 typedef struct drm_list drm_list_t;
1369 typedef struct drm_block drm_block_t;
1370 typedef struct drm_control drm_control_t;
1371 typedef enum drm_map_type drm_map_type_t;
1372 typedef enum drm_map_flags drm_map_flags_t;
1373 typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
1374 typedef struct drm_map drm_map_t;
1375 typedef struct drm_client drm_client_t;
1376 typedef enum drm_stat_type drm_stat_type_t;
1377 typedef struct drm_stats drm_stats_t;
1378 typedef enum drm_lock_flags drm_lock_flags_t;
1379 typedef struct drm_lock drm_lock_t;
1380 typedef enum drm_dma_flags drm_dma_flags_t;
1381 typedef struct drm_buf_desc drm_buf_desc_t;
1382 typedef struct drm_buf_info drm_buf_info_t;
1383 typedef struct drm_buf_free drm_buf_free_t;
1384 typedef struct drm_buf_pub drm_buf_pub_t;
1385 typedef struct drm_buf_map drm_buf_map_t;
1386 typedef struct drm_dma drm_dma_t;
1387 typedef union drm_wait_vblank drm_wait_vblank_t;
1388 typedef struct drm_agp_mode drm_agp_mode_t;
1389 typedef enum drm_ctx_flags drm_ctx_flags_t;
1390 typedef struct drm_ctx drm_ctx_t;
1391 typedef struct drm_ctx_res drm_ctx_res_t;
1392 typedef struct drm_draw drm_draw_t;
1393 typedef struct drm_update_draw drm_update_draw_t;
1394 typedef struct drm_auth drm_auth_t;
1395 typedef struct drm_irq_busid drm_irq_busid_t;
1396 typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
1397 
1398 typedef struct drm_agp_buffer drm_agp_buffer_t;
1399 typedef struct drm_agp_binding drm_agp_binding_t;
1400 typedef struct drm_agp_info drm_agp_info_t;
1401 typedef struct drm_scatter_gather drm_scatter_gather_t;
1402 typedef struct drm_set_version drm_set_version_t;
1403 
1404 #if defined(__cplusplus)
1405 }
1406 #endif
1407 
1408 #endif
1409