/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <uapi/linux/idxd.h>

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_GNRD	0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR	0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR	0x1216
#define PCI_DEVICE_ID_INTEL_IAA_PTL	0xb02d

#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR		0
#define IDXD_WQ_BAR		2
#define IDXD_PORTAL_SIZE	PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)
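/*
 * Illustrative sketch (not part of the register definitions): decoding the
 * VERSION register. "reg_base" stands for an assumed ioremap'ed pointer to
 * the device's BAR0 MMIO region.
 *
 *	u32 ver = ioread32(reg_base + IDXD_VER_OFFSET);
 *	unsigned int major = GET_IDXD_VER_MAJOR(ver);
 *	unsigned int minor = GET_IDXD_VER_MINOR(ver);
 */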

union gen_cap_reg {
	struct {
		u64 block_on_fault:1;
		u64 overlap_copy:1;
		u64 cache_control_mem:1;
		u64 cache_control_cache:1;
		u64 cmd_cap:1;
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:3;
		u64 evl_support:2;
		u64 batch_continuation:1;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;
		u64 config_en:1;
		u64 rsvd3:32;
	};
	u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10
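/*
 * Illustrative sketch: reading GENCAP through the union's bitfield view.
 * Assumes "reg_base" is the ioremap'ed BAR0 base and that a 64-bit MMIO
 * read helper (e.g. ioread64() via the io-64-nonatomic headers) is
 * available.
 *
 *	union gen_cap_reg gen_cap;
 *	u64 max_xfer_bytes;
 *
 *	gen_cap.bits = ioread64(reg_base + IDXD_GENCAP_OFFSET);
 *	max_xfer_bytes = 1ULL << gen_cap.max_xfer_shift;
 *	if (gen_cap.config_en)
 *		;	// software may configure WQs and groups
 */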

union wq_cap_reg {
	struct {
		u64 total_wq_size:16;
		u64 num_wqs:8;
		u64 wqcfg_size:4;
		u64 rsvd:20;
		u64 shared_mode:1;
		u64 dedicated_mode:1;
		u64 wq_ats_support:1;
		u64 priority:1;
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 op_config:1;
		u64 wq_prs_support:1;
		u64 rsvd4:8;
	};
	u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5
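/*
 * Illustrative sketch: WQCAP.wqcfg_size encodes the per-WQ configuration
 * record size as a power of two, with IDXD_WQCFG_MIN (2^5 = 32 bytes) the
 * smallest legal encoded value. "reg_base" is an assumed ioremap'ed BAR0
 * pointer.
 *
 *	union wq_cap_reg wq_cap;
 *	unsigned int wqcfg_size;
 *
 *	wq_cap.bits = ioread64(reg_base + IDXD_WQCAP_OFFSET);
 *	wqcfg_size = 1 << wq_cap.wqcfg_size;	// bytes per WQCFG entry
 */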

union group_cap_reg {
	struct {
		u64 num_groups:8;
		u64 total_rdbufs:8;	/* formerly total_tokens */
		u64 rdbuf_ctrl:1;	/* formerly token_en */
		u64 rdbuf_limit:1;	/* formerly token_limit */
		u64 progress_limit:1;	/* descriptor and batch descriptor */
		u64 rsvd:45;
	};
	u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {
	struct {
		u64 num_engines:8;
		u64 rsvd:56;
	};
	u64 bits;
} __packed;

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
	u64 bits[4];
};

#define IDXD_MAX_OPCAP_BITS		256U

#define IDXD_OPCAP_OFFSET		0x40
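/*
 * Illustrative sketch: OPCAP is a 256-bit bitmap (four 64-bit words) in
 * which bit N reports support for operation code N; the IDXD_OPCAP_* bits
 * above all live in bits[0]. Testing an arbitrary opcode "op":
 *
 *	struct opcap opcap;	// filled from IDXD_OPCAP_OFFSET
 *	bool supported = opcap.bits[op / 64] & BIT_ULL(op % 64);
 */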

#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
	struct {
		u64 grpcfg:16;
		u64 wqcfg:16;
		u64 msix_perm:16;
		u64 ims:16;
		u64 perfmon:16;
		u64 rsvd:48;
	};
	u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100
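/*
 * Illustrative sketch: each field of the table offset register locates a
 * configuration table in multiples of IDXD_TABLE_MULT (0x100 bytes) from
 * the start of BAR0. For example:
 *
 *	union offsets_reg offsets;	// read from IDXD_TABLE_OFFSET
 *	u32 grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
 *	u32 wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
 */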

#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
	struct {
		u32 rdbuf_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 evl_en:1;
		u32 rsvd2:18;
	};
	u32 bits;
} __packed;

#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 evl_int_en:1;
		u32 rsvd:29;
	};
	u32 bits;
} __packed;

#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
	struct {
		u32 state:2;
		u32 reset_type:2;
		u32 rsvd:28;
	};
	u32 bits;
} __packed;

enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_EVL			0x20
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
	struct {
		u32 operand:20;
		u32 cmd:5;
		u32 rsvd:6;
		u32 int_req:1;
	};
	u32 bits;
} __packed;

enum idxd_cmd {
	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_DRAIN_ALL,
	IDXD_CMD_ABORT_ALL,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_ENABLE_WQ,
	IDXD_CMD_DISABLE_WQ,
	IDXD_CMD_DRAIN_WQ,
	IDXD_CMD_ABORT_WQ,
	IDXD_CMD_RESET_WQ,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000
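/*
 * Illustrative sketch: issuing a device command. The operand field is
 * command specific (e.g. the WQ index for IDXD_CMD_ENABLE_WQ) and int_req
 * asks for a command-completion interrupt. "reg_base" and "wq_id" are
 * assumed names.
 *
 *	union idxd_command_reg cmd = { };
 *
 *	cmd.cmd = IDXD_CMD_ENABLE_WQ;
 *	cmd.operand = wq_id;
 *	cmd.int_req = 1;
 *	iowrite32(cmd.bits, reg_base + IDXD_CMD_OFFSET);
 */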

#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
	struct {
		u8 err;
		u16 result;
		u8 rsvd:7;
		u8 active:1;
	};
	u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8
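/*
 * Illustrative sketch: checking command completion status. A command is
 * still in flight while the active bit is set; once it clears, the err
 * byte holds one of the idxd_cmdsts_err codes below (0 on success).
 *
 *	u32 sts;
 *
 *	do {
 *		sts = ioread32(reg_base + IDXD_CMDSTS_OFFSET);
 *	} while (sts & IDXD_CMDSTS_ACTIVE);
 *	if (sts & IDXD_CMDSTS_ERR_MASK)
 *		;	// command failed
 */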

enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	IDXD_CMDSTS_HW_ERR,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};

#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
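/*
 * Illustrative sketch: the SWERR register (layout below) latches software
 * error information. After the error has been consumed, the valid and
 * overflow bits are cleared by writing IDXD_SWERR_ACK back to the first
 * 64-bit word. Assumes a 64-bit MMIO write helper (e.g. iowrite64() via
 * the io-64-nonatomic headers).
 *
 *	iowrite64(IDXD_SWERR_ACK, reg_base + IDXD_SWERR_OFFSET);
 */
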
union sw_err_reg {
	struct {
		u64 valid:1;
		u64 overflow:1;
		u64 desc_valid:1;
		u64 wq_idx_valid:1;
		u64 batch:1;
		u64 fault_rw:1;
		u64 priv:1;
		u64 rsvd:1;
		u64 error:8;
		u64 wq_idx:8;
		u64 rsvd2:8;
		u64 operation:8;
		u64 pasid:20;
		u64 rsvd3:4;

		u64 batch_idx:16;
		u64 rsvd4:16;
		u64 invalid_flags:32;

		u64 fault_addr;

		u64 rsvd5;
	};
	u64 bits[4];
} __packed;

union iaa_cap_reg {
	struct {
		u64 dec_aecs_format_ver:1;
		u64 drop_init_bits:1;
		u64 chaining:1;
		u64 force_array_output_mod:1;
		u64 load_part_aecs:1;
		u64 comp_early_abort:1;
		u64 nested_comp:1;
		u64 diction_comp:1;
		u64 header_gen:1;
		u64 crypto_gcm:1;
		u64 crypto_cfb:1;
		u64 crypto_xts:1;
		u64 rsvd:52;
	};
	u64 bits;
} __packed;

#define IDXD_IAACAP_OFFSET	0x180

#define IDXD_EVLCFG_OFFSET	0xe0
union evlcfg_reg {
	struct {
		u64 pasid_en:1;
		u64 priv:1;
		u64 rsvd:10;
		u64 base_addr:52;

		u64 size:16;
		u64 pasid:20;
		u64 rsvd2:28;
	};
	u64 bits[2];
} __packed;

#define IDXD_EVL_SIZE_MIN	0x0040
#define IDXD_EVL_SIZE_MAX	0xffff

union msix_perm {
	struct {
		u32 rsvd:2;
		u32 ignore:1;
		u32 pasid_en:1;
		u32 rsvd2:8;
		u32 pasid:20;
	};
	u32 bits;
} __packed;

union group_flags {
	struct {
		u64 tc_a:3;
		u64 tc_b:3;
		u64 rsvd:1;
		u64 use_rdbuf_limit:1;
		u64 rdbufs_reserved:8;
		u64 rsvd2:4;
		u64 rdbufs_allowed:8;
		u64 rsvd3:4;
		u64 desc_progress_limit:2;
		u64 rsvd4:2;
		u64 batch_progress_limit:2;
		u64 rsvd5:26;
	};
	u64 bits;
} __packed;

struct grpcfg {
	u64 wqs[4];
	u64 engines;
	union group_flags flags;
} __packed;

union wqcfg {
	struct {
		/* bytes 0-3 */
		u16 wq_size;
		u16 rsvd;

		/* bytes 4-7 */
		u16 wq_thresh;
		u16 rsvd1;

		/* bytes 8-11 */
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 wq_prs_disable:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;
		u32 priv:1;
		u32 rsvd3:2;

		/* bytes 12-15 */
		u32 max_xfer_shift:5;
		u32 max_batch_shift:4;
		u32 rsvd4:23;

		/* bytes 16-19 */
		u16 occupancy_inth;
		u16 occupancy_table_sel:1;
		u16 rsvd5:15;

		/* bytes 20-23 */
		u16 occupancy_limit;
		u16 occupancy_int_en:1;
		u16 rsvd6:15;

		/* bytes 24-27 */
		u16 occupancy;
		u16 occupancy_int:1;
		u16 rsvd7:12;
		u16 mode_support:1;
		u16 wq_state:2;

		/* bytes 28-31 */
		u32 rsvd8;

		/* bytes 32-63 */
		u64 op_config[4];
	};
	u32 bits[16];
} __packed;

#define WQCFG_PASID_IDX		2
#define WQCFG_PRIVL_IDX		2
#define WQCFG_OCCUP_IDX		6

#define WQCFG_OCCUP_MASK	0xffff

/*
 * This macro calculates the offset of a WQCFG register.
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32-bit dword within the config register
 *
 * The WQCFG register block is divided into one register group per wq.
 * The n index selects the group for that particular wq. Each register
 * in the group is 32 bits, and ofs selects which 32-bit register within
 * the group to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
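/*
 * Illustrative sketch: reading a WQ's occupancy through the macros above.
 * "idxd" is an assumed struct idxd * with reg_base, wqcfg_offset and
 * wqcfg_size filled in from the capability/offset registers, and "wq_id"
 * is the WQ index.
 *
 *	u32 occup = ioread32(idxd->reg_base +
 *			     WQCFG_OFFSET(idxd, wq_id, WQCFG_OCCUP_IDX)) &
 *		    WQCFG_OCCUP_MASK;
 */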

#define GRPCFG_SIZE		64
#define GRPWQCFG_STRIDES	4

/*
 * This macro calculates the offset into the GRPCFG register block.
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64-bit qword within the config register
 *
 * The GRPCFG register block for each group is divided into three
 * sub-registers: GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index selects
 * the register block for that particular group. Each qword is 64 bits,
 * and ofs selects which 64-bit qword within the GRPWQCFG sub-register
 * to access.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
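/*
 * Illustrative sketch: programming one group's configuration with the
 * offset helpers above. "idxd" is an assumed struct idxd *, "grp_id" the
 * group index, and "grpcfg" an assumed struct grpcfg * holding the
 * desired setup. Assumes a 64-bit MMIO write helper is available.
 *
 *	for (i = 0; i < GRPWQCFG_STRIDES; i++)
 *		iowrite64(grpcfg->wqs[i],
 *			  idxd->reg_base + GRPWQCFG_OFFSET(idxd, grp_id, i));
 *	iowrite64(grpcfg->engines, idxd->reg_base + GRPENGCFG_OFFSET(idxd, grp_id));
 *	iowrite64(grpcfg->flags.bits, idxd->reg_base + GRPFLGCFG_OFFSET(idxd, grp_id));
 */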

/* The following are the performance monitor registers. */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
	struct {
		u64 num_perf_counter:6;
		u64 rsvd1:2;
		u64 counter_width:8;
		u64 num_event_category:4;
		u64 global_event_category:16;
		u64 filter:8;
		u64 rsvd2:8;
		u64 cap_per_counter:1;
		u64 writeable_counter:1;
		u64 counter_freeze:1;
		u64 overflow_interrupt:1;
		u64 rsvd3:8;
	};
	u64 bits;
} __packed;

#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
	struct {
		u64 events:28;
		u64 rsvd:36;
	};
	u64 bits;
} __packed;

struct idxd_event {
	union {
		struct {
			u32 event_category:4;
			u32 events:28;
		};
		u32 val;
	};
} __packed;

#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
	union {
		struct {
			u32 counter_width:8;
			u32 rsvd:20;
			u32 num_events:4;
		};
		u32 val;
	};
	struct idxd_event events[];
} __packed;

#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
	struct {
		u32 perfrst_config:1;
		u32 perfrst_counter:1;
		u32 rsvd:30;
	};
	u32 val;
} __packed;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
	struct {
		u64 enable:1;
		u64 interrupt_ovf:1;
		u64 global_freeze_ovf:1;
		u64 rsvd1:5;
		u64 event_category:4;
		u64 rsvd2:20;
		u64 events:28;
		u64 rsvd3:4;
	};
	u64 val;
} __packed;
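/*
 * Illustrative sketch: programming one performance counter. The per-counter
 * config registers start at IDXD_CNTRCFG_OFFSET within the perfmon block;
 * an 8-byte stride per counter is assumed here. "perfmon_base", "cntr",
 * "category" and "event_mask" are assumed names, with perfmon_base pointing
 * at the perfmon register block located via the table offset register.
 *
 *	union idxd_cntrcfg cntrcfg = { };
 *
 *	cntrcfg.enable = 1;
 *	cntrcfg.event_category = category;
 *	cntrcfg.events = event_mask;
 *	iowrite64(cntrcfg.val, perfmon_base + IDXD_CNTRCFG_OFFSET + cntr * 8);
 */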

#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
	struct {
		u64 event_count_value;
	};
	u64 val;
} __packed;

union event_cfg {
	struct {
		u64 event_cat:4;
		u64 event_enc:28;
	};
	u64 val;
} __packed;

union filter_cfg {
	struct {
		u64 wq:32;
		u64 tc:8;
		u64 pg_sz:4;
		u64 xfer_sz:8;
		u64 eng:8;
	};
	u64 val;
} __packed;

#define IDXD_EVLSTATUS_OFFSET		0xf0

union evl_status_reg {
	struct {
		u32 head:16;
		u32 rsvd:16;
		u32 tail:16;
		u32 rsvd2:14;
		u32 int_pending:1;
		u32 rsvd3:1;
	};
	struct {
		u32 bits_lower32;
		u32 bits_upper32;
	};
	u64 bits;
} __packed;
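/*
 * Illustrative sketch: the event log is a ring; hardware advances tail as
 * it appends entries and software advances head as it consumes them. With
 * "size" the configured log size in entries, the number of pending entries
 * would be computed roughly as:
 *
 *	union evl_status_reg status;	// read from IDXD_EVLSTATUS_OFFSET
 *	unsigned int pending = (status.tail - status.head + size) % size;
 */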

#define IDXD_MAX_BATCH_IDENT	256

struct __evl_entry {
	u64 rsvd:2;
	u64 desc_valid:1;
	u64 wq_idx_valid:1;
	u64 batch:1;
	u64 fault_rw:1;
	u64 priv:1;
	u64 err_info_valid:1;
	u64 error:8;
	u64 wq_idx:8;
	u64 batch_id:8;
	u64 operation:8;
	u64 pasid:20;
	u64 rsvd2:4;

	u16 batch_idx;
	u16 rsvd3;
	union {
		/* Invalid Flags 0x11 */
		u32 invalid_flags;
		/* Invalid Int Handle 0x19 */
		/* Page fault 0x1a */
		/* Page fault 0x06, 0x1f, only operand_id */
		/* Page fault before drain or in batch, 0x26, 0x27 */
		struct {
			u16 int_handle;
			u16 rci:1;
			u16 ims:1;
			u16 rcr:1;
			u16 first_err_in_batch:1;
			u16 rsvd4_2:9;
			u16 operand_id:3;
		};
	};
	u64 fault_addr;
	u64 rsvd5;
} __packed;

struct dsa_evl_entry {
	struct __evl_entry e;
	struct dsa_completion_record cr;
} __packed;

struct iax_evl_entry {
	struct __evl_entry e;
	u64 rsvd[4];
	struct iax_completion_record cr;
} __packed;

#endif