/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#ifndef _AMDXDNA_CTX_H_
#define _AMDXDNA_CTX_H_

#include <linux/bitfield.h>

#include "amdxdna_gem.h"

struct amdxdna_hwctx_priv;

enum ert_cmd_opcode {
	ERT_START_CU = 0,
	ERT_CMD_CHAIN = 19,
	ERT_START_NPU = 20,
};

enum ert_cmd_state {
	ERT_CMD_STATE_INVALID,
	ERT_CMD_STATE_NEW,
	ERT_CMD_STATE_QUEUED,
	ERT_CMD_STATE_RUNNING,
	ERT_CMD_STATE_COMPLETED,
	ERT_CMD_STATE_ERROR,
	ERT_CMD_STATE_ABORT,
	ERT_CMD_STATE_SUBMITTED,
	ERT_CMD_STATE_TIMEOUT,
	ERT_CMD_STATE_NORESPONSE,
};

/*
 * Interpretation of the beginning of data payload for ERT_START_NPU in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
 */
struct amdxdna_cmd_start_npu {
	u64 buffer;       /* instruction buffer address */
	u32 buffer_size;  /* size of buffer in bytes */
	u32 prop_count;   /* properties count */
	u32 prop_args[];  /* properties and regular kernel arguments */
};
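
/*
 * Illustrative sketch, not part of the driver: filling an ERT_START_NPU
 * payload at the start of a command BO. The names cmd_kva, instr_addr and
 * instr_size are hypothetical; only the layout follows the definitions above.
 *
 *	struct amdxdna_cmd *cmd = cmd_kva;
 *	struct amdxdna_cmd_start_npu *npu = (void *)cmd->data;
 *
 *	npu->buffer = instr_addr;
 *	npu->buffer_size = instr_size;
 *	npu->prop_count = 0;
 *	(regular kernel arguments follow in npu->prop_args[])
 */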

/*
 * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
 */
struct amdxdna_cmd_chain {
	u32 command_count;
	u32 submit_index;
	u32 error_index;
	u32 reserved[3];
	u64 data[] __counted_by(command_count);
};
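
/*
 * Illustrative sketch, not part of the driver: a chain payload carrying three
 * command BO handles. The names chain_kva and hdl0..hdl2 are hypothetical, and
 * treating submit_index/error_index as progress and error bookkeeping is an
 * assumption.
 *
 *	struct amdxdna_cmd_chain *chain = chain_kva;
 *
 *	chain->command_count = 3;
 *	chain->submit_index = 0;
 *	chain->error_index = 0;
 *	chain->data[0] = hdl0;
 *	chain->data[1] = hdl1;
 *	chain->data[2] = hdl2;
 */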

/* Exec buffer command header format */
#define AMDXDNA_CMD_STATE		GENMASK(3, 0)
#define AMDXDNA_CMD_EXTRA_CU_MASK	GENMASK(11, 10)
#define AMDXDNA_CMD_COUNT		GENMASK(22, 12)
#define AMDXDNA_CMD_OPCODE		GENMASK(27, 23)
struct amdxdna_cmd {
	u32 header;
	u32 data[];
};
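
/*
 * Illustrative sketch, not part of the driver: packing a command header from
 * the fields above. Reading AMDXDNA_CMD_COUNT as the number of 32-bit payload
 * words (payload_dwords below) is an assumption, not something this header
 * states.
 *
 *	u32 header = FIELD_PREP(AMDXDNA_CMD_STATE, ERT_CMD_STATE_NEW) |
 *		     FIELD_PREP(AMDXDNA_CMD_EXTRA_CU_MASK, 0) |
 *		     FIELD_PREP(AMDXDNA_CMD_COUNT, payload_dwords) |
 *		     FIELD_PREP(AMDXDNA_CMD_OPCODE, ERT_START_NPU);
 */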

struct amdxdna_hwctx {
	struct amdxdna_client *client;
	struct amdxdna_hwctx_priv *priv;
	char *name;

	u32 id;
	u32 max_opc;
	u32 num_tiles;
	u32 mem_size;
	u32 fw_ctx_id;
	u32 col_list_len;
	u32 *col_list;
	u32 start_col;
	u32 num_col;
#define HWCTX_STAT_INIT  0
#define HWCTX_STAT_READY 1
#define HWCTX_STAT_STOP  2
	u32 status;
	u32 old_status;

	struct amdxdna_qos_info qos;
	struct amdxdna_hwctx_param_config_cu *cus;
	u32 syncobj_hdl;
};

#define drm_job_to_xdna_job(j) \
	container_of(j, struct amdxdna_sched_job, base)

struct amdxdna_sched_job {
	struct drm_sched_job base;
	struct kref refcnt;
	struct amdxdna_hwctx *hwctx;
	struct mm_struct *mm;
	/* The fence to notify the DRM scheduler that the hardware completed the job */
	struct dma_fence *fence;
	/* Userspace can wait on this fence */
	struct dma_fence *out_fence;
	bool job_done;
	u64 seq;
	struct amdxdna_gem_obj *cmd_bo;
	size_t bo_cnt;
	struct drm_gem_object *bos[] __counted_by(bo_cnt);
};
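
/*
 * Illustrative sketch, not part of the driver: recovering the amdxdna job
 * from the embedded drm_sched_job in a scheduler callback. The callback name
 * is hypothetical, and returning job->fence is only one plausible use.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
 *
 *		return job->fence;
 *	}
 */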

static inline u32
amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;

	return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
}

static inline void
amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;

	cmd->header &= ~AMDXDNA_CMD_STATE;
	cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
}

static inline enum ert_cmd_state
amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_cmd *cmd = abo->mem.kva;

	return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
}

void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);

static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
{
	return GENMASK(hwctx->start_col + hwctx->num_col - 1,
		       hwctx->start_col);
}
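
/*
 * Example: with start_col = 2 and num_col = 3, amdxdna_hwctx_col_map()
 * returns GENMASK(4, 2) == 0x1c, i.e. one bit set for each column the
 * context occupies.
 */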

void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
void amdxdna_hwctx_suspend(struct amdxdna_client *client);
void amdxdna_hwctx_resume(struct amdxdna_client *client);

int amdxdna_cmd_submit(struct amdxdna_client *client,
		       u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq);

int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
		     u64 seq, u32 timeout);
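
/*
 * Illustrative sketch, not part of the driver: the expected pairing of
 * amdxdna_cmd_submit() and amdxdna_cmd_wait(). The handle variables and
 * timeout_ms are hypothetical.
 *
 *	u64 seq;
 *	int ret;
 *
 *	ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls, arg_bo_cnt,
 *				 hwctx_hdl, &seq);
 *	if (!ret)
 *		ret = amdxdna_cmd_wait(client, hwctx_hdl, seq, timeout_ms);
 */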

int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);

#endif /* _AMDXDNA_CTX_H_ */