// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <[email protected]> */
/*          Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

#define MAX_POLL_CHUNK_SIZE 16

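/* Update the EQ's DMA doorbell record and write the same value, consumer
 * index plus the arm bit, to the hardware doorbell register so the device
 * can deliver further events.
 */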
void notify_eq(struct erdma_eq *eq)
{
	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);

	*eq->dbrec = db_data;
	writeq(db_data, eq->db);

	atomic64_inc(&eq->notify_num);
}

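/* Return the EQE at the current consumer index when its owner bit indicates
 * the entry was posted during the current pass through the ring (the
 * expected polarity flips each time the consumer index wraps the queue),
 * or NULL if no new event is available.
 */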
void *get_next_valid_eqe(struct erdma_eq *eq)
{
	u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));

	return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}

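/* Poll up to MAX_POLL_CHUNK_SIZE asynchronous event queue entries. CQ error
 * events are delivered to the affected CQ's event handler; all other async
 * events are reported as fatal events on the affected QP. The AEQ is
 * re-armed after the chunk has been processed.
 */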
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
	struct erdma_aeqe *aeqe;
	u32 cqn, qpn;
	struct erdma_qp *qp;
	struct erdma_cq *cq;
	struct ib_event event;
	u32 poll_cnt = 0;

	memset(&event, 0, sizeof(event));

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		aeqe = get_next_valid_eqe(&dev->aeq);
		if (!aeqe)
			break;

		dma_rmb();

		dev->aeq.ci++;
		atomic64_inc(&dev->aeq.event_num);
		poll_cnt++;

		if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
			      le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
			cqn = le32_to_cpu(aeqe->event_data0);
			cq = find_cq_by_cqn(dev, cqn);
			if (!cq)
				continue;

			event.device = cq->ibcq.device;
			event.element.cq = &cq->ibcq;
			event.event = IB_EVENT_CQ_ERR;
			if (cq->ibcq.event_handler)
				cq->ibcq.event_handler(&event,
						       cq->ibcq.cq_context);
		} else {
			qpn = le32_to_cpu(aeqe->event_data0);
			qp = find_qp_by_qpn(dev, qpn);
			if (!qp)
				continue;

			event.device = qp->ibqp.device;
			event.element.qp = &qp->ibqp;
			event.event = IB_EVENT_QP_FATAL;
			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&event,
						       qp->ibqp.qp_context);
		}
	}

	notify_eq(&dev->aeq);
}

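/* Allocate the DMA-coherent queue buffer and the doorbell record for an
 * event queue and initialize its software state.
 */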
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
	u32 buf_size = depth << EQE_SHIFT;

	eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
				      &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
	if (!eq->dbrec)
		goto err_free_qbuf;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);
	eq->ci = 0;
	eq->depth = depth;

	return 0;

err_free_qbuf:
	dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
			  eq->qbuf_dma_addr);

	return -ENOMEM;
}

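/* Release the doorbell record and the queue buffer of an event queue. */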
void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
			  eq->qbuf_dma_addr);
}

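/* Set up the asynchronous event queue and program its buffer address, depth
 * and doorbell record address into the device registers.
 */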
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	int ret;

	ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);

	return 0;
}

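/* Poll up to MAX_POLL_CHUNK_SIZE completion event queue entries, look up the
 * CQ referenced by each entry and invoke its completion handler, then re-arm
 * the CEQ.
 */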
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
	struct erdma_dev *dev = ceq_cb->dev;
	struct erdma_cq *cq;
	u32 poll_cnt = 0;
	u64 *ceqe;
	int cqn;

	if (!ceq_cb->ready)
		return;

	while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
		ceqe = get_next_valid_eqe(&ceq_cb->eq);
		if (!ceqe)
			break;

		dma_rmb();
		ceq_cb->eq.ci++;
		poll_cnt++;
		cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));

		cq = find_cq_by_cqn(dev, cqn);
		if (!cq)
			continue;

		if (rdma_is_kernel_res(&cq->ibcq.res))
			cq->kern_cq.cmdsn++;

		if (cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
	}

	notify_eq(&ceq_cb->eq);
}

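/* CEQ MSI-X interrupt handler: defer event processing to the per-CEQ
 * tasklet.
 */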
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
	struct erdma_eq_cb *ceq_cb = data;

	tasklet_schedule(&ceq_cb->tasklet);

	return IRQ_HANDLED;
}

static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}

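/* Request the MSI-X vector for a CEQ, set up its tasklet and give the vector
 * an affinity hint on a CPU close to the device's NUMA node.
 */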
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);

	return 0;
}

static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}

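/* Post a CREATE_EQ command to the command queue to create a completion event
 * queue in the device.
 */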
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
	req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				   false);
}

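/* Allocate one CEQ, point its doorbell at the corresponding BAR slot and
 * create the queue in hardware; the allocation is undone if the command
 * fails.
 */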
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	int ret;

	ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
		 (ceqn + 1) * ERDMA_DB_SIZE;
	dev->ceqs[ceqn].dev = dev;
	dev->ceqs[ceqn].ready = true;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	if (ret) {
		erdma_eq_destroy(dev, eq);
		dev->ceqs[ceqn].ready = false;
	}

	return ret;
}

static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = false;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  false);
	if (err)
		return;

	erdma_eq_destroy(dev, eq);
}

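/* Create one completion event queue per CEQ MSI-X vector and request its
 * interrupt, unwinding everything already set up on failure.
 */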
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}

void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 i;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		erdma_free_ceq_irq(dev, i);
		erdma_ceq_uninit_one(dev, i);
	}
}