1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * Link Layer Control (LLC)
6 *
7 * Copyright IBM Corp. 2016
8 *
9 * Author(s): Klaus Wacker <[email protected]>
10 * Ursula Braun <[email protected]>
11 */
12
13 #include <net/tcp.h>
14 #include <rdma/ib_verbs.h>
15
16 #include "smc.h"
17 #include "smc_core.h"
18 #include "smc_clc.h"
19 #include "smc_llc.h"
20 #include "smc_pnet.h"
21
#define SMC_LLC_DATA_LEN		40

/* common header of all LLC messages; the length field is a single byte
 * for V1 messages and a 16-bit value for V2 (overlaid in a union)
 */
struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	union {
		struct {
			u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:4,
			   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			u8 add_link_rej_rsn:4,
			   reserved:4;
#endif
		};
		u16 length_v2;	/* 44 - 8192 */
	};
	u8 flags;
} __packed;			/* format defined in
				 * IBM Shared Memory Communications Version 2
				 * (https://www.ibm.com/support/pages/node/6326337)
				 */

#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03
46
/* CONFIRM LINK message payload */
struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];		/* 24-bit QP number, network order */
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 max_conns;
	u8 reserved[8];
};
58
#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

/* ADD LINK message payload */
struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];		/* 24-bit QP number, network order */
	u8 link_num;
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved3 : 4,
	   qp_mtu   : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 qp_mtu   : 4,
	   reserved3 : 4;
#endif
	u8 initial_psn[3];		/* 24-bit PSN, network order */
	u8 reserved[8];
};
79
/* one rkey translation entry: old rkey on the existing link plus new
 * rkey and new virtual address on the link being added
 */
struct smc_llc_msg_add_link_cont_rt {
	__be32 rmb_key;
	__be32 rmb_key_new;
	__be64 rmb_vaddr_new;
};
85
/* V2 extension appended to an ADD LINK message; carries a variable
 * number of rkey translation entries (num_rkeys) in the trailing
 * flexible array
 */
struct smc_llc_msg_add_link_v2_ext {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 v2_direct : 1,
	   reserved  : 7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 reserved  : 7,
	   v2_direct : 1;
#endif
	u8 reserved2;
	u8 client_target_gid[SMC_GID_SIZE];
	u8 reserved3[8];
	u16 num_rkeys;
	struct smc_llc_msg_add_link_cont_rt rt[];
} __packed;			/* format defined in
				 * IBM Shared Memory Communications Version 2
				 * (https://www.ibm.com/support/pages/node/6326337)
				 */
103
/* V2 REQUEST ADD LINK message; gid_cnt GIDs follow in the flexible array */
struct smc_llc_msg_req_add_link_v2 {
	struct smc_llc_hdr hd;
	u8 reserved[20];
	u8 gid_cnt;
	u8 reserved2[3];
	u8 gid[][SMC_GID_SIZE];
};
111
#define SMC_LLC_RKEYS_PER_CONT_MSG	2

/* ADD LINK CONTINUATION message; carries up to
 * SMC_LLC_RKEYS_PER_CONT_MSG rkey translation entries per message
 */
struct smc_llc_msg_add_link_cont {	/* type 0x03 */
	struct smc_llc_hdr hd;
	u8 link_num;
	u8 num_rkeys;
	u8 reserved2[2];
	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
	u8 reserved[4];
} __packed;			/* format defined in RFC7609 */
122
#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

/* DELETE LINK message payload */
struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */
132
/* TEST LINK message payload; user_data is echoed back in the response */
struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];
	u8 reserved[24];
};
138
/* one rkey/vaddr pair (rtoken) as carried in CONFIRM RKEY messages */
struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG		3
#define SMC_LLC_RKEYS_PER_MSG_V2	255
152
/* CONFIRM RKEY message payload; rtoken[0] doubles as the rtoken count */
struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};
158
#define SMC_LLC_DEL_RKEY_MAX	8
#define SMC_LLC_FLAG_RKEY_RETRY	0x10
#define SMC_LLC_FLAG_RKEY_NEG	0x20

/* DELETE RKEY message payload */
struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 err_mask;
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};
171
/* V2 DELETE RKEY message; rkeys follow in the flexible array */
struct smc_llc_msg_delete_rkey_v2 {	/* type 0x29 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 num_inval_rkeys;
	u8 reserved[2];
	__be32 rkey[];
};
179
/* union over all fixed-size LLC message layouts; raw gives untyped
 * access to header plus payload bytes
 */
union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_req_add_link_v2 req_add_link;
	struct smc_llc_msg_add_link_cont add_link_cont;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};
196
#define SMC_LLC_FLAG_RESP		0x80

/* queue entry holding one received LLC message and its source link */
struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;
	union smc_llc_msg msg;
};
204
205 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
206
smc_llc_flow_qentry_clr(struct smc_llc_flow * flow)207 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
208 {
209 struct smc_llc_qentry *qentry = flow->qentry;
210
211 flow->qentry = NULL;
212 return qentry;
213 }
214
smc_llc_flow_qentry_del(struct smc_llc_flow * flow)215 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
216 {
217 struct smc_llc_qentry *qentry;
218
219 if (flow->qentry) {
220 qentry = flow->qentry;
221 flow->qentry = NULL;
222 kfree(qentry);
223 }
224 }
225
/* assign a received queue entry to the flow */
static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}
231
/* handle an LLC message that arrives while another flow is active:
 * an ADD_LINK or DELETE_LINK of a different type than the running flow
 * is saved as the link group's single delayed event; any other
 * mismatching message is logged once and dropped
 */
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
				  struct smc_llc_qentry *qentry)
{
	u8 msg_type = qentry->msg.raw.hdr.common.llc_type;

	/* delay the msg only if no other event is already pending */
	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
	    flow_type != msg_type && !lgr->delayed_event) {
		lgr->delayed_event = qentry;
		return;
	}
	/* drop parallel or already-in-progress llc requests */
	if (flow_type != msg_type)
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped parallel "
			     "LLC msg: msg %d flow %d role %d\n",
			     SMC_LGR_ID_SIZE, &lgr->id,
			     lgr->net->net_cookie,
			     qentry->msg.raw.hdr.common.type,
			     flow_type, lgr->role);
	kfree(qentry);
}
252
/* try to start a new llc flow, initiated by an incoming llc msg.
 * Returns true if the flow was started and the qentry assigned to it;
 * false if another flow is active (qentry then delayed or dropped).
 */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		smc_llc_flow_parallel(lgr, flow->type, qentry);
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	/* map the incoming message type onto the flow type */
	switch (qentry->msg.raw.hdr.common.llc_type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	smc_llc_flow_qentry_set(flow, qentry);
	spin_unlock_bh(&lgr->llc_flow_lock);
	return true;
}
284
/* start a new local llc flow, wait till current flow finished.
 * Returns 0 on success, -ENODEV if the link group is gone,
 * -ETIMEDOUT if no flow slot became free in time.
 */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	/* lgr removed from the global list means it is going away */
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	/* wait (unlocked re-check; condition verified again under the
	 * lock after the goto) until the flow slots free up
	 */
	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
				  lgr->llc_flow_rmt.type == allowed_remote))),
				SMC_LLC_WAIT_TIME * 10);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}
318
/* finish the current llc flow: reset the flow and either process a
 * delayed event (local flow only) or wake up waiters for a free slot
 */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		/* let the event worker pick up the delayed event */
		schedule_work(&lgr->llc_event_work);
	else
		wake_up(&lgr->llc_flow_waiter);
}
332
/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request.
 * Returns the flow's qentry on success (caller owns its removal), or
 * NULL on timeout, dead link/lgr, or dropped unexpected message.
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
	u8 rcv_msg;

	wait_event_timeout(lgr->llc_msg_waiter,
			   (flow->qentry ||
			    (lnk && !smc_link_usable(lnk)) ||
			    list_empty(&lgr->list)),
			   time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		/* nothing received, or link/lgr went away meanwhile */
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
	if (exp_msg && rcv_msg != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    rcv_msg == SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		/* unexpected message type: log once and drop it */
		pr_warn_once("smc: SMC-R lg %*phN net %llu dropped unexpected LLC msg: "
			     "msg %d exp %d flow %d role %d flags %x\n",
			     SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie,
			     rcv_msg, exp_msg,
			     flow->type, lgr->role,
			     flow->qentry->msg.raw.hdr.flags);
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}
373
374 /********************************** send *************************************/
375
/* LLC currently needs no private context in a pending tx entry */
struct smc_llc_tx_pend {
};
378
/* handler for send/transmission completion of an LLC msg; intentionally
 * a no-op for now
 */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}
386
387 /**
388 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
389 * @link: Pointer to SMC link used for sending LLC control message.
390 * @wr_buf: Out variable returning pointer to work request payload buffer.
391 * @pend: Out variable returning pointer to private pending WR tracking.
392 * It's the context the transmit complete handler will get.
393 *
394 * Reserves and pre-fills an entry for a pending work request send/tx.
395 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
396 * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
397 *
398 * Return: 0 on success, otherwise an error value.
399 */
smc_llc_add_pending_send(struct smc_link * link,struct smc_wr_buf ** wr_buf,struct smc_wr_tx_pend_priv ** pend)400 static int smc_llc_add_pending_send(struct smc_link *link,
401 struct smc_wr_buf **wr_buf,
402 struct smc_wr_tx_pend_priv **pend)
403 {
404 int rc;
405
406 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
407 pend);
408 if (rc < 0)
409 return rc;
410 BUILD_BUG_ON_MSG(
411 sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
412 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
413 BUILD_BUG_ON_MSG(
414 sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
415 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
416 BUILD_BUG_ON_MSG(
417 sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
418 "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
419 return 0;
420 }
421
smc_llc_add_pending_send_v2(struct smc_link * link,struct smc_wr_v2_buf ** wr_buf,struct smc_wr_tx_pend_priv ** pend)422 static int smc_llc_add_pending_send_v2(struct smc_link *link,
423 struct smc_wr_v2_buf **wr_buf,
424 struct smc_wr_tx_pend_priv **pend)
425 {
426 int rc;
427
428 rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
429 if (rc < 0)
430 return rc;
431 return 0;
432 }
433
/* fill version and length of an LLC header; V2 uses the 16-bit length
 * field, V1 the single-byte one
 */
static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
				 struct smc_link_group *lgr, size_t len)
{
	if (lgr->smc_version != SMC_V2) {
		hdr->common.llc_version = 0;
		hdr->length = len;
		return;
	}
	hdr->common.llc_version = SMC_V2;
	hdr->length_v2 = len;
}
445
/* high-level API to send LLC confirm link.
 * Builds a CONFIRM LINK message from the link's MAC/GID/QP data and
 * sends it as request or response. Returns 0 or a negative error.
 */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
	smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LINKS_ADD_LNK_MAX;
	/* V2 with a recent enough peer negotiates real maximums */
	if (link->lgr->smc_version == SMC_V2 &&
	    link->lgr->peer_smc_release >= SMC_RELEASE_1) {
		confllc->max_conns = link->lgr->max_conns;
		confllc->max_links = link->lgr->max_links;
	}
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
485
/* send LLC confirm rkey request.
 * rtoken[0] describes the rmb on the send_link (its first byte holds
 * the number of additional rtokens); rtoken[1..] carry the rkeys of
 * the other active links of the link group.
 */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	if (!smc_wr_tx_link_hold(send_link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));

	/* collect rtokens of all other active links */
	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (smc_link_active(link) && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr[link->link_idx]->rkey);
			/* virtually-mapped buffers use their cpu address,
			 * otherwise the DMA address of the sg list
			 */
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = rmb_desc->is_vm ?
				cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
				cpu_to_be64((u64)sg_dma_address
					    (rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = rmb_desc->is_vm ?
		cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) :
		cpu_to_be64((u64)sg_dma_address
			    (rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
put_out:
	smc_wr_tx_link_put(send_link);
	return rc;
}
534
/* send LLC delete rkey request for a single rmb's rkey on this link */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
561
562 /* return first buffer from any of the next buf lists */
_smc_llc_get_next_rmb(struct smc_link_group * lgr,int * buf_lst)563 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
564 int *buf_lst)
565 {
566 struct smc_buf_desc *buf_pos;
567
568 while (*buf_lst < SMC_RMBE_SIZES) {
569 buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
570 struct smc_buf_desc, list);
571 if (buf_pos)
572 return buf_pos;
573 (*buf_lst)++;
574 }
575 return NULL;
576 }
577
578 /* return next rmb from buffer lists */
smc_llc_get_next_rmb(struct smc_link_group * lgr,int * buf_lst,struct smc_buf_desc * buf_pos)579 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
580 int *buf_lst,
581 struct smc_buf_desc *buf_pos)
582 {
583 struct smc_buf_desc *buf_next;
584
585 if (!buf_pos)
586 return _smc_llc_get_next_rmb(lgr, buf_lst);
587
588 if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
589 (*buf_lst)++;
590 return _smc_llc_get_next_rmb(lgr, buf_lst);
591 }
592 buf_next = list_next_entry(buf_pos, list);
593 return buf_next;
594 }
595
/* reset the list cursor and return the very first rmb of the link group */
static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	*buf_lst = 0;
	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
}
602
/* fill the V2 ADD LINK extension with one rkey translation entry per
 * used rmb (up to lgr->conns_num); returns the total extension length
 * in bytes including the filled rt[] entries
 */
static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
			       struct smc_link *link, struct smc_link *link_new)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_buf_desc *buf_pos;
	int prim_lnk_idx, lnk_idx, i;
	struct smc_buf_desc *rmb;
	int len = sizeof(*ext);
	int buf_lst;

	ext->v2_direct = !lgr->uses_gateway;
	memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	/* hold rmbs_lock while walking the buffer lists */
	down_write(&lgr->rmbs_lock);
	ext->num_rkeys = lgr->conns_num;
	if (!ext->num_rkeys)
		goto out;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	for (i = 0; i < ext->num_rkeys; i++) {
		/* skip unused buffers */
		while (buf_pos && !(buf_pos)->used)
			buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
		if (!buf_pos)
			break;
		rmb = buf_pos;
		ext->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
		ext->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
		ext->rt[i].rmb_vaddr_new = rmb->is_vm ?
			cpu_to_be64((uintptr_t)rmb->cpu_addr) :
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
		buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
	}
	/* only the i entries actually filled count towards the length */
	len += i * sizeof(ext->rt[0]);
out:
	up_write(&lgr->rmbs_lock);
	return len;
}
641
/* send ADD LINK request or response.
 * For SMC_V2 link groups a large (V2) send buffer is used and the V2
 * extension with the rkey translations is appended after the base msg.
 */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link_v2_ext *ext = NULL;
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	int len = sizeof(*addllc);
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	if (link->lgr->smc_version == SMC_V2) {
		struct smc_wr_v2_buf *wr_buf;

		rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
		/* V2 extension follows directly after the base message */
		ext = (struct smc_llc_msg_add_link_v2_ext *)
						&wr_buf->raw[sizeof(*addllc)];
		memset(ext, 0, SMC_WR_TX_SIZE);
	} else {
		struct smc_wr_buf *wr_buf;

		rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
	}

	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
	if (reqresp == SMC_LLC_RESP)
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
	if (link_new) {
		addllc->link_num = link_new->link_id;
		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
		hton24(addllc->initial_psn, link_new->psn_initial);
		/* requests offer our mtu, responses the negotiated minimum */
		if (reqresp == SMC_LLC_REQ)
			addllc->qp_mtu = link_new->path_mtu;
		else
			addllc->qp_mtu = min(link_new->path_mtu,
					     link_new->peer_mtu);
	}
	if (ext && link_new)
		len += smc_llc_fill_ext_v2(ext, link, link_new);
	smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
	/* send llc message */
	if (link->lgr->smc_version == SMC_V2)
		rc = smc_wr_tx_v2_send(link, pend, len);
	else
		rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
702
/* send DELETE LINK request or response.
 * link_del_id selects a single link to delete; 0 means delete all links
 * (SMC_LLC_FLAG_DEL_LINK_ALL is set instead).
 */
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;

	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	if (link_del_id)
		delllc->link_num = link_del_id;
	else
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc->reason = htonl(reason);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
738
/* send LLC test link request carrying 16 bytes of caller user data */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.llc_type = SMC_LLC_TEST_LINK;
	smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
763
764 /* schedule an llc send on link, may wait for buffers */
smc_llc_send_message(struct smc_link * link,void * llcbuf)765 static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
766 {
767 struct smc_wr_tx_pend_priv *pend;
768 struct smc_wr_buf *wr_buf;
769 int rc;
770
771 if (!smc_wr_tx_link_hold(link))
772 return -ENOLINK;
773 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
774 if (rc)
775 goto put_out;
776 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
777 rc = smc_wr_tx_send(link, pend);
778 put_out:
779 smc_wr_tx_link_put(link);
780 return rc;
781 }
782
/* schedule an llc send on link, may wait for buffers,
 * and wait for send completion notification.
 * @return 0 on success
 */
static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	/* unlike smc_llc_send_message(), block until tx completion */
	rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
804
805 /********************************* receive ***********************************/
806
smc_llc_alloc_alt_link(struct smc_link_group * lgr,enum smc_lgr_type lgr_new_t)807 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
808 enum smc_lgr_type lgr_new_t)
809 {
810 int i;
811
812 if (lgr->type == SMC_LGR_SYMMETRIC ||
813 (lgr->type != SMC_LGR_SINGLE &&
814 (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
815 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
816 return -EMLINK;
817
818 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
819 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
820 for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
821 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
822 return i;
823 } else {
824 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
825 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
826 return i;
827 }
828 return -EMLINK;
829 }
830
/* send one add_link_continue msg.
 * Packs up to SMC_LLC_RKEYS_PER_CONT_MSG rkey translations into one
 * message, advancing the caller's buffer cursor and remaining count.
 */
static int smc_llc_add_link_cont(struct smc_link *link,
				 struct smc_link *link_new, u8 *num_rkeys_todo,
				 int *buf_lst, struct smc_buf_desc **buf_pos)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	int prim_lnk_idx, lnk_idx, i, rc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_buf_desc *rmb;
	u8 n;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
	memset(addc_llc, 0, sizeof(*addc_llc));

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	addc_llc->link_num = link_new->link_id;
	addc_llc->num_rkeys = *num_rkeys_todo;
	n = *num_rkeys_todo;
	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
		/* skip unused buffers */
		while (*buf_pos && !(*buf_pos)->used)
			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
		if (!*buf_pos) {
			/* ran out of buffers: shrink the advertised count */
			addc_llc->num_rkeys = addc_llc->num_rkeys -
					      *num_rkeys_todo;
			*num_rkeys_todo = 0;
			break;
		}
		rmb = *buf_pos;

		addc_llc->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey);
		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey);
		addc_llc->rt[i].rmb_vaddr_new = rmb->is_vm ?
			cpu_to_be64((uintptr_t)rmb->cpu_addr) :
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));

		(*num_rkeys_todo)--;
		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
	}
	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
	if (lgr->role == SMC_CLNT)
		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
886
/* client side of the add-link rkey exchange: alternately receive
 * ADD_LINK_CONT messages from the server and send our own, until both
 * sides have transferred all their rkeys
 */
static int smc_llc_cli_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	down_write(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		/* wait for the server's next ADD_LINK_CONT */
		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			break;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		/* store the received rtokens for the new link */
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		/* send the next batch of our own rkeys */
		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
					   &buf_lst, &buf_pos);
		if (rc)
			break;
	} while (num_rkeys_send || num_rkeys_recv);

	up_write(&lgr->rmbs_lock);
	return rc;
}
929
930 /* prepare and send an add link reject response */
smc_llc_cli_add_link_reject(struct smc_llc_qentry * qentry)931 static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
932 {
933 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
934 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
935 qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
936 smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
937 sizeof(qentry->msg));
938 return smc_llc_send_message(qentry->link, &qentry->msg);
939 }
940
/* client side of confirming a newly added link: wait for the server's
 * CONFIRM LINK, bring the new QP to RTS, register buffers, and answer
 * with a CONFIRM LINK response; on any failure a DELETE LINK is sent
 * and -ENOLINK returned
 */
static int smc_llc_cli_conf_link(struct smc_link *link,
				 struct smc_init_info *ini,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	/* receive CONFIRM LINK request over RoCE fabric */
	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry) {
		rc = smc_llc_send_delete_link(link, link_new->link_id,
					      SMC_LLC_REQ, false,
					      SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* received DELETE_LINK instead */
		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, &qentry->msg);
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	/* transition the new QP to ready-to-send state */
	rc = smc_ib_modify_qp_rts(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_wr_remember_qp_attr(link_new);

	/* register all link group buffers on the new link */
	rc = smcr_buf_reg_lgr(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_llc_link_active(link_new);
	/* update the link group type to reflect the new topology */
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	return 0;
}
998
/* store the rkeys delivered in the SMCv2 add_link extension for the
 * new link, under protection of the rmbs lock
 */
static void smc_llc_save_add_link_rkeys(struct smc_link *link,
					struct smc_link *link_new,
					u8 *llc_msg)
{
	struct smc_llc_msg_add_link_v2_ext *ext;
	struct smc_link_group *lgr = link->lgr;
	u8 nkeys;
	int i;

	/* the v2 extension is located behind the first send buffer element */
	ext = (struct smc_llc_msg_add_link_v2_ext *)(llc_msg +
						     SMC_WR_TX_SIZE);
	nkeys = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
	down_write(&lgr->rmbs_lock);
	for (i = 0; i < nkeys; i++)
		smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
			       ext->rt[i].rmb_key,
			       ext->rt[i].rmb_vaddr_new,
			       ext->rt[i].rmb_key_new);
	up_write(&lgr->rmbs_lock);
}
1019
smc_llc_save_add_link_info(struct smc_link * link,struct smc_llc_msg_add_link * add_llc)1020 static void smc_llc_save_add_link_info(struct smc_link *link,
1021 struct smc_llc_msg_add_link *add_llc)
1022 {
1023 link->peer_qpn = ntoh24(add_llc->sender_qp_num);
1024 memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
1025 memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
1026 link->peer_psn = ntoh24(add_llc->initial_psn);
1027 link->peer_mtu = add_llc->qp_mtu;
1028 }
1029
/* as an SMC client, process an add link request.
 * Allocates and initializes an alternate link, exchanges rkeys with the
 * peer and confirms the link. On failure the new link is cleared (if
 * already initialized) and a reject response is sent to the server.
 * Consumes qentry and frees it before returning.
 */
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
{
	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;
	struct smc_link *lnk_new = NULL;
	int lnk_idx, rc = 0;

	/* a zero qp_mtu makes the request unusable - reject it */
	if (!llc->qp_mtu)
		goto out_reject;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out_reject;
	}

	/* link group is limited to a single link - nothing to add */
	if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) {
		rc = 0;
		goto out_reject;
	}

	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
	}
	/* look for an alternate RoCE device for the new link */
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	/* peer uses the same gid (and mac for SMCv1) as the existing link:
	 * the peer side has no alternate path
	 */
	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	    (lgr->smc_version == SMC_V2 ||
	     !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
			goto out_reject;
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	/* no local alternate device found - reuse the current one */
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		goto out_reject;
	lnk_new = &lgr->lnk[lnk_idx];
	rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
	if (rc)
		goto out_reject;
	smc_llc_save_add_link_info(lnk_new, llc);
	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
	smc_llc_link_set_uid(lnk_new);

	rc = smc_ib_ready_link(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smcr_buf_map_lgr(lnk_new);
	if (rc)
		goto out_clear_lnk;

	/* respond to the server's ADD_LINK request */
	rc = smc_llc_send_add_link(link,
				   lnk_new->smcibdev->mac[lnk_new->ibport - 1],
				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
	if (rc)
		goto out_clear_lnk;
	if (lgr->smc_version == SMC_V2) {
		/* SMCv2: rkeys arrive in the add_link v2 extension; the
		 * message may live in the shared v2 rx buffer
		 */
		u8 *llc_msg = smc_link_shared_v2_rxbuf(link) ?
			(u8 *)lgr->wr_rx_buf_v2 : (u8 *)llc;
		smc_llc_save_add_link_rkeys(link, lnk_new, llc_msg);
	} else {
		/* SMCv1: exchange rkeys via ADD_LINK_CONT messages */
		rc = smc_llc_cli_rkey_exchange(link, lnk_new);
		if (rc) {
			rc = 0;
			goto out_clear_lnk;
		}
	}
	rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
	if (!rc)
		goto out;
out_clear_lnk:
	lnk_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(lnk_new, false);
out_reject:
	smc_llc_cli_add_link_reject(qentry);
out:
	kfree(ini);
	kfree(qentry);
	return rc;
}
1125
/* SMCv2 only: send a REQ_ADD_LINK message carrying the list of local
 * gids, to invite the server to start the add_link processing.
 * Silently returns if the link cannot be held, the link group already
 * has the maximum topology, or no alternate gid exists.
 */
static void smc_llc_send_request_add_link(struct smc_link *link)
{
	struct smc_llc_msg_req_add_link_v2 *llc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_v2_buf *wr_buf;
	struct smc_gidlist gidlist;
	int rc, len, i;

	/* pin the link against concurrent teardown while sending */
	if (!smc_wr_tx_link_hold(link))
		return;
	if (link->lgr->type == SMC_LGR_SYMMETRIC ||
	    link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto put_out;

	smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
	/* need at least one alternate gid besides the current one */
	if (gidlist.len <= 1)
		goto put_out;

	rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
	memset(llc, 0, SMC_WR_TX_SIZE);

	llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
	for (i = 0; i < gidlist.len; i++)
		memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
	llc->gid_cnt = gidlist.len;
	/* message length covers the fixed part plus the gid array */
	len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
	rc = smc_wr_tx_v2_send(link, pend, len);
	if (!rc)
		/* set REQ_ADD_LINK flow and wait for response from peer */
		link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
put_out:
	smc_wr_tx_link_put(link);
}
1163
1164 /* as an SMC client, invite server to start the add_link processing */
smc_llc_cli_add_link_invite(struct smc_link * link,struct smc_llc_qentry * qentry)1165 static void smc_llc_cli_add_link_invite(struct smc_link *link,
1166 struct smc_llc_qentry *qentry)
1167 {
1168 struct smc_link_group *lgr = smc_get_lgr(link);
1169 struct smc_init_info *ini = NULL;
1170
1171 if (lgr->smc_version == SMC_V2) {
1172 smc_llc_send_request_add_link(link);
1173 goto out;
1174 }
1175
1176 if (lgr->type == SMC_LGR_SYMMETRIC ||
1177 lgr->type == SMC_LGR_ASYMMETRIC_PEER)
1178 goto out;
1179
1180 if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1)
1181 goto out;
1182
1183 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1184 if (!ini)
1185 goto out;
1186
1187 ini->vlan_id = lgr->vlan_id;
1188 smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
1189 if (!ini->ib_dev)
1190 goto out;
1191
1192 smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
1193 ini->ib_gid, NULL, SMC_LLC_REQ);
1194 out:
1195 kfree(ini);
1196 kfree(qentry);
1197 }
1198
smc_llc_is_empty_llc_message(union smc_llc_msg * llc)1199 static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
1200 {
1201 int i;
1202
1203 for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
1204 if (llc->raw.data[i])
1205 return false;
1206 return true;
1207 }
1208
smc_llc_is_local_add_link(union smc_llc_msg * llc)1209 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
1210 {
1211 if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
1212 smc_llc_is_empty_llc_message(llc))
1213 return true;
1214 return false;
1215 }
1216
smc_llc_process_cli_add_link(struct smc_link_group * lgr)1217 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
1218 {
1219 struct smc_llc_qentry *qentry;
1220
1221 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1222
1223 down_write(&lgr->llc_conf_mutex);
1224 if (smc_llc_is_local_add_link(&qentry->msg))
1225 smc_llc_cli_add_link_invite(qentry->link, qentry);
1226 else
1227 smc_llc_cli_add_link(qentry->link, qentry);
1228 up_write(&lgr->llc_conf_mutex);
1229 }
1230
smc_llc_active_link_count(struct smc_link_group * lgr)1231 static int smc_llc_active_link_count(struct smc_link_group *lgr)
1232 {
1233 int i, link_count = 0;
1234
1235 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1236 if (!smc_link_active(&lgr->lnk[i]))
1237 continue;
1238 link_count++;
1239 }
1240 return link_count;
1241 }
1242
/* find the asymmetric link when 3 links are established */
static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
{
	int asym_idx = -ENOENT;
	int i, j, k;
	bool found;

	/* determine asymmetric link: two usable links sharing the same
	 * local gid indicate that one of them is the asymmetric one
	 */
	found = false;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
			if (!smc_link_usable(&lgr->lnk[i]) ||
			    !smc_link_usable(&lgr->lnk[j]))
				continue;
			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
				    SMC_GID_SIZE)) {
				found = true;	/* asym_lnk is i or j */
				break;
			}
		}
		if (found)
			break;
	}
	if (!found)
		goto out;	/* no asymmetric link */
	/* i and j keep the indexes of the gid-sharing pair from above;
	 * the one whose peer gid is duplicated on a third usable link
	 * is the asymmetric link
	 */
	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
		if (!smc_link_usable(&lgr->lnk[k]))
			continue;
		if (k != i &&
		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = i;
			break;
		}
		if (k != j &&
		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = j;
			break;
		}
	}
out:
	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
}
1287
/* tear down the asymmetric link of a link group, if any:
 * move its connections to another link, ask the peer to delete it,
 * and finally clear the link. Called within the DEL_LINK flow.
 */
static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_new = NULL, *lnk_asym;
	struct smc_llc_qentry *qentry;
	int rc;

	lnk_asym = smc_llc_find_asym_link(lgr);
	if (!lnk_asym)
		return; /* no asymmetric link */
	if (!smc_link_downing(&lnk_asym->state))
		return;
	/* move connections away; lnk_new is the link that took them over */
	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
	smc_wr_tx_wait_no_pending_sends(lnk_asym);
	if (!lnk_new)
		goto out_free;
	/* change flow type from ADD_LINK into DEL_LINK */
	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
	if (rc) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	/* wait for the peer's DELETE_LINK response */
	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_LINK);
	if (!qentry) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
out_free:
	smcr_link_clear(lnk_asym, true);
}
1321
/* SMCv1 server side: exchange rkeys for all RMBs with the peer via
 * ADD_LINK_CONT messages, alternating send and receive until both
 * sides have no keys left. Holds the rmbs lock for the whole exchange.
 * Returns 0 on success or -ETIMEDOUT if the peer stops responding.
 */
static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	down_write(&lgr->rmbs_lock);
	/* one rkey per connection needs to be sent */
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		/* cap at the per-message maximum */
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	up_write(&lgr->rmbs_lock);
	return rc;
}
1362
/* server side: confirm the new link with the peer and activate it.
 * Sends CONFIRM_LINK as request and waits for the peer's response.
 * On failure a DELETE_LINK is sent and -ENOLINK is returned.
 */
static int smc_llc_srv_conf_link(struct smc_link *link,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc;

	/* send CONFIRM LINK request over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
	if (rc)
		return -ENOLINK;
	/* receive CONFIRM LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry ||
	    qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* send DELETE LINK */
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		if (qentry)
			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_link_active(link_new);
	/* update the link group type to reflect the new topology */
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return 0;
}
1396
smc_llc_send_req_add_link_response(struct smc_llc_qentry * qentry)1397 static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
1398 {
1399 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
1400 smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
1401 sizeof(qentry->msg));
1402 memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
1403 smc_llc_send_message(qentry->link, &qentry->msg);
1404 }
1405
/* as an SMC server, add a new link to the link group.
 * req_qentry may carry the triggering message; if it is a REQ_ADD_LINK
 * from the client, an (empty) response is owed to the peer whenever the
 * flow does not progress far enough to send an ADD_LINK request itself.
 * Returns 0 on success or a negative error code.
 */
int smc_llc_srv_add_link(struct smc_link *link,
			 struct smc_llc_qentry *req_qentry)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	bool send_req_add_link_resp = false;
	struct smc_link *link_new = NULL;
	struct smc_init_info *ini = NULL;
	int lnk_idx, rc = 0;

	if (req_qentry &&
	    req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
		send_req_add_link_resp = true;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out;
	}

	/* link group may not grow beyond a single link - nothing to do */
	if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) {
		rc = 0;
		goto out;
	}

	/* ignore client add link recommendation, start new flow */
	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		if (send_req_add_link_resp) {
			struct smc_llc_msg_req_add_link_v2 *req_add =
				&req_qentry->msg.req_add_link;

			/* derive the destination from the client's first gid */
			ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
		}
	}
	/* look for an alternate RoCE device for the new link */
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	/* no alternate device found - reuse the current one */
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0) {
		rc = 0;
		goto out;
	}

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
	if (rc)
		goto out;
	link_new = &lgr->lnk[lnk_idx];

	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;

	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[link_new->ibport-1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	/* the ADD_LINK request itself answers the client's REQ_ADD_LINK */
	send_req_add_link_resp = false;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		/* client rejected the new link */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	/* client has no alternate path: peer side is asymmetric */
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     (lgr->smc_version == SMC_V2 ||
	      !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	if (lgr->smc_version == SMC_V2) {
		/* SMCv2: rkeys arrive in the add_link v2 extension; the
		 * message may live in the shared v2 rx buffer
		 */
		u8 *llc_msg = smc_link_shared_v2_rxbuf(link) ?
			(u8 *)lgr->wr_rx_buf_v2 : (u8 *)add_llc;
		smc_llc_save_add_link_rkeys(link, link_new, llc_msg);
	} else {
		/* SMCv1: exchange rkeys via ADD_LINK_CONT messages */
		rc = smc_llc_srv_rkey_exchange(link, link_new);
		if (rc)
			goto out_err;
	}
	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
	if (rc)
		goto out_err;
	kfree(ini);
	return 0;
out_err:
	if (link_new) {
		link_new->state = SMC_LNK_INACTIVE;
		smcr_link_clear(link_new, false);
	}
out:
	kfree(ini);
	/* still owe the client an answer to its REQ_ADD_LINK */
	if (send_req_add_link_resp)
		smc_llc_send_req_add_link_response(req_qentry);
	return rc;
}
1528
smc_llc_process_srv_add_link(struct smc_link_group * lgr)1529 static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
1530 {
1531 struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
1532 struct smc_llc_qentry *qentry;
1533 int rc;
1534
1535 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1536
1537 down_write(&lgr->llc_conf_mutex);
1538 rc = smc_llc_srv_add_link(link, qentry);
1539 if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
1540 /* delete any asymmetric link */
1541 smc_llc_delete_asym_link(lgr);
1542 }
1543 up_write(&lgr->llc_conf_mutex);
1544 kfree(qentry);
1545 }
1546
1547 /* enqueue a local add_link req to trigger a new add_link flow */
smc_llc_add_link_local(struct smc_link * link)1548 void smc_llc_add_link_local(struct smc_link *link)
1549 {
1550 struct smc_llc_msg_add_link add_llc = {};
1551
1552 add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
1553 smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
1554 /* no dev and port needed */
1555 smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1556 }
1557
1558 /* worker to process an add link message */
smc_llc_add_link_work(struct work_struct * work)1559 static void smc_llc_add_link_work(struct work_struct *work)
1560 {
1561 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1562 llc_add_link_work);
1563
1564 if (list_empty(&lgr->list)) {
1565 /* link group is terminating */
1566 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1567 goto out;
1568 }
1569
1570 if (lgr->role == SMC_CLNT)
1571 smc_llc_process_cli_add_link(lgr);
1572 else
1573 smc_llc_process_srv_add_link(lgr);
1574 out:
1575 if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
1576 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1577 }
1578
1579 /* enqueue a local del_link msg to trigger a new del_link flow,
1580 * called only for role SMC_SERV
1581 */
smc_llc_srv_delete_link_local(struct smc_link * link,u8 del_link_id)1582 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1583 {
1584 struct smc_llc_msg_del_link del_llc = {};
1585
1586 del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1587 smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
1588 del_llc.link_num = del_link_id;
1589 del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1590 del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1591 smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
1592 }
1593
/* client side: process a DELETE_LINK request from the peer.
 * Either terminates the whole link group (DEL_LINK_ALL) or removes a
 * single link, answers the peer, and adjusts the link group type
 * according to the remaining active links.
 */
static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
	struct smc_llc_msg_del_link *del_llc;
	struct smc_llc_qentry *qentry;
	int active_links;
	int lnk_idx;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* peer wants the whole link group gone */
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	down_write(&lgr->llc_conf_mutex);
	/* delete single link */
	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
			continue;
		lnk_del = &lgr->lnk[lnk_idx];
		break;
	}
	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (!lnk_del) {
		/* link was not found */
		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
		smc_llc_send_message(lnk, &qentry->msg);
		goto out_unlock;
	}
	/* remember the asym link before deletion to compare afterwards */
	lnk_asym = smc_llc_find_asym_link(lgr);

	del_llc->reason = 0;
	smc_llc_send_message(lnk, &qentry->msg); /* response */

	/* move connections off the dying link, then clear it */
	if (smc_link_downing(&lnk_del->state))
		smc_switch_conns(lgr, lnk_del, false);
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (lnk_del == lnk_asym) {
		/* expected deletion of asym link, don't change lgr state */
	} else if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}
out_unlock:
	up_write(&lgr->llc_conf_mutex);
out:
	kfree(qentry);
}
1648
1649 /* try to send a DELETE LINK ALL request on any active link,
1650 * waiting for send completion
1651 */
smc_llc_send_link_delete_all(struct smc_link_group * lgr,bool ord,u32 rsn)1652 void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
1653 {
1654 struct smc_llc_msg_del_link delllc = {};
1655 int i;
1656
1657 delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
1658 smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
1659 if (ord)
1660 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1661 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
1662 delllc.reason = htonl(rsn);
1663
1664 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1665 if (!smc_link_sendable(&lgr->lnk[i]))
1666 continue;
1667 if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
1668 break;
1669 }
1670 }
1671
/* server side: process a DELETE_LINK event.
 * Either terminates the whole link group (DEL_LINK_ALL), or removes a
 * single link, drives the peer through the DELETE_LINK exchange, and
 * adjusts the link group type; if the group fell back to a single link
 * a new add_link flow is triggered.
 */
static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
{
	struct smc_llc_msg_del_link *del_llc;
	struct smc_link *lnk, *lnk_del;
	struct smc_llc_qentry *qentry;
	int active_links;
	int i;

	down_write(&lgr->llc_conf_mutex);
	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* delete entire lgr */
		smc_llc_send_link_delete_all(lgr, true, ntohl(
					      qentry->msg.delete_link.reason));
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	/* delete single link */
	lnk_del = NULL;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].link_id == del_llc->link_num) {
			lnk_del = &lgr->lnk[i];
			break;
		}
	}
	if (!lnk_del)
		goto out; /* asymmetric link already deleted */

	/* move connections off the dying link and drain its sends */
	if (smc_link_downing(&lnk_del->state)) {
		if (smc_switch_conns(lgr, lnk_del, false))
			smc_wr_tx_wait_no_pending_sends(lnk_del);
	}
	if (!list_empty(&lgr->list)) {
		/* qentry is either a request from peer (send it back to
		 * initiate the DELETE_LINK processing), or a locally
		 * enqueued DELETE_LINK request (forward it)
		 */
		if (!smc_llc_send_message(lnk, &qentry->msg)) {
			struct smc_llc_qentry *qentry2;

			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
					       SMC_LLC_DELETE_LINK);
			if (qentry2)
				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		}
	}
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}

	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
		/* trigger setup of asymm alt link */
		smc_llc_add_link_local(lnk);
	}
out:
	up_write(&lgr->llc_conf_mutex);
	kfree(qentry);
}
1739
smc_llc_delete_link_work(struct work_struct * work)1740 static void smc_llc_delete_link_work(struct work_struct *work)
1741 {
1742 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1743 llc_del_link_work);
1744
1745 if (list_empty(&lgr->list)) {
1746 /* link group is terminating */
1747 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1748 goto out;
1749 }
1750
1751 if (lgr->role == SMC_CLNT)
1752 smc_llc_process_cli_delete_link(lgr);
1753 else
1754 smc_llc_process_srv_delete_link(lgr);
1755 out:
1756 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1757 }
1758
/* process a confirm_rkey request from peer, remote flow.
 * Registers the received rtokens and sends the message back as
 * response, with the negative flags set if registration failed.
 */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	/* peer claims more entries than one message can carry - reject */
	if (num_entries > SMC_LLC_RKEYS_PER_MSG)
		goto out_err;
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	/* remaining entries map the same rtoken on the other links */
	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1798
/* process a delete_rkey request from peer, remote flow.
 * Deletes the listed rtokens and answers with a response; failed
 * deletions are reported via the negative flag and err_mask.
 * For SMCv2 the larger v2 message layout is used, possibly located in
 * the shared v2 rx buffer.
 */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	if (lgr->smc_version == SMC_V2) {
		struct smc_llc_msg_delete_rkey_v2 *llcv2;

		/* get a v2 view of the message; copy into the shared
		 * rx buffer when the received buffer is too small
		 */
		if (smc_link_shared_v2_rxbuf(link)) {
			memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
			llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
		} else {
			llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)llc;
		}
		llcv2->num_inval_rkeys = 0;

		max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
		for (i = 0; i < max; i++) {
			if (smc_rtoken_delete(link, llcv2->rkey[i]))
				llcv2->num_inval_rkeys++;
		}
		/* clear the v1 payload fields of the response */
		memset(&llc->rkey[0], 0, sizeof(llc->rkey));
		memset(&llc->reserved2, 0, sizeof(llc->reserved2));
		smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
		if (llcv2->num_inval_rkeys) {
			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
			llc->err_mask = llcv2->num_inval_rkeys;
		}
		goto finish;
	}

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		/* one bit per failed rkey, MSB first */
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
finish:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1852
/* log an LLC protocol violation and schedule termination of the
 * link group with reason SMC_LLC_DEL_PROT_VIOL
 */
static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu LLC protocol violation: "
			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr->net->net_cookie, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}
1861
1862 /* flush the llc event queue */
smc_llc_event_flush(struct smc_link_group * lgr)1863 static void smc_llc_event_flush(struct smc_link_group *lgr)
1864 {
1865 struct smc_llc_qentry *qentry, *q;
1866
1867 spin_lock_bh(&lgr->llc_event_q_lock);
1868 list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1869 list_del_init(&qentry->list);
1870 kfree(qentry);
1871 }
1872 spin_unlock_bh(&lgr->llc_event_q_lock);
1873 }
1874
/* Dispatch one LLC request from the event queue; runs in worker context
 * via smc_llc_event_work().
 *
 * Ownership of @qentry passes to this function: every path either frees
 * it (fall through to "out"), or hands it over via smc_llc_flow_start()
 * or smc_llc_flow_qentry_set() and returns early.  NOTE(review): the
 * early-return paths rely on smc_llc_flow_start() consuming the qentry
 * even when it does not start a flow (no kfree() on those paths here) —
 * confirm against its definition.
 */
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	/* drop messages arriving on a link that is no longer usable */
	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.llc_type) {
	case SMC_LLC_TEST_LINK:
		/* keepalive probe: echo the message back with RESP set */
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (smc_llc_is_local_add_link(llc)) {
				/* locally enqueued request asking the server
				 * to add a link
				 */
				if (lgr->llc_flow_lcl.type ==
				    SMC_LLC_FLOW_ADD_LINK)
					break;	/* add_link in progress */
				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						       qentry)) {
					schedule_work(&lgr->llc_add_link_work);
				}
				return;
			}
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
			    !lgr->llc_flow_lcl.qentry) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up(&lgr->llc_msg_waiter);
				return;
			}
			if (lgr->llc_flow_lcl.type ==
					SMC_LLC_FLOW_REQ_ADD_LINK) {
				/* server started add_link processing */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				schedule_work(&lgr->llc_add_link_work);
				return;
			}
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
			return;
		}
		break;	/* no flow waiting: drop */
	case SMC_LLC_DELETE_LINK:
		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
		    !lgr->llc_flow_lcl.qentry) {
			/* DEL LINK REQ during ADD LINK SEQ */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			schedule_work(&lgr->llc_del_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_REQ_ADD_LINK:
		/* handle response here, smc_llc_flow_stop() cannot be called
		 * in tasklet context
		 */
		if (lgr->role == SMC_CLNT &&
		    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
		    (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
			smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
		} else if (lgr->role == SMC_SERV) {
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				/* as smc server, handle client suggestion */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				schedule_work(&lgr->llc_add_link_work);
			}
			return;
		}
		break;
	default:
		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
		break;
	}
out:
	kfree(qentry);
}
1992
1993 /* worker to process llc messages on the event queue */
smc_llc_event_work(struct work_struct * work)1994 static void smc_llc_event_work(struct work_struct *work)
1995 {
1996 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1997 llc_event_work);
1998 struct smc_llc_qentry *qentry;
1999
2000 if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
2001 qentry = lgr->delayed_event;
2002 lgr->delayed_event = NULL;
2003 if (smc_link_usable(qentry->link))
2004 smc_llc_event_handler(qentry);
2005 else
2006 kfree(qentry);
2007 }
2008
2009 again:
2010 spin_lock_bh(&lgr->llc_event_q_lock);
2011 if (!list_empty(&lgr->llc_event_q)) {
2012 qentry = list_first_entry(&lgr->llc_event_q,
2013 struct smc_llc_qentry, list);
2014 list_del_init(&qentry->list);
2015 spin_unlock_bh(&lgr->llc_event_q_lock);
2016 smc_llc_event_handler(qentry);
2017 goto again;
2018 }
2019 spin_unlock_bh(&lgr->llc_event_q_lock);
2020 }
2021
2022 /* process llc responses in tasklet context */
smc_llc_rx_response(struct smc_link * link,struct smc_llc_qentry * qentry)2023 static void smc_llc_rx_response(struct smc_link *link,
2024 struct smc_llc_qentry *qentry)
2025 {
2026 enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
2027 struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
2028 u8 llc_type = qentry->msg.raw.hdr.common.llc_type;
2029
2030 switch (llc_type) {
2031 case SMC_LLC_TEST_LINK:
2032 if (smc_link_active(link))
2033 complete(&link->llc_testlink_resp);
2034 break;
2035 case SMC_LLC_ADD_LINK:
2036 case SMC_LLC_ADD_LINK_CONT:
2037 case SMC_LLC_CONFIRM_LINK:
2038 if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
2039 break; /* drop out-of-flow response */
2040 goto assign;
2041 case SMC_LLC_DELETE_LINK:
2042 if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
2043 break; /* drop out-of-flow response */
2044 goto assign;
2045 case SMC_LLC_CONFIRM_RKEY:
2046 case SMC_LLC_DELETE_RKEY:
2047 if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
2048 break; /* drop out-of-flow response */
2049 goto assign;
2050 case SMC_LLC_CONFIRM_RKEY_CONT:
2051 /* not used because max links is 3 */
2052 break;
2053 default:
2054 smc_llc_protocol_violation(link->lgr,
2055 qentry->msg.raw.hdr.common.type);
2056 break;
2057 }
2058 kfree(qentry);
2059 return;
2060 assign:
2061 /* assign responses to the local flow, we requested them */
2062 smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
2063 wake_up(&link->lgr->llc_msg_waiter);
2064 }
2065
smc_llc_enqueue(struct smc_link * link,union smc_llc_msg * llc)2066 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
2067 {
2068 struct smc_link_group *lgr = link->lgr;
2069 struct smc_llc_qentry *qentry;
2070 unsigned long flags;
2071
2072 qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
2073 if (!qentry)
2074 return;
2075 qentry->link = link;
2076 INIT_LIST_HEAD(&qentry->list);
2077 memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
2078
2079 /* process responses immediately */
2080 if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
2081 llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
2082 smc_llc_rx_response(link, qentry);
2083 return;
2084 }
2085
2086 /* add requests to event queue */
2087 spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
2088 list_add_tail(&qentry->list, &lgr->llc_event_q);
2089 spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
2090 queue_work(system_highpri_wq, &lgr->llc_event_work);
2091 }
2092
2093 /* copy received msg and add it to the event queue */
smc_llc_rx_handler(struct ib_wc * wc,void * buf)2094 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
2095 {
2096 struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
2097 union smc_llc_msg *llc = buf;
2098
2099 if (wc->byte_len < sizeof(*llc))
2100 return; /* short message */
2101 if (!llc->raw.hdr.common.llc_version) {
2102 if (llc->raw.hdr.length != sizeof(*llc))
2103 return; /* invalid message */
2104 } else {
2105 if (llc->raw.hdr.length_v2 < sizeof(*llc))
2106 return; /* invalid message */
2107 }
2108
2109 smc_llc_enqueue(link, llc);
2110 }
2111
2112 /***************************** worker, utils *********************************/
2113
/* periodic worker: verify link liveness with a TEST_LINK keepalive.
 *
 * If a WR was received within llc_testlink_time (wr_rx_tstamp) the probe
 * is skipped and the worker is re-armed for the remaining interval.
 * Otherwise a TEST_LINK is sent and its response awaited; a missing or
 * failed response schedules link-down handling.  The worker does not
 * reschedule itself once the link leaves the active state.
 */
static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (!smc_link_active(link))
		return; /* don't reschedule worker */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		/* recent traffic already proves liveness, probe later */
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (!smc_link_active(link))
		return; /* link state changed */
	if (rc <= 0) {
		/* timeout or interruption: initiate link-down handling */
		smcr_link_down_cond_sched(link);
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}
2145
smc_llc_lgr_init(struct smc_link_group * lgr,struct smc_sock * smc)2146 void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
2147 {
2148 struct net *net = sock_net(smc->clcsock->sk);
2149
2150 INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
2151 INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
2152 INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
2153 INIT_LIST_HEAD(&lgr->llc_event_q);
2154 spin_lock_init(&lgr->llc_event_q_lock);
2155 spin_lock_init(&lgr->llc_flow_lock);
2156 init_waitqueue_head(&lgr->llc_flow_waiter);
2157 init_waitqueue_head(&lgr->llc_msg_waiter);
2158 init_rwsem(&lgr->llc_conf_mutex);
2159 lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time);
2160 }
2161
2162 /* called after lgr was removed from lgr_list */
smc_llc_lgr_clear(struct smc_link_group * lgr)2163 void smc_llc_lgr_clear(struct smc_link_group *lgr)
2164 {
2165 smc_llc_event_flush(lgr);
2166 wake_up_all(&lgr->llc_flow_waiter);
2167 wake_up_all(&lgr->llc_msg_waiter);
2168 cancel_work_sync(&lgr->llc_event_work);
2169 cancel_work_sync(&lgr->llc_add_link_work);
2170 cancel_work_sync(&lgr->llc_del_link_work);
2171 if (lgr->delayed_event) {
2172 kfree(lgr->delayed_event);
2173 lgr->delayed_event = NULL;
2174 }
2175 }
2176
smc_llc_link_init(struct smc_link * link)2177 int smc_llc_link_init(struct smc_link *link)
2178 {
2179 init_completion(&link->llc_testlink_resp);
2180 INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
2181 return 0;
2182 }
2183
smc_llc_link_active(struct smc_link * link)2184 void smc_llc_link_active(struct smc_link *link)
2185 {
2186 pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link added: id %*phN, "
2187 "peerid %*phN, ibdev %s, ibport %d\n",
2188 SMC_LGR_ID_SIZE, &link->lgr->id,
2189 link->lgr->net->net_cookie,
2190 SMC_LGR_ID_SIZE, &link->link_uid,
2191 SMC_LGR_ID_SIZE, &link->peer_link_uid,
2192 link->smcibdev->ibdev->name, link->ibport);
2193 link->state = SMC_LNK_ACTIVE;
2194 if (link->lgr->llc_testlink_time) {
2195 link->llc_testlink_time = link->lgr->llc_testlink_time;
2196 schedule_delayed_work(&link->llc_testlink_wrk,
2197 link->llc_testlink_time);
2198 }
2199 }
2200
/* called in worker context
 *
 * Tears down per-link LLC state: optionally logs the removal, releases
 * a keepalive worker possibly blocked on the TEST_LINK completion, then
 * synchronously cancels that worker.
 */
void smc_llc_link_clear(struct smc_link *link, bool log)
{
	if (log)
		pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link removed: id %*phN"
				    ", peerid %*phN, ibdev %s, ibport %d\n",
				    SMC_LGR_ID_SIZE, &link->lgr->id,
				    link->lgr->net->net_cookie,
				    SMC_LGR_ID_SIZE, &link->link_uid,
				    SMC_LGR_ID_SIZE, &link->peer_link_uid,
				    link->smcibdev->ibdev->name, link->ibport);
	/* complete first so a waiting testlink worker can't block cancel */
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
}
2215
2216 /* register a new rtoken at the remote peer (for all links) */
smc_llc_do_confirm_rkey(struct smc_link * send_link,struct smc_buf_desc * rmb_desc)2217 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
2218 struct smc_buf_desc *rmb_desc)
2219 {
2220 struct smc_link_group *lgr = send_link->lgr;
2221 struct smc_llc_qentry *qentry = NULL;
2222 int rc = 0;
2223
2224 rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
2225 if (rc)
2226 goto out;
2227 /* receive CONFIRM RKEY response from server over RoCE fabric */
2228 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2229 SMC_LLC_CONFIRM_RKEY);
2230 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2231 rc = -EFAULT;
2232 out:
2233 if (qentry)
2234 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2235 return rc;
2236 }
2237
2238 /* unregister an rtoken at the remote peer */
smc_llc_do_delete_rkey(struct smc_link_group * lgr,struct smc_buf_desc * rmb_desc)2239 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
2240 struct smc_buf_desc *rmb_desc)
2241 {
2242 struct smc_llc_qentry *qentry = NULL;
2243 struct smc_link *send_link;
2244 int rc = 0;
2245
2246 send_link = smc_llc_usable_link(lgr);
2247 if (!send_link)
2248 return -ENOLINK;
2249
2250 /* protected by llc_flow control */
2251 rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
2252 if (rc)
2253 goto out;
2254 /* receive DELETE RKEY response from server over RoCE fabric */
2255 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
2256 SMC_LLC_DELETE_RKEY);
2257 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
2258 rc = -EFAULT;
2259 out:
2260 if (qentry)
2261 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
2262 return rc;
2263 }
2264
smc_llc_link_set_uid(struct smc_link * link)2265 void smc_llc_link_set_uid(struct smc_link *link)
2266 {
2267 __be32 link_uid;
2268
2269 link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
2270 memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
2271 }
2272
2273 /* save peers link user id, used for debug purposes */
smc_llc_save_peer_uid(struct smc_llc_qentry * qentry)2274 void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
2275 {
2276 memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
2277 SMC_LGR_ID_SIZE);
2278 }
2279
2280 /* evaluate confirm link request or response */
smc_llc_eval_conf_link(struct smc_llc_qentry * qentry,enum smc_llc_reqresp type)2281 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
2282 enum smc_llc_reqresp type)
2283 {
2284 if (type == SMC_LLC_REQ) { /* SMC server assigns link_id */
2285 qentry->link->link_id = qentry->msg.confirm_link.link_num;
2286 smc_llc_link_set_uid(qentry->link);
2287 }
2288 if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
2289 return -ENOTSUPP;
2290 return 0;
2291 }
2292
2293 /***************************** init, exit, misc ******************************/
2294
/* WR receive dispatch table: every supported LLC message type (V1 and
 * V2) is funneled into the single smc_llc_rx_handler(); the table is
 * terminated by an entry with a NULL handler.
 */
static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	/* V2 types */
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_REQ_ADD_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY_V2
	},
	{
		.handler	= NULL,		/* end of table */
	}
};
2361
smc_llc_init(void)2362 int __init smc_llc_init(void)
2363 {
2364 struct smc_wr_rx_handler *handler;
2365 int rc = 0;
2366
2367 for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
2368 INIT_HLIST_NODE(&handler->list);
2369 rc = smc_wr_rx_register_handler(handler);
2370 if (rc)
2371 break;
2372 }
2373 return rc;
2374 }
2375