/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
                          struct neighbour *neigh, const void *daddr);

static inline int offload_activated(struct t3cdev *tdev)
{
        const struct adapter *adapter = tdev2adap(tdev);

        return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and call back the client for each
 * activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
        struct t3cdev *tdev;

        mutex_lock(&cxgb3_db_lock);
        list_add_tail(&client->client_list, &client_list);

        if (client->add) {
                list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
                        if (offload_activated(tdev))
                                client->add(tdev);
                }
        }
        mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
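
/*
 * Example: a minimal offload client. This is an illustrative sketch only,
 * not part of this driver; it assumes an out-of-tree module using the
 * exported cxgb3_register_client()/cxgb3_unregister_client() API. The
 * .name/.add/.remove fields are from struct cxgb3_client, the rest is
 * hypothetical.
 *
 *      static void my_ulp_add(struct t3cdev *tdev)
 *      {
 *              pr_info("offload device %s activated\n", tdev->name);
 *      }
 *
 *      static void my_ulp_remove(struct t3cdev *tdev)
 *      {
 *              pr_info("offload device %s deactivated\n", tdev->name);
 *      }
 *
 *      static struct cxgb3_client my_ulp_client = {
 *              .name   = "my_ulp",
 *              .add    = my_ulp_add,
 *              .remove = my_ulp_remove,
 *      };
 *
 * The client registers once at module init and is called back both for
 * already-active devices and for devices activated later:
 *
 *      cxgb3_register_client(&my_ulp_client);
 *      ...
 *      cxgb3_unregister_client(&my_ulp_client);
 */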

/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and call back the client for
 * each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
        struct t3cdev *tdev;

        mutex_lock(&cxgb3_db_lock);
        list_del(&client->client_list);

        if (client->remove) {
                list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
                        if (offload_activated(tdev))
                                client->remove(tdev);
                }
        }
        mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients when an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->add)
                        client->add(tdev);
        }
        mutex_unlock(&cxgb3_db_lock);
}

/**
 * cxgb3_remove_clients - deactivate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients when an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->remove)
                        client->remove(tdev);
        }
        mutex_unlock(&cxgb3_db_lock);
}

void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->event_handler)
                        client->event_handler(tdev, event, port);
        }
        mutex_unlock(&cxgb3_db_lock);
}

static struct net_device *get_iff_from_mac(struct adapter *adapter,
                                           const unsigned char *mac,
                                           unsigned int vlan)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];

                if (ether_addr_equal(dev->dev_addr, mac)) {
                        rcu_read_lock();
                        if (vlan && vlan != VLAN_VID_MASK) {
                                dev = __vlan_find_dev_deep_rcu(dev,
                                        htons(ETH_P_8021Q), vlan);
                        } else if (netif_is_bond_slave(dev)) {
                                struct net_device *upper_dev;

                                while ((upper_dev =
                                        netdev_master_upper_dev_get_rcu(dev)))
                                        dev = upper_dev;
                        }
                        rcu_read_unlock();
                        return dev;
                }
        }
        return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
                              void *data)
{
        int i;
        int ret = 0;
        unsigned int val = 0;
        struct ulp_iscsi_info *uiip = data;

        switch (req) {
        case ULP_ISCSI_GET_PARAMS:
                uiip->pdev = adapter->pdev;
                uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
                uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
                uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

                val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
                for (i = 0; i < 4; i++, val >>= 8)
                        uiip->pgsz_factor[i] = val & 0xFF;

                val = t3_read_reg(adapter, A_TP_PARA_REG7);
                uiip->max_txsz =
                uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0,
                                     (val >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1);
                /*
                 * On Tx, the iSCSI PDU has to be <= the Tx page size and has
                 * to fit into the Tx PM FIFO.
                 */
                val = min(adapter->params.tp.tx_pg_size,
                          t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
                uiip->max_txsz = min(val, uiip->max_txsz);

                /* set MaxRxData to 16224 */
                val = t3_read_reg(adapter, A_TP_PARA_REG2);
                if ((val >> S_MAXRXDATA) != 0x3f60) {
                        val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
                        val |= V_MAXRXDATA(0x3f60);
                        pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
                                adapter->name, val);
                        t3_write_reg(adapter, A_TP_PARA_REG2, val);
                }

                /*
                 * On Rx, the iSCSI PDU has to be < the Rx page size and the
                 * max Rx data length programmed in TP.
                 */
                val = min(adapter->params.tp.rx_pg_size,
                          ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
                           S_MAXRXDATA) & M_MAXRXDATA);
                uiip->max_rxsz = min(val, uiip->max_rxsz);
                break;
        case ULP_ISCSI_SET_PARAMS:
                t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
                /* program the ddp page sizes */
                for (i = 0; i < 4; i++)
                        val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
                if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
                        pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
                                adapter->name, val, uiip->pgsz_factor[0],
                                uiip->pgsz_factor[1], uiip->pgsz_factor[2],
                                uiip->pgsz_factor[3]);
                        t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
                }
                break;
        default:
                ret = -EOPNOTSUPP;
        }
        return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
        int ret = 0;

        switch (req) {
        case RDMA_GET_PARAMS: {
                struct rdma_info *rdma = data;
                struct pci_dev *pdev = adapter->pdev;

                rdma->udbell_physbase = pci_resource_start(pdev, 2);
                rdma->udbell_len = pci_resource_len(pdev, 2);
                rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
                rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
                rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
                rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
                rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
                rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
                rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
                rdma->pdev = pdev;
                break;
        }
        case RDMA_CQ_OP: {
                unsigned long flags;
                struct rdma_cq_op *rdma = data;

                /* may be called in any context */
                spin_lock_irqsave(&adapter->sge.reg_lock, flags);
                ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
                                        rdma->credits);
                spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
                break;
        }
        case RDMA_GET_MEM: {
                struct ch_mem_range *t = data;
                struct mc7 *mem;

                if ((t->addr & 7) || (t->len & 7))
                        return -EINVAL;
                if (t->mem_id == MEM_CM)
                        mem = &adapter->cm;
                else if (t->mem_id == MEM_PMRX)
                        mem = &adapter->pmrx;
                else if (t->mem_id == MEM_PMTX)
                        mem = &adapter->pmtx;
                else
                        return -EINVAL;

                ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
                                     (u64 *)t->buf);
                if (ret)
                        return ret;
                break;
        }
        case RDMA_CQ_SETUP: {
                struct rdma_cq_setup *rdma = data;

                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_init_cqcntxt(adapter, rdma->id,
                                          rdma->base_addr, rdma->size,
                                          ASYNC_NOTIF_RSPQ,
                                          rdma->ovfl_mode, rdma->credits,
                                          rdma->credit_thres);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
        case RDMA_CQ_DISABLE:
                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        case RDMA_CTRL_QP_SETUP: {
                struct rdma_ctrlqp_setup *rdma = data;

                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
                                         SGE_CNTXT_RDMA,
                                         ASYNC_NOTIF_RSPQ,
                                         rdma->base_addr, rdma->size,
                                         FW_RI_TID_START, 1, 0);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
        case RDMA_GET_MIB: {
                spin_lock(&adapter->stats_lock);
                t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
                spin_unlock(&adapter->stats_lock);
                break;
        }
        default:
                ret = -EOPNOTSUPP;
        }
        return ret;
}

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct tid_range *tid;
        struct mtutab *mtup;
        struct iff_mac *iffmacp;
        struct ddp_params *ddpp;
        struct adap_ports *ports;
        struct ofld_page_info *rx_page_info;
        struct tp_params *tp = &adapter->params.tp;
        int i;

        switch (req) {
        case GET_MAX_OUTSTANDING_WR:
                *(unsigned int *)data = FW_WR_NUM;
                break;
        case GET_WR_LEN:
                *(unsigned int *)data = WR_FLITS;
                break;
        case GET_TX_MAX_CHUNK:
                *(unsigned int *)data = 1 << 20;        /* 1MB */
                break;
        case GET_TID_RANGE:
                tid = data;
                tid->num = t3_mc5_size(&adapter->mc5) -
                    adapter->params.mc5.nroutes -
                    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
                tid->base = 0;
                break;
        case GET_STID_RANGE:
                tid = data;
                tid->num = adapter->params.mc5.nservers;
                tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
                    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
                break;
        case GET_L2T_CAPACITY:
                *(unsigned int *)data = 2048;
                break;
        case GET_MTUS:
                mtup = data;
                mtup->size = NMTUS;
                mtup->mtus = adapter->params.mtus;
                break;
        case GET_IFF_FROM_MAC:
                iffmacp = data;
                iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
                                                iffmacp->vlan_tag &
                                                VLAN_VID_MASK);
                break;
        case GET_DDP_PARAMS:
                ddpp = data;
                ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
                ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
                ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
                break;
        case GET_PORTS:
                ports = data;
                ports->nports = adapter->params.nports;
                for_each_port(adapter, i)
                        ports->lldevs[i] = adapter->port[i];
                break;
        case ULP_ISCSI_GET_PARAMS:
        case ULP_ISCSI_SET_PARAMS:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_ulp_iscsi_ctl(adapter, req, data);
        case RDMA_GET_PARAMS:
        case RDMA_CQ_OP:
        case RDMA_CQ_SETUP:
        case RDMA_CQ_DISABLE:
        case RDMA_CTRL_QP_SETUP:
        case RDMA_GET_MEM:
        case RDMA_GET_MIB:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_rdma_ctl(adapter, req, data);
        case GET_RX_PAGE_INFO:
                rx_page_info = data;
                rx_page_info->page_size = tp->rx_pg_size;
                rx_page_info->num = tp->rx_num_pgs;
                break;
        case GET_ISCSI_IPV4ADDR: {
                struct iscsi_ipv4addr *p = data;
                struct port_info *pi = netdev_priv(p->dev);

                p->ipv4addr = pi->iscsi_ipv4addr;
                break;
        }
        case GET_EMBEDDED_INFO: {
                struct ch_embedded_info *e = data;

                spin_lock(&adapter->stats_lock);
                t3_get_fw_version(adapter, &e->fw_vers);
                t3_get_tp_version(adapter, &e->tp_vers);
                spin_unlock(&adapter->stats_lock);
                break;
        }
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}
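
/*
 * Example: how a client typically uses the ->ctl() hook above. Illustrative
 * sketch only; the request names (GET_TID_RANGE, GET_MTUS) and the argument
 * structs are real, the surrounding code is hypothetical.
 *
 *      struct tid_range tids;
 *      struct mtutab mtus;
 *
 *      if (tdev->ctl(tdev, GET_TID_RANGE, &tids) < 0 ||
 *          tdev->ctl(tdev, GET_MTUS, &mtus) < 0)
 *              return -EOPNOTSUPP;
 *      pr_debug("%u TIDs from %u, %u MTUs\n", tids.num, tids.base, mtus.size);
 */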

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is set up. This drops the packet as it isn't normal to
 * get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
                                int n)
{
        while (n--)
                dev_kfree_skb_any(skbs[n]);
        return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
        dev->recv = rx_offload_blackhole;
        dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
        union active_open_entry *p = atid2entry(t, atid);
        void *ctx = p->t3c_tid.ctx;

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);

        return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
                      void *ctx, unsigned int tid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        t->tid_tab[tid].client = client;
        t->tid_tab[tid].ctx = ctx;
        atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR. The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
        struct cpl_tid_release *req;

        skb->priority = CPL_PRIORITY_SETUP;
        req = __skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

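/*
 * Work handler that drains the deferred TID-release list. Entries are queued
 * by cxgb3_queue_tid_release() when an atomic skb allocation fails; here we
 * retry with GFP_KERNEL, fall back to the preallocated nofail_skb, and if
 * even that is gone we requeue the entry and give up until the next run.
 */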
static void t3_process_tid_release_list(struct work_struct *work)
{
        struct t3c_data *td = container_of(work, struct t3c_data,
                                           tid_release_task);
        struct sk_buff *skb;
        struct t3cdev *tdev = td->dev;

        spin_lock_bh(&td->tid_release_lock);
        while (td->tid_release_list) {
                struct t3c_tid_entry *p = td->tid_release_list;

                td->tid_release_list = p->ctx;
                spin_unlock_bh(&td->tid_release_lock);

                skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
                if (!skb)
                        skb = td->nofail_skb;
                if (!skb) {
                        spin_lock_bh(&td->tid_release_lock);
                        p->ctx = (void *)td->tid_release_list;
                        td->tid_release_list = p;
                        break;
                }
                mk_tid_release(skb, p - td->tid_maps.tid_tab);
                cxgb3_ofld_send(tdev, skb);
                p->ctx = NULL;
                if (skb == td->nofail_skb)
                        td->nofail_skb =
                                alloc_skb(sizeof(struct cpl_tid_release),
                                          GFP_KERNEL);
                spin_lock_bh(&td->tid_release_lock);
        }
        td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
        spin_unlock_bh(&td->tid_release_lock);

        if (!td->nofail_skb)
                td->nofail_skb =
                        alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
        struct t3c_data *td = T3C_DATA(tdev);
        struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

        spin_lock_bh(&td->tid_release_lock);
        p->ctx = (void *)td->tid_release_list;
        p->client = NULL;
        td->tid_release_list = p;
        if (!p->ctx || td->release_list_incomplete)
                schedule_work(&td->tid_release_task);
        spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table. A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        BUG_ON(tid >= t->ntids);
        if (tdev->type == T3A)
                (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
        else {
                struct sk_buff *skb;

                skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
                if (likely(skb)) {
                        mk_tid_release(skb, tid);
                        cxgb3_ofld_send(tdev, skb);
                        t->tid_tab[tid].ctx = NULL;
                } else
                        cxgb3_queue_tid_release(tdev, tid);
        }
        atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
                     void *ctx)
{
        int atid = -1;
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        spin_lock_bh(&t->atid_lock);
        if (t->afree &&
            t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
            t->ntids) {
                union active_open_entry *p = t->afree;

                atid = (p - t->atid_tab) + t->atid_base;
                t->afree = p->next;
                p->t3c_tid.ctx = ctx;
                p->t3c_tid.client = client;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
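
/*
 * Example: typical TID lifecycle for an active-open connection, as seen from
 * a client. Illustrative sketch only; error handling and the connection
 * state machine are elided, and "my_conn"/"hwtid" are hypothetical.
 *
 *      int atid = cxgb3_alloc_atid(tdev, &my_ulp_client, my_conn);
 *      if (atid < 0)
 *              return -ENOMEM;
 *      ... send CPL_ACT_OPEN_REQ using atid ...
 *
 * When CPL_ACT_ESTABLISH arrives carrying the hardware TID, switch over:
 *
 *      cxgb3_insert_tid(tdev, &my_ulp_client, my_conn, hwtid);
 *      cxgb3_free_atid(tdev, atid);
 *
 * and on teardown:
 *
 *      cxgb3_remove_tid(tdev, my_conn, hwtid);
 */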

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
        const struct port_info *pi = netdev_priv(dev);

        return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_smt_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_rte_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
        unsigned int atid = G_TID(ntohl(rpl->atid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
            t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
                return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL](dev, skb,
                                                                   t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_ACT_OPEN_RPL);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int stid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode](dev, skb,
                                                            t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, p->opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, p->opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
        struct t3c_tid_entry *t3c_tid;
        unsigned int tid = GET_TID(req);

        if (unlikely(tid >= t->ntids)) {
                pr_err("%s: passive open TID %u too large\n",
                       dev->name, tid);
                t3_fatal_err(tdev2adap(dev));
                return CPL_RET_BUF_DONE;
        }

        t3c_tid = lookup_stid(t, stid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
                return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_PASS_ACCEPT_REQ);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

/*
 * Returns an sk_buff for a reply CPL message of size len. If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated. The input skb must be of size at least len. Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
                                               gfp_t gfp)
{
        if (likely(!skb_cloned(skb))) {
                BUG_ON(skb->len < len);
                __skb_trim(skb, len);
                skb_get(skb);
        } else {
                skb = alloc_skb(len, gfp);
                if (skb)
                        __skb_put(skb, len);
        }
        return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                struct cpl_abort_req_rss *req = cplhdr(skb);
                struct cpl_abort_rpl *rpl;
                struct sk_buff *reply_skb;
                unsigned int tid = GET_TID(req);
                u8 cmd = req->status;

                if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
                    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
                        goto out;

                reply_skb = cxgb3_get_cpl_reply_skb(skb,
                                                    sizeof(struct cpl_abort_rpl),
                                                    GFP_ATOMIC);
                if (!reply_skb) {
                        pr_err("do_abort_req_rss: couldn't get skb!\n");
                        goto out;
                }
                reply_skb->priority = CPL_PRIORITY_DATA;
                __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
                rpl = cplhdr(reply_skb);
                rpl->wr.wr_hi =
                    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
                rpl->wr.wr_lo = htonl(V_WR_TID(tid));
                OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
                rpl->cmd = cmd;
                cxgb3_ofld_send(dev, reply_skb);
out:
                return CPL_RET_BUF_DONE;
        }
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
        struct t3c_tid_entry *t3c_tid;
        unsigned int tid = GET_TID(req);

        if (unlikely(tid >= t->ntids)) {
                pr_err("%s: active establish TID %u too large\n",
                       dev->name, tid);
                t3_fatal_err(tdev2adap(dev));
                return CPL_RET_BUF_DONE;
        }

        t3c_tid = lookup_atid(t, atid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
                return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_ACT_ESTABLISH);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_trace_pkt *p = cplhdr(skb);

        skb->protocol = htons(0xffff);
        skb->dev = dev->lldev;
        skb_pull(skb, sizeof(*p));
        skb_reset_mac_header(skb);
        netif_receive_skb(skb);
        return 0;
}

/*
 * These skbs had better have come from process_responses(), where we abuse
 * skb->priority and skb->csum to carry our data: the word holding the
 * hardware TID and the word holding the CPL opcode, respectively. NB: if
 * ->csum ever goes per-arch, things could get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
        return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
        return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
        unsigned int hwtid = get_hwtid(skb);
        unsigned int opcode = get_opcode(skb);
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[opcode]) {
                return t3c_tid->client->handlers[opcode](dev, skb,
                                                         t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int nb_callback(struct notifier_block *self, unsigned long event,
                       void *ctx)
{
        switch (event) {
        case NETEVENT_NEIGH_UPDATE: {
                cxgb_neigh_update((struct neighbour *)ctx);
                break;
        }
        case NETEVENT_REDIRECT: {
                struct netevent_redirect *nr = ctx;

                cxgb_redirect(nr->old, nr->new, nr->neigh, nr->daddr);
                cxgb_neigh_update(nr->neigh);
                break;
        }
        default:
                break;
        }
        return 0;
}

static struct notifier_block nb = {
        .notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
        pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
        if (opcode < NUM_CPL_CMDS)
                cpl_handlers[opcode] = h ? h : do_bad_cpl;
        else
                pr_err("T3C: handler registration for opcode %x failed\n",
                       opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
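
/*
 * Example: installing a custom handler for one CPL opcode and restoring the
 * default on unload. Illustrative sketch only; "my_rx_data" is hypothetical.
 * A handler returns CPL_RET_BUF_DONE when the dispatcher should free the skb
 * (see process_rx() below).
 *
 *      static int my_rx_data(struct t3cdev *dev, struct sk_buff *skb)
 *      {
 *              ... consume the CPL_RX_DATA message ...
 *              return CPL_RET_BUF_DONE;
 *      }
 *
 *      t3_register_cpl_handler(CPL_RX_DATA, my_rx_data);
 *      ...
 *      t3_register_cpl_handler(CPL_RX_DATA, NULL);   restores do_bad_cpl
 */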

/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
        while (n--) {
                struct sk_buff *skb = *skbs++;
                unsigned int opcode = get_opcode(skb);
                int ret = cpl_handlers[opcode](dev, skb);

#if VALIDATE_TID
                if (ret & CPL_RET_UNKNOWN_TID) {
                        union opcode_tid *p = cplhdr(skb);

                        pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
                               dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
                }
#endif
                if (ret & CPL_RET_BUF_DONE)
                        kfree_skb(skb);
        }
        return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
        int r;

        local_bh_disable();
        r = dev->send(dev, skb);
        local_bh_enable();
        return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
        struct adapter *adapter;
        int i;

        read_lock_bh(&adapter_list_lock);
        list_for_each_entry(adapter, &adapter_list, adapter_list) {
                for_each_port(adapter, i) {
                        if (dev == adapter->port[i]) {
                                read_unlock_bh(&adapter_list_lock);
                                return 1;
                        }
                }
        }
        read_unlock_bh(&adapter_list_lock);
        return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
        struct net_device *dev;

        if (!neigh)
                return;
        dev = neigh->dev;
        if (dev && is_offloading(dev)) {
                struct t3cdev *tdev = dev2t3cdev(dev);

                BUG_ON(!tdev);
                t3_l2t_update(tdev, neigh);
        }
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;

        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb) {
                pr_err("%s: cannot allocate skb!\n", __func__);
                return;
        }
        skb->priority = CPL_PRIORITY_CONTROL;
        req = skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = 0;
        req->cpu_idx = 0;
        req->word = htons(W_TCB_L2T_IX);
        req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
        req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
        tdev->send(tdev, skb);
}

static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
                          struct neighbour *neigh,
                          const void *daddr)
{
        struct net_device *dev;
        struct tid_info *ti;
        struct t3cdev *tdev;
        u32 tid;
        int update_tcb;
        struct l2t_entry *e;
        struct t3c_tid_entry *te;

        dev = neigh->dev;

        if (!is_offloading(dev))
                return;
        tdev = dev2t3cdev(dev);
        BUG_ON(!tdev);

        /* Add new L2T entry */
        e = t3_l2t_get(tdev, new, dev, daddr);
        if (!e) {
                pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
                return;
        }

        /* Walk tid table and notify clients of dst change. */
        ti = &(T3C_DATA(tdev))->tid_maps;
        for (tid = 0; tid < ti->ntids; tid++) {
                te = lookup_tid(ti, tid);
                BUG_ON(!te);
                if (te && te->ctx && te->client && te->client->redirect) {
                        update_tcb = te->client->redirect(te->ctx, old, new, e);
                        if (update_tcb) {
                                rcu_read_lock();
                                l2t_hold(L2DATA(tdev), e);
                                rcu_read_unlock();
                                set_l2t_ix(tdev, tid, e);
                        }
                }
        }
        l2t_release(tdev, e);
}

/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
                         unsigned int natids, unsigned int nstids,
                         unsigned int atid_base, unsigned int stid_base)
{
        unsigned long size = ntids * sizeof(*t->tid_tab) +
            natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

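        /*
         * One contiguous allocation, carved into three tables:
         * [ tid_tab: ntids | stid_tab: nstids | atid_tab: natids ]
         * The stid/atid pointers below simply alias into this block.
         */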
        t->tid_tab = kvzalloc(size, GFP_KERNEL);
        if (!t->tid_tab)
                return -ENOMEM;

        t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
        t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
        t->ntids = ntids;
        t->nstids = nstids;
        t->stid_base = stid_base;
        t->sfree = NULL;
        t->natids = natids;
        t->atid_base = atid_base;
        t->afree = NULL;
        t->stids_in_use = t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);
        spin_lock_init(&t->stid_lock);
        spin_lock_init(&t->atid_lock);

        /*
         * Setup the free lists for stid_tab and atid_tab.
         */
        if (nstids) {
                while (--nstids)
                        t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
                t->sfree = t->stid_tab;
        }
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }
        return 0;
}

static void free_tid_maps(struct tid_info *t)
{
        kvfree(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
        write_lock_bh(&adapter_list_lock);
        list_add_tail(&adap->adapter_list, &adapter_list);
        write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
        write_lock_bh(&adapter_list_lock);
        list_del(&adap->adapter_list);
        write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
        struct t3cdev *dev = &adapter->tdev;
        int natids, err;
        struct t3c_data *t;
        struct tid_range stid_range, tid_range;
        struct mtutab mtutab;
        unsigned int l2t_capacity;
        struct l2t_data *l2td;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        err = -EOPNOTSUPP;
        if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
            dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
            dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
            dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
            dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
            dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
                goto out_free;

        err = -ENOMEM;
        l2td = t3_init_l2t(l2t_capacity);
        if (!l2td)
                goto out_free;

        natids = min(tid_range.num / 2, MAX_ATIDS);
        err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
                            stid_range.num, ATID_BASE, stid_range.base);
        if (err)
                goto out_free_l2t;

        t->mtus = mtutab.mtus;
        t->nmtus = mtutab.size;

        INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
        spin_lock_init(&t->tid_release_lock);
        INIT_LIST_HEAD(&t->list_node);
        t->dev = dev;

        RCU_INIT_POINTER(dev->l2opt, l2td);
        T3C_DATA(dev) = t;
        dev->recv = process_rx;
        dev->neigh_update = t3_l2t_update;

        /* Register netevent handler once */
        if (list_empty(&adapter_list))
                register_netevent_notifier(&nb);

        t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
        t->release_list_incomplete = 0;

        add_adapter(adapter);
        return 0;

out_free_l2t:
        kvfree(l2td);
out_free:
        kfree(t);
        return err;
}

static void clean_l2_data(struct rcu_head *head)
{
        struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);

        kvfree(d);
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;
        struct t3c_data *t = T3C_DATA(tdev);
        struct l2t_data *d;

        remove_adapter(adapter);
        if (list_empty(&adapter_list))
                unregister_netevent_notifier(&nb);

        free_tid_maps(&t->tid_maps);
        T3C_DATA(tdev) = NULL;
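        /*
         * Detach the L2 table under RCU: clear the published pointer first,
         * then free the table only after all current RCU readers (e.g. the
         * l2t_hold() in cxgb_redirect()) are done with it.
         */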
        rcu_read_lock();
        d = L2DATA(tdev);
        rcu_read_unlock();
        RCU_INIT_POINTER(tdev->l2opt, NULL);
        call_rcu(&d->rcu_head, clean_l2_data);
        kfree_skb(t->nofail_skb);
        kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
        static int unit;

        mutex_lock(&cxgb3_db_lock);
        snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
        list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
        mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
        mutex_lock(&cxgb3_db_lock);
        list_del(&tdev->ofld_dev_list);
        mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
        int type = 0;

        switch (adapter->params.rev) {
        case T3_REV_A:
                type = T3A;
                break;
        case T3_REV_B:
        case T3_REV_B2:
                type = T3B;
                break;
        case T3_REV_C:
                type = T3C;
                break;
        }
        return type;
}

void cxgb3_adapter_ofld(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;

        INIT_LIST_HEAD(&tdev->ofld_dev_list);

        cxgb3_set_dummy_ops(tdev);
        tdev->send = t3_offload_tx;
        tdev->ctl = cxgb_offload_ctl;
        tdev->type = adap2type(adapter);

        register_tdev(tdev);
}

void cxgb3_adapter_unofld(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;

        tdev->recv = NULL;
        tdev->neigh_update = NULL;

        unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
        int i;

        for (i = 0; i < NUM_CPL_CMDS; ++i)
                cpl_handlers[i] = do_bad_cpl;

        t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
        t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
        t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
        t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
        t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
        t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
        t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
        t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
        t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
        t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
        t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
        t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}