Lines matching full:msg (search results from drivers/infiniband/core/nldev.c): each entry shows the file line number, the matching source line, and the enclosing function.

176 static int put_driver_name_print_type(struct sk_buff *msg, const char *name,  in put_driver_name_print_type()  argument
179 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) in put_driver_name_print_type()
182 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) in put_driver_name_print_type()
188 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u32() argument
192 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u32()
194 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) in _rdma_nl_put_driver_u32()
200 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u64() argument
204 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u64()
206 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, in _rdma_nl_put_driver_u64()
213 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_string() argument
216 if (put_driver_name_print_type(msg, name, in rdma_nl_put_driver_string()
219 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) in rdma_nl_put_driver_string()
226 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) in rdma_nl_put_driver_u32() argument
228 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u32()
233 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_u32_hex() argument
236 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u32_hex()
241 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64() argument
243 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u64()
248 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64_hex() argument
250 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u64_hex()
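The exported wrappers above (plain and hex, u32 and u64, plus rdma_nl_put_driver_string) are the interface drivers use to attach driver-specific key/value pairs to a restrack dump. A minimal sketch of a driver-side callback using them; struct my_qp and its fields are hypothetical, not any real driver's layout:

```c
/*
 * Hypothetical driver-side restrack callback, sketched to show how the
 * exported rdma_nl_put_driver_* helpers above are typically used.  The
 * struct my_qp layout is an assumption for illustration only.
 */
#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>

struct my_qp {
	struct ib_qp ibqp;
	u32 sq_depth;
	u32 db_offset;	/* doorbell offset, dumped in hex below */
};

static int my_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct my_qp *qp = container_of(ibqp, struct my_qp, ibqp);

	/* Each helper emits a DRIVER_STRING name plus one typed value. */
	if (rdma_nl_put_driver_u32(msg, "sq_depth", qp->sq_depth))
		return -EMSGSIZE;
	if (rdma_nl_put_driver_u32_hex(msg, "db_offset", qp->db_offset))
		return -EMSGSIZE;
	return 0;
}
```

With a callback like this wired up (see the ops sketch further down), the userspace rdma tool renders these pairs as driver attributes when invoked with driver details enabled (-dd).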
261 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) in fill_nldev_handle() argument
263 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) in fill_nldev_handle()
265 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, in fill_nldev_handle()
272 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) in fill_dev_info() argument
278 if (fill_nldev_handle(msg, device)) in fill_dev_info()
281 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) in fill_dev_info()
285 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_dev_info()
292 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) in fill_dev_info()
295 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, in fill_dev_info()
299 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, in fill_dev_info()
303 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) in fill_dev_info()
305 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) in fill_dev_info()
309 nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_TYPE, device->type)) in fill_dev_info()
313 nla_put_string(msg, RDMA_NLDEV_ATTR_PARENT_NAME, in fill_dev_info()
317 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE, in fill_dev_info()
328 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); in fill_dev_info()
330 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); in fill_dev_info()
332 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); in fill_dev_info()
334 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); in fill_dev_info()
336 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, in fill_dev_info()
341 static int fill_port_info(struct sk_buff *msg, in fill_port_info() argument
350 if (fill_nldev_handle(msg, device)) in fill_port_info()
353 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in fill_port_info()
365 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_port_info()
368 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, in fill_port_info()
371 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) in fill_port_info()
373 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) in fill_port_info()
375 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) in fill_port_info()
378 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) in fill_port_info()
380 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) in fill_port_info()
385 ret = nla_put_u32(msg, in fill_port_info()
389 ret = nla_put_string(msg, in fill_port_info()
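The attr.* values put above come from a single port query at the top of fill_port_info(); the lines that don't match msg (the ib_query_port() call, the IB-only guard) are elided from this listing. A condensed sketch of that pattern, with error handling collapsed to -EMSGSIZE for brevity:

```c
/*
 * Condensed sketch of the fill_port_info() pattern: query the port once,
 * then translate selected ib_port_attr fields into nldev attributes.
 */
static int fill_port_info_sketch(struct sk_buff *msg,
				 struct ib_device *device, u32 port)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state) ||
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	/* LID, SM LID and LMC are meaningful only on IB-capable ports. */
	if (rdma_protocol_ib(device, port) &&
	    (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid) ||
	     nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid) ||
	     nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)))
		return -EMSGSIZE;

	return 0;
}
```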
398 static int fill_res_info_entry(struct sk_buff *msg, in fill_res_info_entry() argument
403 entry_attr = nla_nest_start_noflag(msg, in fill_res_info_entry()
408 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) in fill_res_info_entry()
410 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, in fill_res_info_entry()
414 nla_nest_end(msg, entry_attr); in fill_res_info_entry()
418 nla_nest_cancel(msg, entry_attr); in fill_res_info_entry()
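fill_res_info_entry() is the clearest instance of the nesting discipline used throughout this file: open a nest, emit attributes, close it on success, cancel it on failure so no half-written nest leaks into the reply. Reconstructed from the matched lines (the pad attribute is RDMA_NLDEV_ATTR_PAD, as elsewhere in nldev):

```c
/* The canonical nest/put/end-or-cancel pattern, distilled. */
static int put_summary_entry(struct sk_buff *msg, const char *name, u64 curr)
{
	struct nlattr *entry;

	entry = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR,
			      curr, RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry);
	return 0;

err:
	/* Rewind the skb so the aborted nest never reaches userspace. */
	nla_nest_cancel(msg, entry);
	return -EMSGSIZE;
}
```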
422 static int fill_res_info(struct sk_buff *msg, struct ib_device *device, in fill_res_info() argument
438 if (fill_nldev_handle(msg, device)) in fill_res_info()
441 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); in fill_res_info()
449 ret = fill_res_info_entry(msg, names[i], curr); in fill_res_info()
454 nla_nest_end(msg, table_attr); in fill_res_info()
458 nla_nest_cancel(msg, table_attr); in fill_res_info()
462 static int fill_res_name_pid(struct sk_buff *msg, in fill_res_name_pid() argument
472 err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, in fill_res_name_pid()
489 err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); in fill_res_name_pid()
495 static int fill_res_qp_entry_query(struct sk_buff *msg, in fill_res_qp_entry_query() argument
509 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, in fill_res_qp_entry_query()
512 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, in fill_res_qp_entry_query()
517 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) in fill_res_qp_entry_query()
522 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, in fill_res_qp_entry_query()
526 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) in fill_res_qp_entry_query()
528 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) in fill_res_qp_entry_query()
532 return dev->ops.fill_res_qp_entry(msg, qp); in fill_res_qp_entry_query()
538 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_entry() argument
549 if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) in fill_res_qp_entry()
552 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); in fill_res_qp_entry()
557 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) in fill_res_qp_entry()
560 ret = fill_res_name_pid(msg, res); in fill_res_qp_entry()
564 return fill_res_qp_entry_query(msg, res, dev, qp); in fill_res_qp_entry()
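The query helper above ends by delegating to dev->ops.fill_res_qp_entry when the driver provides one. That hook is populated from the driver's ib_device_ops table; a sketch, reusing the hypothetical my_fill_res_qp_entry from the earlier example:

```c
/*
 * How the dev->ops.fill_res_qp_entry hook gets populated: the driver
 * declares its restrack callbacks in its ib_device_ops table and
 * registers them before ib_register_device().
 */
static const struct ib_device_ops my_dev_ops = {
	.owner = THIS_MODULE,
	.fill_res_qp_entry = my_fill_res_qp_entry,
	/* .fill_res_qp_entry_raw, .fill_res_cq_entry, ... as supported */
};

/* In the driver's device-registration path: */
/* ib_set_device_ops(&my_dev->ibdev, &my_dev_ops); */
```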
567 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_raw_entry() argument
577 return dev->ops.fill_res_qp_entry_raw(msg, qp); in fill_res_qp_raw_entry()
580 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cm_id_entry() argument
592 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) in fill_res_cm_id_entry()
596 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) in fill_res_cm_id_entry()
598 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) in fill_res_cm_id_entry()
602 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) in fill_res_cm_id_entry()
605 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) in fill_res_cm_id_entry()
609 nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, in fill_res_cm_id_entry()
614 nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, in fill_res_cm_id_entry()
619 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) in fill_res_cm_id_entry()
622 if (fill_res_name_pid(msg, res)) in fill_res_cm_id_entry()
626 return dev->ops.fill_res_cm_id_entry(msg, cm_id); in fill_res_cm_id_entry()
632 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_entry() argument
638 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) in fill_res_cq_entry()
640 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_cq_entry()
646 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) in fill_res_cq_entry()
649 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) in fill_res_cq_entry()
652 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) in fill_res_cq_entry()
655 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_cq_entry()
659 if (fill_res_name_pid(msg, res)) in fill_res_cq_entry()
663 dev->ops.fill_res_cq_entry(msg, cq) : 0; in fill_res_cq_entry()
666 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_raw_entry() argument
674 return dev->ops.fill_res_cq_entry_raw(msg, cq); in fill_res_cq_raw_entry()
677 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_entry() argument
684 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) in fill_res_mr_entry()
686 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) in fill_res_mr_entry()
690 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, in fill_res_mr_entry()
694 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_res_mr_entry()
698 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) in fill_res_mr_entry()
701 if (fill_res_name_pid(msg, res)) in fill_res_mr_entry()
705 dev->ops.fill_res_mr_entry(msg, mr) : in fill_res_mr_entry()
709 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_raw_entry() argument
717 return dev->ops.fill_res_mr_entry_raw(msg, mr); in fill_res_mr_raw_entry()
720 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_pd_entry() argument
726 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, in fill_res_pd_entry()
730 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, in fill_res_pd_entry()
734 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_pd_entry()
738 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) in fill_res_pd_entry()
742 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_pd_entry()
746 return fill_res_name_pid(msg, res); in fill_res_pd_entry()
751 static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_ctx_entry() argument
759 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) in fill_res_ctx_entry()
762 return fill_res_name_pid(msg, res); in fill_res_ctx_entry()
765 static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, in fill_res_range_qp_entry() argument
773 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_res_range_qp_entry()
778 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) in fill_res_range_qp_entry()
781 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) in fill_res_range_qp_entry()
783 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) in fill_res_range_qp_entry()
786 nla_nest_end(msg, entry_attr); in fill_res_range_qp_entry()
790 nla_nest_cancel(msg, entry_attr); in fill_res_range_qp_entry()
794 static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) in fill_res_srq_qps() argument
803 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_res_srq_qps()
826 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
837 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
840 nla_nest_end(msg, table_attr); in fill_res_srq_qps()
847 nla_nest_cancel(msg, table_attr); in fill_res_srq_qps()
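fill_res_srq_qps() walks the SRQ's QPs and collapses runs of consecutive QP numbers into (RDMA_NLDEV_ATTR_MIN_RANGE, RDMA_NLDEV_ATTR_MAX_RANGE) pairs via fill_res_range_qp_entry(). The compression logic in isolation, over a sorted array for clarity; emit_range() is a stand-in for fill_res_range_qp_entry(), and the zero sentinel mirrors the kernel's own (so it assumes QPN 0 never appears in the set):

```c
/* Stand-in for fill_res_range_qp_entry(). */
static int emit_range(struct sk_buff *msg, u32 min_range, u32 max_range);

/* Run-length compress sorted QP numbers into (min, max) ranges. */
static int compress_qpns(struct sk_buff *msg, const u32 *qpn, int n)
{
	u32 min_range = 0, prev = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!min_range) {
			min_range = prev = qpn[i];	/* open a new run */
		} else if (qpn[i] == prev + 1) {
			prev = qpn[i];			/* extend the run */
		} else {
			if (emit_range(msg, min_range, prev))
				return -EMSGSIZE;
			min_range = prev = qpn[i];	/* gap: flush, reopen */
		}
	}
	/* flush the final run, if any */
	return min_range ? emit_range(msg, min_range, prev) : 0;
}
```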
851 static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_srq_entry() argument
857 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) in fill_res_srq_entry()
860 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) in fill_res_srq_entry()
863 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) in fill_res_srq_entry()
867 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, in fill_res_srq_entry()
872 if (fill_res_srq_qps(msg, srq)) in fill_res_srq_entry()
875 if (fill_res_name_pid(msg, res)) in fill_res_srq_entry()
879 return dev->ops.fill_res_srq_entry(msg, srq); in fill_res_srq_entry()
887 static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_srq_raw_entry() argument
895 return dev->ops.fill_res_srq_entry_raw(msg, srq); in fill_res_srq_raw_entry()
898 static int fill_stat_counter_mode(struct sk_buff *msg, in fill_stat_counter_mode() argument
903 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) in fill_stat_counter_mode()
908 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) in fill_stat_counter_mode()
912 fill_res_name_pid(msg, &counter->res)) in fill_stat_counter_mode()
919 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) in fill_stat_counter_qp_entry() argument
923 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_stat_counter_qp_entry()
927 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) in fill_stat_counter_qp_entry()
930 nla_nest_end(msg, entry_attr); in fill_stat_counter_qp_entry()
934 nla_nest_cancel(msg, entry_attr); in fill_stat_counter_qp_entry()
938 static int fill_stat_counter_qps(struct sk_buff *msg, in fill_stat_counter_qps() argument
948 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_stat_counter_qps()
959 ret = fill_stat_counter_qp_entry(msg, qp->qp_num); in fill_stat_counter_qps()
965 nla_nest_end(msg, table_attr); in fill_stat_counter_qps()
970 nla_nest_cancel(msg, table_attr); in fill_stat_counter_qps()
974 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, in rdma_nl_stat_hwcounter_entry() argument
979 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); in rdma_nl_stat_hwcounter_entry()
983 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, in rdma_nl_stat_hwcounter_entry()
986 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, in rdma_nl_stat_hwcounter_entry()
990 nla_nest_end(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
994 nla_nest_cancel(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
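rdma_nl_stat_hwcounter_entry() is exported precisely so drivers can emit name/value hardware counters from their own fill_stat_* callbacks. A hypothetical per-MR example in the style of existing drivers; struct my_mr and the counter name are assumptions:

```c
/*
 * Driver-side use of the exported rdma_nl_stat_hwcounter_entry() above:
 * report one per-MR hardware counter under a STAT_HWCOUNTERS nest.
 */
struct my_mr {
	struct ib_mr ibmr;
	atomic64_t page_faults;	/* hypothetical per-MR counter */
};

static int my_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
	struct nlattr *table;

	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table)
		return -EMSGSIZE;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
					 atomic64_read(&mr->page_faults)))
		goto err;

	nla_nest_end(msg, table);
	return 0;

err:
	nla_nest_cancel(msg, table);
	return -EMSGSIZE;
}
```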
999 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_stat_mr_entry() argument
1005 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_stat_mr_entry()
1009 return dev->ops.fill_stat_mr_entry(msg, mr); in fill_stat_mr_entry()
1016 static int fill_stat_counter_hwcounters(struct sk_buff *msg, in fill_stat_counter_hwcounters() argument
1023 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in fill_stat_counter_hwcounters()
1031 if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, in fill_stat_counter_hwcounters()
1037 nla_nest_end(msg, table_attr); in fill_stat_counter_hwcounters()
1042 nla_nest_cancel(msg, table_attr); in fill_stat_counter_hwcounters()
1046 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_counter_entry() argument
1059 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || in fill_res_counter_entry()
1060 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || in fill_res_counter_entry()
1061 fill_stat_counter_mode(msg, counter) || in fill_res_counter_entry()
1062 fill_stat_counter_qps(msg, counter) || in fill_res_counter_entry()
1063 fill_stat_counter_hwcounters(msg, counter)) in fill_res_counter_entry()
1074 struct sk_buff *msg; in nldev_get_doit() local
1089 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_doit()
1090 if (!msg) { in nldev_get_doit()
1095 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_doit()
1103 err = fill_dev_info(msg, device); in nldev_get_doit()
1107 nlmsg_end(msg, nlh); in nldev_get_doit()
1110 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_doit()
1113 nlmsg_free(msg); in nldev_get_doit()
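nldev_get_doit() and every other *_doit handler in this listing share one response lifecycle: allocate a reply skb, start the netlink header, fill the payload, finalize, unicast back to the requester, and free the skb on any failure before nlmsg_end(). The skeleton, condensed from the matched lines:

```c
/* The response lifecycle shared by the *_doit handlers, condensed. */
static int doit_skeleton(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct ib_device *device)
{
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Reuse nlh for the reply header, echoing the request's seq. */
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);
	if (!nlh) {
		err = -EMSGSIZE;
		goto err_free;
	}

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
	return err;
}
```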
1214 struct sk_buff *msg; in nldev_port_get_doit() local
1237 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_port_get_doit()
1238 if (!msg) { in nldev_port_get_doit()
1243 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_port_get_doit()
1251 err = fill_port_info(msg, device, port, sock_net(skb->sk)); in nldev_port_get_doit()
1255 nlmsg_end(msg, nlh); in nldev_port_get_doit()
1258 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_port_get_doit()
1261 nlmsg_free(msg); in nldev_port_get_doit()
1331 struct sk_buff *msg; in nldev_res_get_doit() local
1348 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_res_get_doit()
1349 if (!msg) { in nldev_res_get_doit()
1354 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_res_get_doit()
1362 ret = fill_res_info(msg, device, show_details); in nldev_res_get_doit()
1366 nlmsg_end(msg, nlh); in nldev_res_get_doit()
1368 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_res_get_doit()
1371 nlmsg_free(msg); in nldev_res_get_doit()
1482 struct sk_buff *msg; in res_get_common_doit() local
1516 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in res_get_common_doit()
1517 if (!msg) { in res_get_common_doit()
1522 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in res_get_common_doit()
1527 if (!nlh || fill_nldev_handle(msg, device)) { in res_get_common_doit()
1534 ret = fill_func(msg, has_cap_net_admin, res, port); in res_get_common_doit()
1539 nlmsg_end(msg, nlh); in res_get_common_doit()
1541 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in res_get_common_doit()
1544 nlmsg_free(msg); in res_get_common_doit()
1836 struct sk_buff *msg; in nldev_get_chardev() local
1867 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_chardev()
1868 if (!msg) { in nldev_get_chardev()
1872 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_chardev()
1881 data.nl_msg = msg; in nldev_get_chardev()
1886 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, in nldev_get_chardev()
1891 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, in nldev_get_chardev()
1895 if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, in nldev_get_chardev()
1901 nlmsg_end(msg, nlh); in nldev_get_chardev()
1905 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_chardev()
1910 nlmsg_free(msg); in nldev_get_chardev()
1921 struct sk_buff *msg; in nldev_sys_get_doit() local
1929 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_sys_get_doit()
1930 if (!msg) in nldev_sys_get_doit()
1933 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_sys_get_doit()
1938 nlmsg_free(msg); in nldev_sys_get_doit()
1942 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, in nldev_sys_get_doit()
1945 nlmsg_free(msg); in nldev_sys_get_doit()
1949 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, in nldev_sys_get_doit()
1952 nlmsg_free(msg); in nldev_sys_get_doit()
1956 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_MONITOR_MODE, 1); in nldev_sys_get_doit()
1958 nlmsg_free(msg); in nldev_sys_get_doit()
1971 nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); in nldev_sys_get_doit()
1973 nlmsg_end(msg, nlh); in nldev_sys_get_doit()
1974 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_sys_get_doit()
2025 static int nldev_stat_set_mode_doit(struct sk_buff *msg, in nldev_stat_set_mode_doit() argument
2061 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_set_mode_doit()
2062 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_set_mode_doit()
2124 struct sk_buff *msg; in nldev_stat_set_doit() local
2151 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_set_doit()
2152 if (!msg) { in nldev_stat_set_doit()
2156 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_set_doit()
2160 if (!nlh || fill_nldev_handle(msg, device) || in nldev_stat_set_doit()
2161 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in nldev_stat_set_doit()
2167 ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); in nldev_stat_set_doit()
2178 nlmsg_end(msg, nlh); in nldev_stat_set_doit()
2180 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_set_doit()
2183 nlmsg_free(msg); in nldev_stat_set_doit()
2194 struct sk_buff *msg; in nldev_stat_del_doit() local
2220 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_del_doit()
2221 if (!msg) { in nldev_stat_del_doit()
2225 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_del_doit()
2236 if (fill_nldev_handle(msg, device) || in nldev_stat_del_doit()
2237 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in nldev_stat_del_doit()
2238 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_del_doit()
2239 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_del_doit()
2248 nlmsg_end(msg, nlh); in nldev_stat_del_doit()
2250 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_del_doit()
2253 nlmsg_free(msg); in nldev_stat_del_doit()
2268 struct sk_buff *msg; in stat_get_doit_default_counter() local
2292 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_default_counter()
2293 if (!msg) { in stat_get_doit_default_counter()
2298 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_default_counter()
2303 if (!nlh || fill_nldev_handle(msg, device) || in stat_get_doit_default_counter()
2304 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in stat_get_doit_default_counter()
2317 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in stat_get_doit_default_counter()
2328 if (rdma_nl_stat_hwcounter_entry(msg, in stat_get_doit_default_counter()
2334 nla_nest_end(msg, table_attr); in stat_get_doit_default_counter()
2337 nlmsg_end(msg, nlh); in stat_get_doit_default_counter()
2339 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_default_counter()
2342 nla_nest_cancel(msg, table_attr); in stat_get_doit_default_counter()
2346 nlmsg_free(msg); in stat_get_doit_default_counter()
2359 struct sk_buff *msg; in stat_get_doit_qp() local
2381 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_qp()
2382 if (!msg) { in stat_get_doit_qp()
2387 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_qp()
2400 if (fill_nldev_handle(msg, device) || in stat_get_doit_qp()
2401 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in stat_get_doit_qp()
2402 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { in stat_get_doit_qp()
2408 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { in stat_get_doit_qp()
2413 nlmsg_end(msg, nlh); in stat_get_doit_qp()
2415 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_qp()
2418 nlmsg_free(msg); in stat_get_doit_qp()
2488 struct sk_buff *msg; in nldev_stat_get_counter_status_doit() local
2515 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_get_counter_status_doit()
2516 if (!msg) { in nldev_stat_get_counter_status_doit()
2522 msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_get_counter_status_doit()
2527 if (!nlh || fill_nldev_handle(msg, device) || in nldev_stat_get_counter_status_doit()
2528 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in nldev_stat_get_counter_status_doit()
2531 table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in nldev_stat_get_counter_status_doit()
2537 entry = nla_nest_start(msg, in nldev_stat_get_counter_status_doit()
2542 if (nla_put_string(msg, in nldev_stat_get_counter_status_doit()
2545 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) in nldev_stat_get_counter_status_doit()
2549 (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, in nldev_stat_get_counter_status_doit()
2553 nla_nest_end(msg, entry); in nldev_stat_get_counter_status_doit()
2557 nla_nest_end(msg, table); in nldev_stat_get_counter_status_doit()
2558 nlmsg_end(msg, nlh); in nldev_stat_get_counter_status_doit()
2560 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_get_counter_status_doit()
2563 nla_nest_cancel(msg, entry); in nldev_stat_get_counter_status_doit()
2566 nla_nest_cancel(msg, table); in nldev_stat_get_counter_status_doit()
2568 nlmsg_free(msg); in nldev_stat_get_counter_status_doit()
2732 static int fill_mon_netdev_rename(struct sk_buff *msg, in fill_mon_netdev_rename() argument
2742 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); in fill_mon_netdev_rename()
2745 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); in fill_mon_netdev_rename()
2751 static int fill_mon_netdev_association(struct sk_buff *msg, in fill_mon_netdev_association() argument
2761 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index); in fill_mon_netdev_association()
2765 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, in fill_mon_netdev_association()
2770 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port); in fill_mon_netdev_association()
2775 ret = nla_put_u32(msg, in fill_mon_netdev_association()
2780 ret = nla_put_string(msg, in fill_mon_netdev_association()
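The fill_mon_netdev_* helpers at the tail build payloads for RDMA monitor events (netdev rename and device/port association). A sketch of how such an event is typically dispatched once filled, assuming the RDMA_NL_GROUP_NOTIFY multicast group from the monitoring support; treat the group constant as an assumption if your tree predates it:

```c
/*
 * Sketch of monitor-event dispatch: finalize the message built by a
 * fill_mon_netdev_* helper and multicast it to listeners.  The group
 * constant is an assumption based on the RDMA monitoring support.
 */
static int send_mon_event(struct net *net, struct sk_buff *msg,
			  struct nlmsghdr *nlh)
{
	nlmsg_end(msg, nlh);
	return rdma_nl_multicast(net, msg, RDMA_NL_GROUP_NOTIFY, GFP_KERNEL);
}
```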