// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lib/events.h"

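/* Roll back the per-port MPESW metadata state: clear the uplink vport's
 * ingress ACL metadata, notify MLX5_DEV_EVENT_MULTIPORT_ESW listeners and
 * free the match metadata of every port that had one allocated.
 */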
static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev;
	struct mlx5_eswitch *esw;
	u32 pf_metadata;
	int i;

	mlx5_ldev_for_each(i, 0, ldev) {
		dev = ldev->pf[i].dev;
		esw = dev->priv.eswitch;
		pf_metadata = ldev->lag_mpesw.pf_metadata[i];
		if (!pf_metadata)
			continue;
		mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);
		mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
					 (void *)0);
		mlx5_esw_match_metadata_free(esw, pf_metadata);
		ldev->lag_mpesw.pf_metadata[i] = 0;
	}
}

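/* Allocate a unique match metadata value for each port and program it into
 * the uplink vport's ingress ACL, then fire MLX5_DEV_EVENT_MULTIPORT_ESW on
 * every port. Any failure unwinds the ports configured so far.
 */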
static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev;
	struct mlx5_eswitch *esw;
	u32 pf_metadata;
	int i, err;

	mlx5_ldev_for_each(i, 0, ldev) {
		dev = ldev->pf[i].dev;
		esw = dev->priv.eswitch;
		pf_metadata = mlx5_esw_match_metadata_alloc(esw);
		if (!pf_metadata) {
			err = -ENOSPC;
			goto err_metadata;
		}

		ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
		err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,
								 pf_metadata);
		if (err)
			goto err_metadata;
	}

	mlx5_ldev_for_each(i, 0, ldev) {
		dev = ldev->pf[i].dev;
		mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
					 (void *)0);
	}

	return 0;

err_metadata:
	mlx5_mpesw_metadata_cleanup(ldev);
	return err;
}

#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
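/* Switch the lag into multiport eswitch mode: verify the prerequisites
 * (offloads mode, port selection flow table, shared FDB support), assign
 * the per-port metadata and activate the hardware LAG. IB devices are
 * rescanned and the eswitch IB representors reloaded once the LAG is up.
 */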
static int enable_mpesw(struct mlx5_lag *ldev)
{
	int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
	struct mlx5_core_dev *dev0;
	int err;
	int i;

	if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
		return -EINVAL;

	dev0 = ldev->pf[idx].dev;
	if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
		return -EOPNOTSUPP;

	if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
	    !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
	    !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
	    !mlx5_lag_check_prereq(ldev) ||
	    !mlx5_lag_shared_fdb_supported(ldev))
		return -EOPNOTSUPP;

	err = mlx5_mpesw_metadata_set(ldev);
	if (err)
		return err;

	mlx5_lag_remove_devices(ldev);

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
	if (err) {
		mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
		goto err_add_devices;
	}

	dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	mlx5_ldev_for_each(i, 0, ldev) {
		err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
		if (err)
			goto err_rescan_drivers;
	}

	return 0;

err_rescan_drivers:
	dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
	mlx5_rescan_drivers_locked(dev0);
	mlx5_deactivate_lag(ldev);
err_add_devices:
	mlx5_lag_add_devices(ldev);
	mlx5_ldev_for_each(i, 0, ldev)
		mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
	mlx5_mpesw_metadata_cleanup(ldev);
	return err;
}

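/* Tear down multiport eswitch mode if it is active. */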
static void disable_mpesw(struct mlx5_lag *ldev)
{
	if (ldev->mode == MLX5_LAG_MODE_MPESW) {
		mlx5_mpesw_metadata_cleanup(ldev);
		mlx5_disable_lag(ldev);
	}
}

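/* Work handler that performs the actual mode change. It runs under the
 * devcom component lock and the lag mutex; the request is rejected with
 * -EAGAIN while another mode change is in progress.
 */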
static void mlx5_mpesw_work(struct work_struct *work)
{
	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
	struct mlx5_devcom_comp_dev *devcom;
	struct mlx5_lag *ldev = mpesww->lag;

	devcom = mlx5_lag_get_devcom_comp(ldev);
	if (!devcom)
		return;

	mlx5_devcom_comp_lock(devcom);
	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mpesww->result = -EAGAIN;
		goto unlock;
	}

	if (mpesww->op == MLX5_MPESW_OP_ENABLE)
		mpesww->result = enable_mpesw(ldev);
	else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
		disable_mpesw(ldev);
unlock:
	mutex_unlock(&ldev->lock);
	mlx5_devcom_comp_unlock(devcom);
	complete(&mpesww->comp);
}

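/* Queue an enable/disable request on the lag workqueue and wait for its
 * completion, so that mode changes are serialized with the rest of the
 * lag work. Returns the result reported by the handler.
 */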
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
				     enum mpesw_op op)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
	struct mlx5_mpesw_work_st *work;
	int err = 0;

	if (!ldev)
		return 0;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	INIT_WORK(&work->work, mlx5_mpesw_work);
	init_completion(&work->comp);
	work->op = op;
	work->lag = ldev;

	if (!queue_work(ldev->wq, &work->work)) {
		mlx5_core_warn(dev, "failed to queue mpesw work\n");
		err = -EINVAL;
		goto out;
	}
	wait_for_completion(&work->comp);
	err = work->result;
out:
	kfree(work);
	return err;
}

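/* Request leaving multiport eswitch mode; no-op if lag is not in use. */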
void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev)
{
	mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}

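/* Request entering multiport eswitch mode. Returns 0 when no lag device
 * exists or on success, a negative errno otherwise.
 */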
int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev)
{
	return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}

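/* Mirred (redirect) actions towards a bond master are not supported while
 * in multiport eswitch mode; reject them with an extack message.
 */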
int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
			     struct net_device *out_dev,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(mdev);

	if (!netif_is_bond_master(out_dev) || !ldev)
		return 0;

	if (ldev->mode != MLX5_LAG_MODE_MPESW)
		return 0;

	NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
	return -EOPNOTSUPP;
}

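/* True if the device is part of a lag running in multiport eswitch mode. */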
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = mlx5_lag_dev(dev);

	return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
EXPORT_SYMBOL(mlx5_lag_is_mpesw);