1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
3 
4 #include <linux/mlx5/vport.h>
5 #include <mlx5_core.h>
6 #include <fs_core.h>
7 #include <fs_cmd.h>
8 #include "fs_hws_pools.h"
9 #include "mlx5hws.h"
10 
11 #define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
12 #define MLX5HWS_CTX_QUEUE_SIZE 256
13 
14 static struct mlx5hws_action *
15 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
16 static void
17 mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
18 			unsigned long index);
19 static void
20 mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
21 			unsigned long index);
22 
/* Create the shared HWS actions every rule can reuse (tag, pop/push VLAN,
 * drop, L2 decap, remove-header+VLAN), init the packet-reformat pools and
 * the xarray caches for on-demand actions.
 * Returns 0 on success or a negative errno; on failure, everything created
 * so far is torn down via the reverse-order goto chain below.
 */
static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
					 struct mlx5_fs_hws_context *fs_ctx)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5hws_action_reformat_header reformat_hdr = {};
	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
	enum mlx5hws_action_type action_type;
	int err = -ENOSPC;

	hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
	if (!hws_pool->tag_action)
		return err;
	hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
	if (!hws_pool->pop_vlan_action)
		goto destroy_tag;
	hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
	if (!hws_pool->push_vlan_action)
		goto destroy_pop_vlan;
	hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
	if (!hws_pool->drop_action)
		goto destroy_push_vlan;
	action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
	/* L2-to-L2 decap is created with an empty, zero-sized reformat
	 * header - it carries no data of its own.
	 */
	hws_pool->decapl2_action =
		mlx5hws_action_create_reformat(ctx, action_type, 1,
					       &reformat_hdr, 0, flags);
	if (!hws_pool->decapl2_action)
		goto destroy_drop;
	hws_pool->remove_hdr_vlan_action =
		mlx5_fs_create_action_remove_header_vlan(ctx);
	if (!hws_pool->remove_hdr_vlan_action)
		goto destroy_decapl2;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_INSERT_HEADER);
	if (err)
		goto destroy_remove_hdr;
	err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
				       MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
	if (err)
		goto cleanup_insert_hdr;
	/* Caches for actions created on demand; emptied and destroyed by
	 * mlx5_fs_cleanup_hws_actions_pool().
	 */
	xa_init(&hws_pool->el2tol3tnl_pools);
	xa_init(&hws_pool->el2tol2tnl_pools);
	xa_init(&hws_pool->mh_pools);
	xa_init(&hws_pool->table_dests);
	xa_init(&hws_pool->vport_dests);
	xa_init(&hws_pool->vport_vhca_dests);
	return 0;

cleanup_insert_hdr:
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
destroy_remove_hdr:
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
destroy_decapl2:
	mlx5hws_action_destroy(hws_pool->decapl2_action);
destroy_drop:
	mlx5hws_action_destroy(hws_pool->drop_action);
destroy_push_vlan:
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
destroy_pop_vlan:
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
destroy_tag:
	mlx5hws_action_destroy(hws_pool->tag_action);
	return err;
}
87 
/* Tear down everything created by mlx5_fs_init_hws_actions_pool(), in the
 * reverse order of creation: first drain and destroy the xarray caches of
 * on-demand actions, then the reformat pools, then the shared actions.
 */
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
	struct mlx5hws_action *action;
	struct mlx5_fs_pool *pool;
	unsigned long i;

	xa_for_each(&hws_pool->vport_vhca_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_vhca_dests);
	xa_for_each(&hws_pool->vport_dests, i, action)
		mlx5hws_action_destroy(action);
	xa_destroy(&hws_pool->vport_dests);
	/* table_dests entries are destroyed per-table in
	 * mlx5_fs_del_flow_table_dest_action(); only the xarray remains.
	 */
	xa_destroy(&hws_pool->table_dests);
	xa_for_each(&hws_pool->mh_pools, i, pool)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
	xa_destroy(&hws_pool->mh_pools);
	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
	xa_destroy(&hws_pool->el2tol2tnl_pools);
	xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
		mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
	xa_destroy(&hws_pool->el2tol3tnl_pools);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
	mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
	mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
	mlx5hws_action_destroy(hws_pool->decapl2_action);
	mlx5hws_action_destroy(hws_pool->drop_action);
	mlx5hws_action_destroy(hws_pool->push_vlan_action);
	mlx5hws_action_destroy(hws_pool->pop_vlan_action);
	mlx5hws_action_destroy(hws_pool->tag_action);
}
120 
mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace * ns)121 static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
122 {
123 	struct mlx5hws_context_attr hws_ctx_attr = {};
124 	int err;
125 
126 	hws_ctx_attr.queues = min_t(int, num_online_cpus(),
127 				    MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
128 	hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
129 
130 	ns->fs_hws_context.hws_ctx =
131 		mlx5hws_context_open(ns->dev, &hws_ctx_attr);
132 	if (!ns->fs_hws_context.hws_ctx) {
133 		mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
134 		return -EINVAL;
135 	}
136 	err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
137 	if (err) {
138 		mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
139 		mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
140 		return err;
141 	}
142 	return 0;
143 }
144 
mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace * ns)145 static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
146 {
147 	mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
148 	return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
149 }
150 
/* Pair (or unpair, when peer_ns is NULL) this namespace's HWS context
 * with a peer context for cross-vhca steering. Always succeeds.
 */
static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_vhca_id)
{
	struct mlx5hws_context *peer_ctx;

	peer_ctx = peer_ns ? peer_ns->fs_hws_context.hws_ctx : NULL;
	mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
				 peer_vhca_id);
	return 0;
}
163 
mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)164 static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
165 				       struct mlx5_flow_table *ft,
166 				       struct mlx5_flow_table *next_ft)
167 {
168 	struct mlx5hws_table *next_tbl;
169 	int err;
170 
171 	if (!ns->fs_hws_context.hws_ctx)
172 		return -EINVAL;
173 
174 	/* if no change required, return */
175 	if (!next_ft && !ft->fs_hws_table.miss_ft_set)
176 		return 0;
177 
178 	next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
179 	err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
180 	if (err) {
181 		mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
182 		return err;
183 	}
184 	ft->fs_hws_table.miss_ft_set = !!next_tbl;
185 	return 0;
186 }
187 
mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)188 static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
189 					      struct mlx5_flow_table *ft)
190 {
191 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
192 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
193 	struct mlx5hws_action *dest_ft_action;
194 	struct xarray *dests_xa;
195 	int err;
196 
197 	dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
198 							      ft->id, flags);
199 	if (!dest_ft_action) {
200 		mlx5_core_err(ns->dev, "Failed creating dest table action\n");
201 		return -ENOMEM;
202 	}
203 
204 	dests_xa = &fs_ctx->hws_pool.table_dests;
205 	err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
206 	if (err)
207 		mlx5hws_action_destroy(dest_ft_action);
208 	return err;
209 }
210 
mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)211 static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
212 					      struct mlx5_flow_table *ft)
213 {
214 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
215 	struct mlx5hws_action *dest_ft_action;
216 	struct xarray *dests_xa;
217 	int err;
218 
219 	dests_xa = &fs_ctx->hws_pool.table_dests;
220 	dest_ft_action = xa_erase(dests_xa, ft->id);
221 	if (!dest_ft_action) {
222 		mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
223 		return -ENOENT;
224 	}
225 
226 	err = mlx5hws_action_destroy(dest_ft_action);
227 	if (err)
228 		mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
229 	return err;
230 }
231 
/* Create a flow table. FW-terminated tables go through the FW command
 * path; everything else must be FDB and is created as an HWS table.
 * Either way a dest-table action is cached so rules can forward to the
 * new table. Returns 0 or a negative errno, unwinding on failure.
 */
static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_table_attr *ft_attr,
					  struct mlx5_flow_table *next_ft)
{
	struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
	struct mlx5hws_table_attr tbl_attr = {};
	struct mlx5hws_table *tbl;
	int err;

	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
								   next_ft);
		if (err)
			return err;
		err = mlx5_fs_add_flow_table_dest_action(ns, ft);
		if (err)
			mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
		return err;
	}

	if (ns->table_type != FS_FT_FDB) {
		mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
			      ns->table_type);
		return -EOPNOTSUPP;
	}

	tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
	tbl_attr.level = ft_attr->level;
	tbl = mlx5hws_table_create(ctx, &tbl_attr);
	if (!tbl) {
		mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
		return -EINVAL;
	}

	/* Expose the HWS-assigned table id to the generic fs_core layer. */
	ft->fs_hws_table.hws_table = tbl;
	ft->id = mlx5hws_table_get_id(tbl);

	if (next_ft) {
		err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
		if (err)
			goto destroy_table;
	}

	/* HWS tables are not statically sized; fs_core sees no fte limit. */
	ft->max_fte = INT_MAX;

	err = mlx5_fs_add_flow_table_dest_action(ns, ft);
	if (err)
		goto clear_ft_miss;
	return 0;

clear_ft_miss:
	/* Harmless when no miss was set: the helper no-ops in that case. */
	mlx5_fs_set_ft_default_miss(ns, ft, NULL);
destroy_table:
	mlx5hws_table_destroy(tbl);
	ft->fs_hws_table.hws_table = NULL;
	return err;
}
290 
/* Destroy a flow table created by mlx5_cmd_hws_create_flow_table().
 * Teardown is best-effort: each step logs on failure and continues, so a
 * partial failure does not leak the remaining resources.
 */
static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft)
{
	int err;

	/* Both FW-term and HWS tables cached a dest action at create time. */
	err = mlx5_fs_del_flow_table_dest_action(ns, ft);
	if (err)
		mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);

	err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
	if (err)
		mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);

	err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
	if (err)
		mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);

	return err;
}
313 
mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)314 static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
315 					  struct mlx5_flow_table *ft,
316 					  struct mlx5_flow_table *next_ft)
317 {
318 	if (mlx5_fs_cmd_is_fw_term_table(ft))
319 		return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
320 
321 	return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
322 }
323 
/* Root FT updates are always delegated to the FW command interface;
 * HWS has no special handling for the root table.
 */
static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       u32 underlay_qpn,
				       bool disconnect)
{
	const struct mlx5_flow_cmds *fw_cmds = mlx5_fs_cmd_get_fw_cmds();

	return fw_cmds->update_root_ft(ns, ft, underlay_qpn, disconnect);
}
332 
/* Create a flow group. For HWS tables a group maps to a BWC matcher
 * built from the group's match criteria; the FW start_flow_index serves
 * as the matcher priority.
 */
static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_flow_table *ft, u32 *in,
					  struct mlx5_flow_group *fg)
{
	struct mlx5hws_bwc_matcher *bwc_matcher;
	struct mlx5hws_match_parameters mask;
	u8 match_criteria_enable;
	u32 priority;

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);

	priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
	match_criteria_enable = MLX5_GET(create_flow_group_in, in,
					 match_criteria_enable);
	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);

	bwc_matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
						 priority,
						 match_criteria_enable,
						 &mask);
	if (!bwc_matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_hws_matcher.matcher = bwc_matcher;
	return 0;
}
362 
mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg)363 static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
364 					   struct mlx5_flow_table *ft,
365 					   struct mlx5_flow_group *fg)
366 {
367 	if (mlx5_fs_cmd_is_fw_term_table(ft))
368 		return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
369 
370 	return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
371 }
372 
373 static struct mlx5hws_action *
mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)374 mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
375 			   struct mlx5_flow_rule *dst)
376 {
377 	return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
378 }
379 
380 static struct mlx5hws_action *
mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)381 mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
382 				  struct mlx5_flow_rule *dst)
383 {
384 	u32 table_num = dst->dest_attr.ft_num;
385 
386 	return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
387 }
388 
389 static struct mlx5hws_action *
mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst)390 mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
391 				     struct mlx5_flow_rule *dst)
392 {
393 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
394 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
395 	u32 table_num = dst->dest_attr.ft_num;
396 
397 	return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
398 }
399 
400 static struct mlx5hws_action *
mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_flow_rule * dst,bool is_dest_type_uplink)401 mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
402 			      struct mlx5_flow_rule *dst,
403 			      bool is_dest_type_uplink)
404 {
405 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
406 	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
407 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
408 	struct mlx5hws_action *dest;
409 	struct xarray *dests_xa;
410 	bool vhca_id_valid;
411 	unsigned long idx;
412 	u16 vport_num;
413 	int err;
414 
415 	vhca_id_valid = is_dest_type_uplink ||
416 			(dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
417 	vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
418 	if (vhca_id_valid) {
419 		dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
420 		idx = (unsigned long)dest_attr->vport.vhca_id << 16 | vport_num;
421 	} else {
422 		dests_xa = &fs_ctx->hws_pool.vport_dests;
423 		idx = vport_num;
424 	}
425 dest_load:
426 	dest = xa_load(dests_xa, idx);
427 	if (dest)
428 		return dest;
429 
430 	dest = mlx5hws_action_create_dest_vport(ctx, vport_num,	vhca_id_valid,
431 						dest_attr->vport.vhca_id, flags);
432 
433 	err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
434 	if (err) {
435 		mlx5hws_action_destroy(dest);
436 		dest = NULL;
437 
438 		if (err == -EBUSY)
439 			/* xarray entry was already stored by another thread */
440 			goto dest_load;
441 	}
442 
443 	return dest;
444 }
445 
446 static struct mlx5hws_action *
mlx5_fs_create_dest_action_range(struct mlx5hws_context * ctx,struct mlx5_flow_rule * dst)447 mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
448 				 struct mlx5_flow_rule *dst)
449 {
450 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
451 	struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
452 
453 	return mlx5hws_action_create_dest_match_range(ctx,
454 						      dest_attr->range.field,
455 						      dest_attr->range.hit_ft,
456 						      dest_attr->range.miss_ft,
457 						      dest_attr->range.min,
458 						      dest_attr->range.max,
459 						      flags);
460 }
461 
462 static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context * ctx,struct mlx5hws_action_dest_attr * dests,u32 num_of_dests,bool ignore_flow_level,u32 flow_source)463 mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
464 				 struct mlx5hws_action_dest_attr *dests,
465 				 u32 num_of_dests, bool ignore_flow_level,
466 				 u32 flow_source)
467 {
468 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
469 
470 	return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
471 						ignore_flow_level,
472 						flow_source, flags);
473 }
474 
475 static struct mlx5hws_action *
mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context * fs_ctx)476 mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
477 {
478 	return fs_ctx->hws_pool.push_vlan_action;
479 }
480 
mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan * vlan)481 static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
482 {
483 	u16 n_ethtype = vlan->ethtype;
484 	u8 prio = vlan->prio;
485 	u16 vid = vlan->vid;
486 
487 	return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
488 }
489 
490 static struct mlx5hws_action *
mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context * fs_ctx)491 mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
492 {
493 	return fs_ctx->hws_pool.pop_vlan_action;
494 }
495 
496 static struct mlx5hws_action *
mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context * fs_ctx)497 mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
498 {
499 	return fs_ctx->hws_pool.decapl2_action;
500 }
501 
502 static struct mlx5hws_action *
mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context * fs_ctx)503 mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
504 {
505 	return fs_ctx->hws_pool.drop_action;
506 }
507 
508 static struct mlx5hws_action *
mlx5_fs_get_action_tag(struct mlx5_fs_hws_context * fs_ctx)509 mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
510 {
511 	return fs_ctx->hws_pool.tag_action;
512 }
513 
514 static struct mlx5hws_action *
mlx5_fs_create_action_last(struct mlx5hws_context * ctx)515 mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
516 {
517 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
518 
519 	return mlx5hws_action_create_last(ctx, flags);
520 }
521 
mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action * fs_action)522 static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
523 {
524 	switch (mlx5hws_action_get_type(fs_action->action)) {
525 	case MLX5HWS_ACTION_TYP_CTR:
526 		mlx5_fc_put_hws_action(fs_action->counter);
527 		break;
528 	default:
529 		mlx5hws_action_destroy(fs_action->action);
530 	}
531 }
532 
533 static void
mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action ** fs_actions,int * num_fs_actions)534 mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
535 			   int *num_fs_actions)
536 {
537 	int i;
538 
539 	/* Free in reverse order to handle action dependencies */
540 	for (i = *num_fs_actions - 1; i >= 0; i--)
541 		mlx5_fs_destroy_fs_action(*fs_actions + i);
542 	*num_fs_actions = 0;
543 	kfree(*fs_actions);
544 	*fs_actions = NULL;
545 }
546 
547 /* Splits FTE's actions into cached, rule and destination actions.
548  * The cached and destination actions are saved on the fte hws rule.
549  * The rule actions are returned as a parameter, together with their count.
550  * We want to support a rule with 32 destinations, which means we need to
551  * account for 32 destinations plus usually a counter plus one more action
552  * for a multi-destination flow table.
 * 32 is the SW limitation for the array size; keep it. The HWS limitation is 16M STEs per matcher.
554  */
555 #define MLX5_FLOW_CONTEXT_ACTION_MAX 34
mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte,struct mlx5hws_rule_action ** ractions)556 static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
557 				       struct mlx5_flow_table *ft,
558 				       struct mlx5_flow_group *group,
559 				       struct fs_fte *fte,
560 				       struct mlx5hws_rule_action **ractions)
561 {
562 	struct mlx5_flow_act *fte_action = &fte->act_dests.action;
563 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
564 	struct mlx5hws_action_dest_attr *dest_actions;
565 	struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
566 	struct mlx5_fs_hws_rule_action *fs_actions;
567 	struct mlx5_core_dev *dev = ns->dev;
568 	struct mlx5hws_action *dest_action;
569 	struct mlx5hws_action *tmp_action;
570 	struct mlx5_fs_hws_pr *pr_data;
571 	struct mlx5_fs_hws_mh *mh_data;
572 	bool delay_encap_set = false;
573 	struct mlx5_flow_rule *dst;
574 	int num_dest_actions = 0;
575 	int num_fs_actions = 0;
576 	int num_actions = 0;
577 	int err;
578 
579 	*ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
580 			    GFP_KERNEL);
581 	if (!*ractions) {
582 		err = -ENOMEM;
583 		goto out_err;
584 	}
585 
586 	fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
587 			     sizeof(*fs_actions), GFP_KERNEL);
588 	if (!fs_actions) {
589 		err = -ENOMEM;
590 		goto free_actions_alloc;
591 	}
592 
593 	dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
594 			       sizeof(*dest_actions), GFP_KERNEL);
595 	if (!dest_actions) {
596 		err = -ENOMEM;
597 		goto free_fs_actions_alloc;
598 	}
599 
600 	/* The order of the actions are must to be kept, only the following
601 	 * order is supported by HW steering:
602 	 * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
603 	 *      -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
604 	 *      -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
605 	 */
606 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
607 		tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
608 		if (!tmp_action) {
609 			err = -ENOMEM;
610 			goto free_dest_actions_alloc;
611 		}
612 		(*ractions)[num_actions++].action = tmp_action;
613 	}
614 
615 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
616 		int reformat_type = fte_action->pkt_reformat->reformat_type;
617 
618 		if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
619 			mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
620 			err = -EINVAL;
621 			goto free_actions;
622 		}
623 
624 		if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
625 			pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
626 			(*ractions)[num_actions].reformat.offset = pr_data->offset;
627 			(*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
628 			(*ractions)[num_actions].reformat.data = pr_data->data;
629 			(*ractions)[num_actions++].action =
630 				fte_action->pkt_reformat->fs_hws_action.hws_action;
631 		} else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
632 			(*ractions)[num_actions++].action =
633 				fte_action->pkt_reformat->fs_hws_action.hws_action;
634 		} else {
635 			delay_encap_set = true;
636 		}
637 	}
638 
639 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
640 		tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
641 		if (!tmp_action) {
642 			err = -ENOMEM;
643 			goto free_actions;
644 		}
645 		(*ractions)[num_actions++].action = tmp_action;
646 	}
647 
648 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
649 		tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
650 		if (!tmp_action) {
651 			err = -ENOMEM;
652 			goto free_actions;
653 		}
654 		(*ractions)[num_actions++].action = tmp_action;
655 	}
656 
657 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
658 		mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
659 		(*ractions)[num_actions].modify_header.offset = mh_data->offset;
660 		(*ractions)[num_actions].modify_header.data = mh_data->data;
661 		(*ractions)[num_actions++].action =
662 			fte_action->modify_hdr->fs_hws_action.hws_action;
663 	}
664 
665 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
666 		tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
667 		if (!tmp_action) {
668 			err = -ENOMEM;
669 			goto free_actions;
670 		}
671 		(*ractions)[num_actions].push_vlan.vlan_hdr =
672 			htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
673 		(*ractions)[num_actions++].action = tmp_action;
674 	}
675 
676 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
677 		tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
678 		if (!tmp_action) {
679 			err = -ENOMEM;
680 			goto free_actions;
681 		}
682 		(*ractions)[num_actions].push_vlan.vlan_hdr =
683 			htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
684 		(*ractions)[num_actions++].action = tmp_action;
685 	}
686 
687 	if (delay_encap_set) {
688 		pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
689 		(*ractions)[num_actions].reformat.offset = pr_data->offset;
690 		(*ractions)[num_actions].reformat.data = pr_data->data;
691 		(*ractions)[num_actions++].action =
692 			fte_action->pkt_reformat->fs_hws_action.hws_action;
693 	}
694 
695 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
696 		list_for_each_entry(dst, &fte->node.children, node.list) {
697 			struct mlx5_fc *counter;
698 
699 			if (dst->dest_attr.type !=
700 			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
701 				continue;
702 
703 			if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
704 				err = -EOPNOTSUPP;
705 				goto free_actions;
706 			}
707 
708 			counter = dst->dest_attr.counter;
709 			tmp_action = mlx5_fc_get_hws_action(ctx, counter);
710 			if (!tmp_action) {
711 				err = -EINVAL;
712 				goto free_actions;
713 			}
714 
715 			(*ractions)[num_actions].counter.offset =
716 				mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
717 			(*ractions)[num_actions++].action = tmp_action;
718 			fs_actions[num_fs_actions].action = tmp_action;
719 			fs_actions[num_fs_actions++].counter = counter;
720 		}
721 	}
722 
723 	if (fte->act_dests.flow_context.flow_tag) {
724 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
725 			err = -EOPNOTSUPP;
726 			goto free_actions;
727 		}
728 		tmp_action = mlx5_fs_get_action_tag(fs_ctx);
729 		if (!tmp_action) {
730 			err = -ENOMEM;
731 			goto free_actions;
732 		}
733 		(*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
734 		(*ractions)[num_actions++].action = tmp_action;
735 	}
736 
737 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
738 		err = -EOPNOTSUPP;
739 		goto free_actions;
740 	}
741 
742 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
743 		dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
744 		if (!dest_action) {
745 			err = -ENOMEM;
746 			goto free_actions;
747 		}
748 		dest_actions[num_dest_actions++].dest = dest_action;
749 	}
750 
751 	if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
752 		list_for_each_entry(dst, &fte->node.children, node.list) {
753 			struct mlx5_flow_destination *attr = &dst->dest_attr;
754 			bool type_uplink =
755 				attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
756 
757 			if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
758 			    num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
759 				err = -EOPNOTSUPP;
760 				goto free_actions;
761 			}
762 			if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
763 				continue;
764 
765 			switch (attr->type) {
766 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
767 				dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
768 				break;
769 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
770 				dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
771 										dst);
772 				if (dest_action)
773 					break;
774 				dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
775 										   dst);
776 				fs_actions[num_fs_actions++].action = dest_action;
777 				break;
778 			case MLX5_FLOW_DESTINATION_TYPE_RANGE:
779 				dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
780 				fs_actions[num_fs_actions++].action = dest_action;
781 				break;
782 			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
783 			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
784 				dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
785 									    type_uplink);
786 				break;
787 			default:
788 				err = -EOPNOTSUPP;
789 				goto free_actions;
790 			}
791 			if (!dest_action) {
792 				err = -ENOMEM;
793 				goto free_actions;
794 			}
795 			dest_actions[num_dest_actions++].dest = dest_action;
796 		}
797 	}
798 
799 	if (num_dest_actions == 1) {
800 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
801 			err = -EOPNOTSUPP;
802 			goto free_actions;
803 		}
804 		(*ractions)[num_actions++].action = dest_actions->dest;
805 	} else if (num_dest_actions > 1) {
806 		u32 flow_source = fte->act_dests.flow_context.flow_source;
807 		bool ignore_flow_level;
808 
809 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
810 		    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
811 			err = -EOPNOTSUPP;
812 			goto free_actions;
813 		}
814 		ignore_flow_level =
815 			!!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
816 		tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
817 							      num_dest_actions,
818 							      ignore_flow_level,
819 							      flow_source);
820 		if (!tmp_action) {
821 			err = -EOPNOTSUPP;
822 			goto free_actions;
823 		}
824 		fs_actions[num_fs_actions++].action = tmp_action;
825 		(*ractions)[num_actions++].action = tmp_action;
826 	}
827 
828 	if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
829 	    num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
830 		err = -EOPNOTSUPP;
831 		goto free_actions;
832 	}
833 
834 	tmp_action = mlx5_fs_create_action_last(ctx);
835 	if (!tmp_action) {
836 		err = -ENOMEM;
837 		goto free_actions;
838 	}
839 	fs_actions[num_fs_actions++].action = tmp_action;
840 	(*ractions)[num_actions++].action = tmp_action;
841 
842 	kfree(dest_actions);
843 
844 	/* Actions created specifically for this rule will be destroyed
845 	 * once rule is deleted.
846 	 */
847 	fte->fs_hws_rule.num_fs_actions = num_fs_actions;
848 	fte->fs_hws_rule.hws_fs_actions = fs_actions;
849 
850 	return 0;
851 
852 free_actions:
853 	mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
854 free_dest_actions_alloc:
855 	kfree(dest_actions);
856 free_fs_actions_alloc:
857 	kfree(fs_actions);
858 free_actions_alloc:
859 	kfree(*ractions);
860 	*ractions = NULL;
861 out_err:
862 	return err;
863 }
864 
/* Install a flow table entry (rule). FW-terminating tables are delegated
 * to the FW command path; otherwise the rule is created through the HWS
 * BWC (bulk write-combining) API.
 */
static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft,
				   struct mlx5_flow_group *group,
				   struct fs_fte *fte)
{
	struct mlx5hws_match_parameters params;
	struct mlx5hws_rule_action *ractions;
	struct mlx5hws_bwc_rule *rule;
	int err = 0;

	if (mlx5_fs_cmd_is_fw_term_table(ft)) {
		/* Packet reformat on termination table not supported yet */
		if (fte->act_dests.action.action &
		    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
			return -EOPNOTSUPP;
		return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
	}

	/* Translate the fs_core FTE actions into an HWS rule-action array;
	 * per-rule resources are recorded in fte->fs_hws_rule.
	 */
	err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
	if (err)
		goto out_err;

	params.match_sz = sizeof(fte->val);
	params.match_buf = fte->val;

	rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
				       fte->act_dests.flow_context.flow_source,
				       ractions);
	/* ractions is only needed at creation time */
	kfree(ractions);
	if (!rule) {
		err = -EINVAL;
		goto free_actions;
	}

	fte->fs_hws_rule.bwc_rule = rule;
	return 0;

free_actions:
	/* Rule creation failed: release the per-rule actions acquired above */
	mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
				   &fte->fs_hws_rule.num_fs_actions);
out_err:
	mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
	return err;
}
909 
mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct fs_fte * fte)910 static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
911 				   struct mlx5_flow_table *ft,
912 				   struct fs_fte *fte)
913 {
914 	struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
915 	int err;
916 
917 	if (mlx5_fs_cmd_is_fw_term_table(ft))
918 		return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
919 
920 	err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
921 	rule->bwc_rule = NULL;
922 
923 	mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
924 
925 	return err;
926 }
927 
/* Update an installed FTE in place. Only action, destination-list and
 * flow-counter modifications are allowed on the HWS path.
 */
static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft,
				   struct mlx5_flow_group *group,
				   int modify_mask,
				   struct fs_fte *fte)
{
	int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
		BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
	struct mlx5hws_rule_action *ractions;
	int saved_num_fs_actions;
	int ret;

	if (mlx5_fs_cmd_is_fw_term_table(ft))
		return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
							     modify_mask, fte);

	if ((modify_mask & ~allowed_mask) != 0)
		return -EINVAL;

	/* Save the currently installed per-rule actions; they are either
	 * freed (update succeeded) or restored (update failed).
	 */
	saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
	saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;

	/* Builds a fresh action set and overwrites fte->fs_hws_rule */
	ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
	if (ret)
		return ret;

	ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
	kfree(ractions);
	if (ret)
		goto restore_actions;

	/* HW now uses the new actions; the saved ones can be released */
	mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
	return ret;

restore_actions:
	/* Update failed: drop the newly created actions and reinstate the
	 * saved set, which the installed rule still references.
	 */
	mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
				   &fte->fs_hws_rule.num_fs_actions);
	fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
	fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
	return ret;
}
971 
972 static struct mlx5hws_action *
mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context * ctx)973 mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
974 {
975 	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
976 	struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
977 
978 	/* MAC anchor not supported in HWS reformat, use VLAN anchor */
979 	remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
980 	remove_hdr_vlan.offset = 0;
981 	remove_hdr_vlan.size = sizeof(struct vlan_hdr);
982 	return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
983 }
984 
985 static struct mlx5hws_action *
mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context * fs_ctx,struct mlx5_pkt_reformat_params * params)986 mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
987 				      struct mlx5_pkt_reformat_params *params)
988 {
989 	if (!params ||
990 	    params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
991 	    params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
992 	    params->size != sizeof(struct vlan_hdr))
993 		return NULL;
994 
995 	return fs_ctx->hws_pool.remove_hdr_vlan_action;
996 }
997 
998 static int
mlx5_fs_verify_insert_header_params(struct mlx5_core_dev * mdev,struct mlx5_pkt_reformat_params * params)999 mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
1000 				    struct mlx5_pkt_reformat_params *params)
1001 {
1002 	if ((!params->data && params->size) || (params->data && !params->size) ||
1003 	    MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
1004 	    MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
1005 		mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
1006 		return -EINVAL;
1007 	}
1008 	if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
1009 	    params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
1010 	    params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
1011 		mlx5_core_err(mdev, "Only vlan insert header supported\n");
1012 		return -EOPNOTSUPP;
1013 	}
1014 	return 0;
1015 }
1016 
1017 static int
mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev * dev,struct mlx5_pkt_reformat_params * params)1018 mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
1019 				  struct mlx5_pkt_reformat_params *params)
1020 {
1021 	if (params->param_0 || params->param_1) {
1022 		mlx5_core_err(dev, "Invalid reformat params\n");
1023 		return -EINVAL;
1024 	}
1025 	return 0;
1026 }
1027 
/* Look up — or lazily create — the packet-reformat pool for a given encap
 * reformat type and header size. Pools are indexed in the xarray by header
 * size, so each distinct size gets its own bulk of HWS reformat actions.
 * Returns the pool or an ERR_PTR.
 */
static struct mlx5_fs_pool *
mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
			  enum mlx5hws_action_type reformat_type, size_t size)
{
	struct mlx5_fs_pool *pr_pool;
	unsigned long index = size;
	int err;

	pr_pool = xa_load(pr_pools, index);
	if (pr_pool)
		return pr_pool;

	pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
	if (!pr_pool)
		return ERR_PTR(-ENOMEM);
	err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
	if (err)
		goto free_pr_pool;
	/* xa_insert fails with -EBUSY if an entry already exists at index */
	err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
	if (err)
		goto cleanup_pr_pool;
	return pr_pool;

cleanup_pr_pool:
	mlx5_fs_hws_pr_pool_cleanup(pr_pool);
free_pr_pool:
	kfree(pr_pool);
	return ERR_PTR(err);
}
1057 
/* Unregister a packet-reformat pool from its xarray, tear it down and
 * free it. Must mirror mlx5_fs_get_pr_encap_pool().
 */
static void
mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
			unsigned long index)
{
	xa_erase(pr_pools, index);
	mlx5_fs_hws_pr_pool_cleanup(pool);
	kfree(pool);
}
1066 
1067 static int
mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat_params * params,enum mlx5_flow_namespace_type namespace,struct mlx5_pkt_reformat * pkt_reformat)1068 mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
1069 				   struct mlx5_pkt_reformat_params *params,
1070 				   enum mlx5_flow_namespace_type namespace,
1071 				   struct mlx5_pkt_reformat *pkt_reformat)
1072 {
1073 	struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
1074 	struct mlx5_fs_hws_actions_pool *hws_pool;
1075 	struct mlx5hws_action *hws_action = NULL;
1076 	struct mlx5_fs_hws_pr *pr_data = NULL;
1077 	struct mlx5_fs_pool *pr_pool = NULL;
1078 	struct mlx5_core_dev *dev = ns->dev;
1079 	u8 hdr_idx = 0;
1080 	int err;
1081 
1082 	if (!params)
1083 		return -EINVAL;
1084 
1085 	hws_pool = &fs_ctx->hws_pool;
1086 
1087 	switch (params->type) {
1088 	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1089 	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1090 	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1091 		if (mlx5_fs_verify_encap_decap_params(dev, params))
1092 			return -EINVAL;
1093 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1094 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1095 						    params->size);
1096 		if (IS_ERR(pr_pool))
1097 			return PTR_ERR(pr_pool);
1098 		break;
1099 	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1100 		if (mlx5_fs_verify_encap_decap_params(dev, params))
1101 			return -EINVAL;
1102 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
1103 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
1104 						    params->size);
1105 		if (IS_ERR(pr_pool))
1106 			return PTR_ERR(pr_pool);
1107 		break;
1108 	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1109 		if (mlx5_fs_verify_encap_decap_params(dev, params))
1110 			return -EINVAL;
1111 		pr_pool = &hws_pool->dl3tnltol2_pool;
1112 		hdr_idx = params->size == ETH_HLEN ?
1113 			  MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
1114 			  MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
1115 		break;
1116 	case MLX5_REFORMAT_TYPE_INSERT_HDR:
1117 		err = mlx5_fs_verify_insert_header_params(dev, params);
1118 		if (err)
1119 			return err;
1120 		pr_pool = &hws_pool->insert_hdr_pool;
1121 		break;
1122 	case MLX5_REFORMAT_TYPE_REMOVE_HDR:
1123 		hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
1124 		if (!hws_action)
1125 			mlx5_core_err(dev, "Only vlan remove header supported\n");
1126 		break;
1127 	default:
1128 		mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
1129 			      params->type);
1130 		return -EOPNOTSUPP;
1131 	}
1132 
1133 	if (pr_pool) {
1134 		pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
1135 		if (IS_ERR_OR_NULL(pr_data))
1136 			return !pr_data ? -EINVAL : PTR_ERR(pr_data);
1137 		hws_action = pr_data->bulk->hws_action;
1138 		if (!hws_action) {
1139 			mlx5_core_err(dev,
1140 				      "Failed allocating packet-reformat action\n");
1141 			err = -EINVAL;
1142 			goto release_pr;
1143 		}
1144 		pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
1145 		if (!pr_data->data) {
1146 			err = -ENOMEM;
1147 			goto release_pr;
1148 		}
1149 		pr_data->hdr_idx = hdr_idx;
1150 		pr_data->data_size = params->size;
1151 		pkt_reformat->fs_hws_action.pr_data = pr_data;
1152 	}
1153 
1154 	pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
1155 	pkt_reformat->fs_hws_action.hws_action = hws_action;
1156 	return 0;
1157 
1158 release_pr:
1159 	if (pr_pool && pr_data)
1160 		mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1161 	return err;
1162 }
1163 
mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat * pkt_reformat)1164 static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
1165 						 struct mlx5_pkt_reformat *pkt_reformat)
1166 {
1167 	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
1168 	struct mlx5_core_dev *dev = ns->dev;
1169 	struct mlx5_fs_hws_pr *pr_data;
1170 	struct mlx5_fs_pool *pr_pool;
1171 
1172 	if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
1173 		return;
1174 
1175 	if (!pkt_reformat->fs_hws_action.pr_data) {
1176 		mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1177 		return;
1178 	}
1179 	pr_data = pkt_reformat->fs_hws_action.pr_data;
1180 
1181 	switch (pkt_reformat->reformat_type) {
1182 	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1183 	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
1184 	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
1185 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1186 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1187 						    pr_data->data_size);
1188 		break;
1189 	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
1190 		pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
1191 						    MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
1192 						    pr_data->data_size);
1193 		break;
1194 	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
1195 		pr_pool = &hws_pool->dl3tnltol2_pool;
1196 		break;
1197 	case MLX5_REFORMAT_TYPE_INSERT_HDR:
1198 		pr_pool = &hws_pool->insert_hdr_pool;
1199 		break;
1200 	default:
1201 		mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
1202 		return;
1203 	}
1204 	if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
1205 		mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
1206 		return;
1207 	}
1208 	kfree(pr_data->data);
1209 	mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
1210 	pkt_reformat->fs_hws_action.pr_data = NULL;
1211 }
1212 
1213 static struct mlx5_fs_pool *
mlx5_fs_create_mh_pool(struct mlx5_core_dev * dev,struct mlx5hws_action_mh_pattern * pattern,struct xarray * mh_pools,unsigned long index)1214 mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
1215 		       struct mlx5hws_action_mh_pattern *pattern,
1216 		       struct xarray *mh_pools, unsigned long index)
1217 {
1218 	struct mlx5_fs_pool *pool;
1219 	int err;
1220 
1221 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1222 	if (!pool)
1223 		return ERR_PTR(-ENOMEM);
1224 	err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
1225 	if (err)
1226 		goto free_pool;
1227 	err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
1228 	if (err)
1229 		goto cleanup_pool;
1230 	return pool;
1231 
1232 cleanup_pool:
1233 	mlx5_fs_hws_mh_pool_cleanup(pool);
1234 free_pool:
1235 	kfree(pool);
1236 	return ERR_PTR(err);
1237 }
1238 
/* Unregister a modify-header pool from its xarray, tear it down and free
 * it. Must mirror mlx5_fs_create_mh_pool().
 */
static void
mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
			unsigned long index)
{
	xa_erase(mh_pools, index);
	mlx5_fs_hws_mh_pool_cleanup(pool);
	kfree(pool);
}
1247 
/* Allocate a modify-header action. Modify-header actions with the same
 * pattern (same field set/add/copy layout) share a pool; a new pool is
 * created on first use of a pattern.
 */
static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					    u8 namespace, u8 num_actions,
					    void *modify_actions,
					    struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
	struct mlx5hws_action_mh_pattern pattern = {};
	struct mlx5_fs_hws_mh *mh_data = NULL;
	struct mlx5hws_action *hws_action;
	struct mlx5_fs_pool *pool;
	unsigned long i, cnt = 0;
	bool known_pattern;
	int err;

	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	pattern.data = modify_actions;

	/* Scan existing pools for one that matches this pattern; cnt ends
	 * up as the number of pools visited and is used as the new pool's
	 * xarray index.
	 * NOTE(review): if pools are ever destroyed at runtime, leaving
	 * index gaps, cnt could collide with an occupied index and
	 * xa_insert would fail — confirm pools are only torn down at
	 * context cleanup.
	 */
	known_pattern = false;
	xa_for_each(&hws_pool->mh_pools, i, pool) {
		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
			known_pattern = true;
			break;
		}
		cnt++;
	}

	if (!known_pattern) {
		pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
					      &hws_pool->mh_pools, cnt);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
	}
	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
	if (IS_ERR(mh_data)) {
		err = PTR_ERR(mh_data);
		goto destroy_pool;
	}
	hws_action = mh_data->bulk->hws_action;
	/* Keep a private copy of the modify-header action data */
	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
	if (!mh_data->data) {
		err = -ENOMEM;
		goto release_mh;
	}
	modify_hdr->fs_hws_action.mh_data = mh_data;
	modify_hdr->fs_hws_action.fs_pool = pool;
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
	modify_hdr->fs_hws_action.hws_action = hws_action;

	return 0;

release_mh:
	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
destroy_pool:
	/* Only tear the pool down if it was created by this call */
	if (!known_pattern)
		mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
	return err;
}
1305 
mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)1306 static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
1307 					       struct mlx5_modify_hdr *modify_hdr)
1308 {
1309 	struct mlx5_fs_hws_mh *mh_data;
1310 	struct mlx5_fs_pool *pool;
1311 
1312 	if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
1313 		mlx5_core_err(ns->dev, "Failed release modify-header\n");
1314 		return;
1315 	}
1316 
1317 	mh_data = modify_hdr->fs_hws_action.mh_data;
1318 	kfree(mh_data->data);
1319 	pool = modify_hdr->fs_hws_action.fs_pool;
1320 	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
1321 	modify_hdr->fs_hws_action.mh_data = NULL;
1322 }
1323 
/* Match definers are not supported on the HWS steering path. */
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
					     u16 format_id, u32 *match_mask)
{
	return -EOPNOTSUPP;
}
1329 
/* Match definers are not supported on the HWS steering path. */
static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					      int definer_id)
{
	return -EOPNOTSUPP;
}
1335 
mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace * ns,enum fs_flow_table_type ft_type)1336 static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
1337 					 enum fs_flow_table_type ft_type)
1338 {
1339 	if (ft_type != FS_FT_FDB)
1340 		return 0;
1341 
1342 	return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
1343 	       MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
1344 	       MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
1345 }
1346 
/* Report whether the device supports HW steering (HWS). */
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
	return mlx5hws_is_supported(dev);
}
1351 
/* Flow-steering command dispatch table for the HWS (hardware steering)
 * backend.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
	.create_flow_table = mlx5_cmd_hws_create_flow_table,
	.destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_hws_modify_flow_table,
	.update_root_ft = mlx5_cmd_hws_update_root_ft,
	.create_flow_group = mlx5_cmd_hws_create_flow_group,
	.destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
	.create_fte = mlx5_cmd_hws_create_fte,
	.delete_fte = mlx5_cmd_hws_delete_fte,
	.update_fte = mlx5_cmd_hws_update_fte,
	.packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_hws_create_match_definer,
	.destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
	.create_ns = mlx5_cmd_hws_create_ns,
	.destroy_ns = mlx5_cmd_hws_destroy_ns,
	.set_peer = mlx5_cmd_hws_set_peer,
	.get_capabilities = mlx5_cmd_hws_get_capabilities,
};
1373 
/* Return the HWS flow-steering command dispatch table. */
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
{
	return &mlx5_flow_cmds_hws;
}
1378