1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste_v1.h"
7
/* Translation table from FW modify-header field IDs (MLX5_ACTION_IN_FIELD_*)
 * to STE v1 HW modify-action fields.
 *
 * Each entry names the HW field (.hw_field) and the bit range
 * [.start .. .end] the SW field occupies inside it.  Where the same SW
 * field lands in different HW fields depending on protocol (e.g. IPv4 TTL
 * vs. IPv6 hop limit share the same HW slot), .l3_type / .l4_type record
 * which L3/L4 protocol the entry is valid for.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		/* 9 TCP flag bits (incl. NS), hence end = 24 */
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		/* Same HW slot as IPv4 TTL, selected by l3_type */
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
	},
};
135
/* Set the STE entry format (e.g. match / match-ranges) in the control area */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
140
/* Return true if this STE's miss address was already set at init time */
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
{
	u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);

	/* unlike MATCH STE, for MATCH_RANGES STE both hit and miss addresses
	 * are part of the action, so they both set as part of STE init
	 */
	return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
}
150
/* Program the 64-byte-aligned ICM miss address, split across the
 * miss_address_39_32 / miss_address_31_6 HW fields.
 */
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6; /* address in 64B units */

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
158
/* Reassemble the miss ICM address from its two HW fields;
 * inverse of dr_ste_v1_set_miss_addr().
 */
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
{
	u64 index =
		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);

	return index << 6; /* back from 64B units to bytes */
}
167
/* Set the per-byte mask that selects which tag bytes participate in match */
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
172
/* Read back the STE's byte mask */
u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
177
/* Set this STE's lookup type: high byte is the entry format,
 * low byte is the match definer context index.
 */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
183
/* Set the lookup type of the *next* table: high byte is the next entry
 * format, low byte is the hash definer context index.
 */
void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
189
dr_ste_v1_get_next_lu_type(u8 * hw_ste_p)190 u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
191 {
192 u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
193 u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
194
195 return (mode << 8 | index);
196 }
197
/* Set the GVMI (vport/function identifier) for the hit (next table) path */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
202
/* Program the hit (next hash table) ICM base address together with the
 * table size, packed into the next_table_base_* HW fields.
 */
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	/* address in 32B units, low bits carry the table size */
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
210
/* Initialize an STE: set its own lookup type, a don't-care next lookup
 * type, and stamp the owner GVMI on the entry itself and on both the
 * hit and miss address high bits.
 * Note: is_rx is unused on STE v1 (kept for interface compatibility).
 */
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
220
/* Prepare an STE for sending to the device: the HW layout expects the
 * tag and mask areas in swapped order relative to the SW layout, so
 * exchange them in place. Control-only writes need no swapping.
 */
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
	u8 saved_tag[DR_STE_SIZE_TAG] = {};
	u8 *tag_p = hw_ste_p + DR_STE_SIZE_CTRL;
	u8 *mask_p = tag_p + DR_STE_SIZE_TAG;

	if (ste_size == DR_STE_SIZE_CTRL)
		return;

	WARN_ON(ste_size != DR_STE_SIZE);

	/* Tag and mask are the same size - swap via a stack copy of the tag */
	memcpy(saved_tag, tag_p, DR_STE_SIZE_TAG);
	memcpy(tag_p, mask_p, DR_STE_SIZE_MASK);
	memcpy(mask_p, saved_tag, DR_STE_SIZE_TAG);
}
239
/* Build a single FLOW_TAG action: tag matching RX packets with flow_tag */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
246
/* Attach a flow counter to the STE (counter lives in the control area,
 * not in the action list).
 */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
251
/* Request HW re-parse of the packet after header-modifying actions */
void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
256
/* Build an L2 encap action: insert-by-pointer of a pre-programmed
 * reformat header (reformat_id), marked with the ENCAP attribute.
 */
void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	dr_ste_v1_set_reparse(hw_ste_p);
}
268
/* Build a generic insert-header action: insert the reformat header
 * (reformat_id) at byte 'offset' relative to the given packet anchor.
 */
void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
			      u32 reformat_id,
			      u8 anchor, u8 offset,
			      int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);

	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);

	dr_ste_v1_set_reparse(hw_ste_p);
}
288
/* Build a remove-header-by-size action: strip 'size' bytes starting at
 * byte 'offset' relative to the given packet anchor.
 */
void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
			      u8 anchor, u8 offset,
			      int size)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
303
/* Build a push-VLAN action: inline-insert the 4-byte VLAN header
 * right after the L2 MAC addresses.
 */
void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);

	dr_ste_v1_set_reparse(hw_ste_p);
}
316
/* Build a pop-VLAN action: remove vlans_num VLAN headers, starting at
 * the first VLAN anchor.
 */
void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
329
/* Build an L3 encap (L2-to-tunnel-L3) as two actions: first strip the
 * original L2 headers up to the IP anchor, then insert-by-pointer the
 * new tunnel headers (reformat_id).
 */
void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
			    u32 reformat_id, int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);

	dr_ste_v1_set_reparse(hw_ste_p);
}
350
/* Build an RX tunnel decap action: strip the outer headers up to the
 * inner MAC, propagating the VNI to the CQE.
 */
void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);

	dr_ste_v1_set_reparse(hw_ste_p);
}
362
/* Build an accelerated modify-header action.
 * If action_data is provided, it is a single pre-built modify action that
 * is copied in verbatim; otherwise build an accelerated-list action that
 * references the pattern and argument objects in ICM.
 */
static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p,
						      u8 *d_action,
						      u16 num_of_actions,
						      u32 rewrite_pattern,
						      u32 rewrite_args,
						      u8 *action_data)
{
	if (action_data) {
		memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE);
	} else {
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST);
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 modify_actions_pattern_pointer, rewrite_pattern);
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 number_of_modify_actions, num_of_actions);
		MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
			 modify_actions_argument_pointer, rewrite_args);
	}

	dr_ste_v1_set_reparse(hw_ste_p);
}
385
/* Build a basic (non-accelerated) modify-header action referencing a
 * list of num_of_actions modify actions stored in ICM at rewrite_index.
 */
static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p,
						u8 *s_action,
						u16 num_of_actions,
						u32 rewrite_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 rewrite_index);

	dr_ste_v1_set_reparse(hw_ste_p);
}
400
/* Dispatch a modify-header request to the accelerated path when a valid
 * pattern index is given, or to the basic modify-list path otherwise.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *action,
					  u16 num_of_actions,
					  u32 rewrite_pattern,
					  u32 rewrite_args,
					  u8 *action_data)
{
	if (rewrite_pattern == MLX5DR_INVALID_PATTERN_INDEX) {
		/* No accelerated modify header support - use the basic list */
		dr_ste_v1_set_basic_rewrite_actions(hw_ste_p,
						    action,
						    num_of_actions,
						    rewrite_args);
		return;
	}

	dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p,
						  action,
						  num_of_actions,
						  rewrite_pattern,
						  rewrite_args,
						  action_data);
}
422
/* Build an ASO flow-meter action: select the ASO object and line by the
 * meter offset, and direct the meter result to the given reg_c pair.
 */
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
					 u32 object_id,
					 u32 offset,
					 u8 dest_reg_id,
					 u8 init_color)
{
	MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_ASO);
	/* Each ASO object holds several meters - advance to the right object */
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
		 object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
	/* Convert reg_c index to HW 64bit index */
	MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
		 (dest_reg_id - 1) / 2);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
		 DR_STE_V1_ASO_CTX_TYPE_POLICERS);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
		 offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
		 init_color);
}
443
/* Configure a MATCH_RANGES STE to range-match on packet length using the
 * given range definer.
 */
static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
					      u32 min, u32 max)
{
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);

	/* When the STE will be sent, its mask and tags will be swapped in
	 * dr_ste_v1_prepare_for_postsend(). This, however, is match range STE
	 * which doesn't have mask, and shouldn't have mask/tag swapped.
	 * We're using the common utilities functions to send this STE, so need
	 * to allow for this swapping - place the values in the corresponding
	 * locations to allow flipping them when writing to ICM.
	 *
	 * min/max_value_2 corresponds to match_dw_0 in its definer.
	 * To allow mask/tag swapping, writing the min/max_2 to min/max_0.
	 *
	 * Pkt len is 2 bytes that are stored in the higher section of the DW.
	 */
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
}
464
/* Append a fresh don't-care MATCH STE to the STE array (used when the
 * current STE ran out of action slots). Advances *last_ste to the new
 * entry, bumps *added_stes, and clears the new entry's action area.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action;

	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
479
/* Like dr_ste_v1_arr_init_next_match(), but retype the new STE as a
 * MATCH_RANGES entry.
 */
static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
						u32 *added_stes,
						u16 gvmi)
{
	dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
487
/* Program the TX action chain into one or more STEs.
 *
 * Walks the requested action types in a fixed, HW-mandated order, writing
 * each action into the current STE's action area. 'action_sz' tracks the
 * remaining action budget of the current STE (a fresh chained STE offers
 * DR_STE_ACTION_TRIPLE_SZ); when an action doesn't fit - or must not share
 * an STE with a previous action (allow_modify_hdr / allow_encap flags) -
 * a new don't-care MATCH STE is appended via dr_ste_v1_arr_init_next_match()
 * and *added_stes is incremented.  Finally the hit GVMI/address are set on
 * the last STE of the chain.
 */
void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_pat_idx,
					      attr->modify_index,
					      attr->single_modify_action);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		/* Modify header must not share an STE with a following encap */
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			ste_ctx->set_push_vlan(last_ste, action,
					       attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	/* The reformat actions below are mutually exclusive */
	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		ste_ctx->set_encap(last_ste, action,
				   attr->reformat.id,
				   attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* encap_l3 is a single (remove) + double (insert) action pair */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		ste_ctx->set_encap_l3(last_ste,
				      action, d_action,
				      attr->reformat.id,
				      attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		ste_ctx->set_insert_hdr(last_ste, action,
					attr->reformat.id,
					attr->reformat.param_0,
					attr->reformat.param_1,
					attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		ste_ctx->set_remove_hdr(last_ste, action,
					attr->reformat.param_0,
					attr->reformat.param_1,
					attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_aso_flow_meter(action,
					     attr->aso_flow_meter.obj_id,
					     attr->aso_flow_meter.offset,
					     attr->aso_flow_meter.dest_reg_id,
					     attr->aso_flow_meter.init_color);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_RANGE]) {
		/* match ranges requires a new STE of its own type */
		dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
		dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);

		/* we do not support setting any action on the match ranges STE */
		action_sz = 0;

		dr_ste_v1_set_match_range_pkt_len(last_ste,
						  attr->range.definer_id,
						  attr->range.min,
						  attr->range.max);
	}

	/* set counter ID on the last STE to adhere to DMFS behavior */
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
644
/* Program the RX action chain into one or more STEs.
 *
 * Same budgeting scheme as dr_ste_v1_set_actions_tx(): 'action_sz' tracks
 * the current STE's remaining action space, and allow_modify_hdr/allow_ctr
 * flag action combinations that may not share an STE (decap vs. modify
 * header, counter placement). When an action doesn't fit or conflicts, a
 * fresh chained STE is appended and *added_stes incremented.
 */
void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_ctr = true;

	/* Decap first - the decap variants are mutually exclusive */
	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->decap_actions,
					      attr->decap_pat_idx,
					      attr->decap_index,
					      NULL);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
		allow_ctr = false;
	} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
		ste_ctx->set_rx_decap(last_ste, action);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
		allow_modify_hdr = false;
		allow_ctr = false;
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
		    !allow_modify_hdr) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}

		ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
		allow_ctr = false;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		/* Modify header and decapsulation must use different STEs */
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_pat_idx,
					      attr->modify_index,
					      attr->single_modify_action);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
			    !allow_modify_hdr) {
				dr_ste_v1_arr_init_next_match(&last_ste,
							      added_stes,
							      attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1,
						      last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
			}
			ste_ctx->set_push_vlan(last_ste, action,
					       attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	if (action_type_set[DR_ACTION_TYP_CTR]) {
		/* Counter action set after decap and before insert_hdr
		 * to exclude decaped / encaped header respectively.
		 */
		if (!allow_ctr) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
		}
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
		allow_ctr = false;
	}

	/* The reformat actions below are mutually exclusive */
	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		ste_ctx->set_encap(last_ste, action,
				   attr->reformat.id,
				   attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* encap_l3 is a single (remove) + double (insert) action pair */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}

		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		ste_ctx->set_encap_l3(last_ste,
				      action, d_action,
				      attr->reformat.id,
				      attr->reformat.size);
		/* action_sz is drained to 0 here, so any later action
		 * allocates a new STE; 'action' needs no advance.
		 */
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		/* Modify header, decap, and encap must use different STEs */
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		ste_ctx->set_insert_hdr(last_ste, action,
					attr->reformat.id,
					attr->reformat.param_0,
					attr->reformat.param_1,
					attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		ste_ctx->set_remove_hdr(last_ste, action,
					attr->reformat.param_0,
					attr->reformat.param_1,
					attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_aso_flow_meter(action,
					     attr->aso_flow_meter.obj_id,
					     attr->aso_flow_meter.offset,
					     attr->aso_flow_meter.dest_reg_id,
					     attr->aso_flow_meter.init_color);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_RANGE]) {
		/* match ranges requires a new STE of its own type */
		dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
		dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);

		/* we do not support setting any action on the match ranges STE */
		action_sz = 0;

		dr_ste_v1_set_match_range_pkt_len(last_ste,
						  attr->range.definer_id,
						  attr->range.min,
						  attr->range.max);
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
851
/* Program a v1 double SET action: write the low 'length' bits of 'data'
 * into destination dword 'hw_field', left-shifted by 'shifter'.
 * The shifter is adjusted by the v1 modify-header QW offset before it is
 * written to the descriptor.
 */
void dr_ste_v1_set_action_set(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	MLX5_SET(ste_double_action_set_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset,
		 hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter,
		 shifter + MLX5_MODIFY_HEADER_V1_QW_OFFSET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length,
		 length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
865
/* Program a v1 double ADD action: add 'data' to the 'length'-bit field of
 * destination dword 'hw_field' at bit position 'shifter'.
 * The shifter is adjusted by the v1 modify-header QW offset before it is
 * written to the descriptor.
 */
void dr_ste_v1_set_action_add(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	MLX5_SET(ste_double_action_add_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_ADD);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset,
		 hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter,
		 shifter + MLX5_MODIFY_HEADER_V1_QW_OFFSET);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length,
		 length);
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
879
/* Program a v1 double COPY action: copy 'dst_len' bits from source dword
 * 'src_hw_field' (shifted by 'src_shifter') into destination dword
 * 'dst_hw_field' (shifted by 'dst_shifter').
 * Both shifters are adjusted by the v1 modify-header QW offset before
 * being written to the descriptor.
 */
void dr_ste_v1_set_action_copy(u8 *d_action,
			       u8 dst_hw_field,
			       u8 dst_shifter,
			       u8 dst_len,
			       u8 src_hw_field,
			       u8 src_shifter)
{
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset,
		 dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter,
		 dst_shifter + MLX5_MODIFY_HEADER_V1_QW_OFFSET);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length,
		 dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset,
		 src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter,
		 src_shifter + MLX5_MODIFY_HEADER_V1_QW_OFFSET);
}
896
/* Build the HW action list for an L3 tunnel decap with L2 rewrite:
 * remove the outer L2/L3 headers, then insert the new L2 header given in
 * @data (@data_sz bytes; per the comment below, 14B or 18B).
 * Actions are written into @hw_action (@hw_action_sz bytes available) and
 * the number of consumed action slots is returned in @used_hw_action_num.
 * Returns 0 on success, -EINVAL if @hw_action cannot hold the needed actions.
 */
int dr_ste_v1_set_action_decap_l3_list(void *data,
				       u32 data_sz,
				       u8 *hw_action,
				       u32 hw_action_sz,
				       u16 *used_hw_action_num)
{
	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
	void *data_ptr = padded_data;
	u16 used_actions = 0;
	u32 inline_data_sz;
	u32 i;

	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
		return -EINVAL;

	/* Size (in bytes) of one inline-insert chunk */
	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);

	/* Add an alignment padding: place the header so that it ends on an
	 * inline-chunk boundary; the leading pad bytes are removed last.
	 */
	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);

	/* Remove L2L3 outer headers */
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
	hw_action += DR_STE_ACTION_DOUBLE_SZ;
	used_actions++; /* Remove and NOP are a single double action */

	/* Point to the last dword of the header */
	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;

	/* Add the new header using inline action 4Byte at a time, the header
	 * is added in reversed order to the beginning of the packet to avoid
	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
	 * two bytes are padded and later removed.
	 */
	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
		void *addr_inline;

		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
		/* The hardware expects here offset to words (2 bytes) */
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);

		/* Copy the chunk as raw bytes (memcpy) so the header byte
		 * order is preserved regardless of host endianness.
		 */
		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
					   hw_action, inline_data);
		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
		hw_action += DR_STE_ACTION_DOUBLE_SZ;
		used_actions++;
	}

	/* Remove first 2 extra bytes */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
	/* The hardware expects here size in words (2 bytes) */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
	used_actions++;

	*used_hw_action_num = used_actions;

	return 0;
}
964
/* Populate the ETHL2 SRC_DST STE bit mask from the match mask.
 * Fields that are fully expressed by this STE (the first VLAN qualifier)
 * are cleared in @value so later builders do not match them again.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* l3_type is fully masked whenever ip_version is masked at all */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	/* Either VLAN qualifier flag turns into a fully-masked qualifier;
	 * only the one that was set is consumed (cleared).
	 */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
989
/* Build the ETHL2 SRC_DST STE tag from the match value.
 * Consumed fields are cleared in @value. Returns -EINVAL for an IP
 * version other than 4 or 6.
 */
static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);

	switch (spec->ip_version) {
	case 0:
		/* No IP version requested */
		break;
	case IP_VERSION_IPV4:
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
		break;
	case IP_VERSION_IPV6:
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
		break;
	default:
		return -EINVAL;
	}

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
1025
dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1026 void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
1027 struct mlx5dr_match_param *mask)
1028 {
1029 dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1030
1031 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
1032 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1033 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
1034 }
1035
/* Build the IPv6 destination-address STE tag (four independent 32-bit words) */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	/* The four words go to distinct tag fields; copy lowest-first */
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);

	return 0;
}
1049
dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1050 void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
1051 struct mlx5dr_match_param *mask)
1052 {
1053 dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
1054
1055 sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
1056 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1057 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
1058 }
1059
/* Build the IPv6 source-address STE tag (four independent 32-bit words) */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	/* The four words go to distinct tag fields; copy lowest-first */
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);

	return 0;
}
1073
dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1074 void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
1075 struct mlx5dr_match_param *mask)
1076 {
1077 dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
1078
1079 sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
1080 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1081 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
1082 }
1083
/* Build the IPv4 5-tuple STE tag: addresses, L4 ports, protocol and IP
 * header bits. TCP and UDP ports are written into the same tag fields,
 * so the UDP writes follow the TCP ones for each field.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	/* TCP flags are set as a group and then consumed from the spec */
	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1108
dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1109 void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
1110 struct mlx5dr_match_param *mask)
1111 {
1112 dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
1113
1114 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
1115 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1116 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
1117 }
1118
/* Populate the L2 fields shared by the SRC and DST STE bit masks:
 * first/second VLAN headers, fragmentation, ethertype and IP version.
 * VLAN qualifier flags that this STE fully expresses are cleared from
 * the mask so later builders do not match them again.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* l3_type is fully masked whenever ip_version is masked at all */
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	/* The second VLAN header lives in the misc mask and depends on
	 * whether this is the inner or outer header builder.
	 */
	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1168
/* Build the L2 tag fields shared by the SRC and DST STEs: VLAN headers,
 * fragmentation, ethertype and IP version. Consumed fields are cleared
 * in @value. Returns -EINVAL for an IP version other than 4 or 6.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Unsupported IP version */
		return -EINVAL;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* The second VLAN header lives in the misc spec and depends on
	 * whether this is the inner or outer header builder.
	 */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1226
/* Populate the ETHL2 SRC STE bit mask: the common L2 fields plus SMAC */
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	/* Common L2 fields first, then the SMAC-specific ones */
	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
}
1237
/* Build the ETHL2 SRC STE tag: SMAC plus the shared L2 fields.
 * Returns the shared builder's result (-EINVAL on bad IP version).
 */
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1249
dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1250 void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
1251 struct mlx5dr_match_param *mask)
1252 {
1253 dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1254
1255 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
1256 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1257 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
1258 }
1259
/* Populate the ETHL2 DST STE bit mask: the common L2 fields plus DMAC */
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	/* Common L2 fields first, then the DMAC-specific ones */
	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);

	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
}
1270
/* Build the ETHL2 DST STE tag: DMAC plus the shared L2 fields.
 * Returns the shared builder's result (-EINVAL on bad IP version).
 */
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1282
dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1283 void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1284 struct mlx5dr_match_param *mask)
1285 {
1286 dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1287
1288 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
1289 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1290 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
1291 }
1292
/* Populate the ETHL2 TNL STE bit mask: DMAC, first VLAN, fragmentation,
 * ethertype, IP version and the VXLAN VNI. Fully-consumed mask fields
 * (VNI, VLAN qualifier flags) are cleared in @value.
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* l3_type is fully masked whenever ip_version is masked at all */
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	/* VNI occupies the upper 24 bits of the tunneling-network-id field */
	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1320
/* Build the ETHL2 TNL STE tag: DMAC, first VLAN, fragmentation,
 * ethertype, IP version and the VXLAN VNI. Consumed fields are cleared
 * in @value. Returns -EINVAL for an IP version other than 4 or 6.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	/* VNI occupies the upper 24 bits of the tunneling-network-id field */
	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Unsupported IP version */
		return -EINVAL;
	}

	return 0;
}
1362
dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1363 void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1364 struct mlx5dr_match_param *mask)
1365 {
1366 dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1367
1368 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
1369 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1370 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
1371 }
1372
/* Build the IPv4 misc STE tag: IHL and TTL */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	/* Independent header fields, written to distinct tag fields */
	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1384
dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1385 void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1386 struct mlx5dr_match_param *mask)
1387 {
1388 dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1389
1390 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
1391 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1392 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
1393 }
1394
/* Build the ETHL4 STE tag: L4 ports, protocol, IP header bits, hop limit,
 * IPv6 flow label and TCP flags. TCP and UDP ports are written into the
 * same tag fields, so the UDP writes follow the TCP ones for each field.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* Flow label comes from the misc params for the matching header */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	/* TCP flags are set as a group and then consumed from the spec */
	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1424
dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1425 void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1426 struct mlx5dr_match_param *mask)
1427 {
1428 dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1429
1430 sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
1431 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1432 sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
1433 }
1434
/* Build the MPLS STE tag for the inner or outer label stack */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	/* DR_STE_SET_MPLS expands the inner/outer token into field names */
	if (!sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);

	return 0;
}
1448
dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1449 void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
1450 struct mlx5dr_match_param *mask)
1451 {
1452 dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
1453
1454 sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
1455 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1456 sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
1457 }
1458
/* Build the GRE STE tag: protocol, C/K/S flag bits and key halves */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	/* All fields are independent: protocol and present bits first,
	 * then the two key halves.
	 */
	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	return 0;
}
1475
dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1476 void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1477 struct mlx5dr_match_param *mask)
1478 {
1479 dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1480
1481 sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
1482 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1483 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
1484 }
1485
/* Build the tunnel-MPLS STE tag: take the first outer MPLS label from
 * whichever encapsulation is being matched (MPLS-over-GRE if any of its
 * fields are masked, otherwise MPLS-over-UDP).
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1520
dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1521 void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1522 struct mlx5dr_match_param *mask)
1523 {
1524 dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1525
1526 sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
1527 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1528 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
1529 }
1530
/* Build the STE tag for MPLS-over-UDP through the flex parser assigned to
 * it: the four MPLS header fields are packed into one big-endian dword and
 * cleared from the match params once consumed.
 */
static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL |
		   misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP |
		   misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS |
		   misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;

	misc2->outer_first_mpls_over_udp_label = 0;
	misc2->outer_first_mpls_over_udp_exp = 0;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	parser_ptr = dr_ste_calc_flex_parser_offset(tag,
						    sb->caps->flex_parser_id_mpls_over_udp);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1555
dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1556 void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
1557 struct mlx5dr_match_param *mask)
1558 {
1559 dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
1560
1561 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1562 * flex parsers_{0-3}/{4-7} respectively.
1563 */
1564 sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
1565 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1566 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1567
1568 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1569 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
1570 }
1571
/* Build the STE tag for MPLS-over-GRE through the flex parser assigned to
 * it: the four MPLS header fields are packed into one big-endian dword and
 * cleared from the match params once consumed.
 */
static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL |
		   misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP |
		   misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS |
		   misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;

	misc2->outer_first_mpls_over_gre_label = 0;
	misc2->outer_first_mpls_over_gre_exp = 0;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	parser_ptr = dr_ste_calc_flex_parser_offset(tag,
						    sb->caps->flex_parser_id_mpls_over_gre);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1596
dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1597 void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
1598 struct mlx5dr_match_param *mask)
1599 {
1600 dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
1601
1602 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1603 * flex parsers_{0-3}/{4-7} respectively.
1604 */
1605 sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
1606 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1607 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1608
1609 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1610 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
1611 }
1612
/* Build the STE tag for ICMP matching. Picks the ICMPv4 or ICMPv6 match
 * fields, writes them to the tag and clears them from the match params.
 */
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u32 *hdr_data;
	u8 *type;
	u8 *code;

	if (DR_MASK_IS_ICMPV4_SET(misc3)) {
		hdr_data = &misc3->icmpv4_header_data;
		type = &misc3->icmpv4_type;
		code = &misc3->icmpv4_code;
	} else {
		hdr_data = &misc3->icmpv6_header_data;
		type = &misc3->icmpv6_type;
		code = &misc3->icmpv6_code;
	}

	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *hdr_data);
	MLX5_SET(ste_icmp_v1, tag, icmp_type, *type);
	MLX5_SET(ste_icmp_v1, tag, icmp_code, *code);

	*hdr_data = 0;
	*type = 0;
	*code = 0;

	return 0;
}
1643
dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1644 void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
1645 struct mlx5dr_match_param *mask)
1646 {
1647 dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
1648
1649 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1650 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1651 sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
1652 }
1653
/* Build the STE tag for matching on metadata register A. */
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc_2, metadata_reg_a);

	return 0;
}
1665
dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1666 void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1667 struct mlx5dr_match_param *mask)
1668 {
1669 dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
1670
1671 sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
1672 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1673 sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
1674 }
1675
/* Build the STE tag for TCP sequence/ack number matching, using the inner
 * or outer header fields depending on the builder's direction.
 */
static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;

	if (!sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc_3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc_3, outer_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc_3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc_3, inner_tcp_ack_num);
	}

	return 0;
}
1692
dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1693 void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1694 struct mlx5dr_match_param *mask)
1695 {
1696 dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1697
1698 sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1699 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1700 sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
1701 }
1702
1703 static int
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1704 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1705 struct mlx5dr_ste_build *sb,
1706 u8 *tag)
1707 {
1708 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1709
1710 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1711 outer_vxlan_gpe_flags, misc3,
1712 outer_vxlan_gpe_flags);
1713 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1714 outer_vxlan_gpe_next_protocol, misc3,
1715 outer_vxlan_gpe_next_protocol);
1716 DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1717 outer_vxlan_gpe_vni, misc3,
1718 outer_vxlan_gpe_vni);
1719
1720 return 0;
1721 }
1722
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1723 void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1724 struct mlx5dr_match_param *mask)
1725 {
1726 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1727
1728 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1729 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1730 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
1731 }
1732
1733 static int
dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1734 dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1735 struct mlx5dr_ste_build *sb,
1736 u8 *tag)
1737 {
1738 struct mlx5dr_match_misc *misc = &value->misc;
1739
1740 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1741 geneve_protocol_type, misc, geneve_protocol_type);
1742 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1743 geneve_oam, misc, geneve_oam);
1744 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1745 geneve_opt_len, misc, geneve_opt_len);
1746 DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1747 geneve_vni, misc, geneve_vni);
1748
1749 return 0;
1750 }
1751
dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1752 void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1753 struct mlx5dr_match_param *mask)
1754 {
1755 dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1756
1757 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1758 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1759 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
1760 }
1761
/* Build the STE tag for matching on the raw tunnel header dwords 0 and 1. */
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc5 *misc_5 = &value->misc5;

	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc_5, tunnel_header_0);
	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc_5, tunnel_header_1);

	return 0;
}
1773
dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1774 void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
1775 struct mlx5dr_match_param *mask)
1776 {
1777 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1778 dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
1779 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1780 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
1781 }
1782
/* Build the STE tag for steering registers 0/1, mapping metadata
 * registers C0-C3 to the high/low halves of registers 0 and 1.
 */
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc_2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc_2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc_2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc_2, metadata_reg_c_3);

	return 0;
}
1796
dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1797 void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
1798 struct mlx5dr_match_param *mask)
1799 {
1800 dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
1801
1802 sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
1803 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1804 sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
1805 }
1806
/* Build the STE tag for steering registers 2/3, mapping metadata
 * registers C4-C7 to the high/low halves of registers 2 and 3.
 */
static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc_2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc_2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc_2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc_2, metadata_reg_c_7);

	return 0;
}
1820
dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1821 void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
1822 struct mlx5dr_match_param *mask)
1823 {
1824 dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
1825
1826 sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
1827 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1828 sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
1829 }
1830
/* Build the bit mask for source GVMI/QP matching. The eswitch owner vhca
 * id is cleared here since it is consumed while building the tag (it
 * selects which domain's vport table is used), not matched directly.
 */
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
						  u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc, source_port);
	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc, source_sqn);
	misc->source_eswitch_owner_vhca_id = 0;
}
1840
/* Build the STE tag for matching on source QP/SQ and source vport GVMI.
 * When the mask carries an eswitch owner vhca id, the vport table is taken
 * from either the local domain or its registered peer domain; consumed
 * match fields (source_sqn, source_eswitch_owner_vhca_id, source_port)
 * are cleared from the match params.
 * Returns -EINVAL when the vhca id matches neither the local domain nor a
 * peer, or when the source port has no valid vport capability.
 */
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	int id = misc->source_eswitch_owner_vhca_id;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;
	struct mlx5dr_domain *peer;

	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		peer = xa_load(&dmn->peer_dmn_xa, id);
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (peer && (id == peer->info.caps.gvmi))
			vport_dmn = peer;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* Nothing more to do if the mask does not match on source_gvmi */
	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
		return 0;

	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
	if (!vport_cap) {
		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
			   misc->source_port);
		return -EINVAL;
	}

	if (vport_cap->vport_gvmi)
		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);

	misc->source_port = 0;
	return 0;
}
1886
dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1887 void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1888 struct mlx5dr_match_param *mask)
1889 {
1890 dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1891
1892 sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
1893 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1894 sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
1895 }
1896
/* Write one misc4 programmable-sample field into its flex parser slot in
 * the tag, mark the parser as used and clear the consumed id/value from
 * the match params. Out-of-range ids and parsers that were already
 * written are silently skipped.
 */
static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
				      u32 *misc4_field_value,
				      bool *parser_is_used,
				      u8 *tag)
{
	u32 id = *misc4_field_id;
	u8 *parser_ptr;

	if (id >= DR_NUM_OF_FLEX_PARSERS)
		return;
	if (parser_is_used[id])
		return;

	parser_is_used[id] = true;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);

	*misc4_field_id = 0;
	*misc4_field_value = 0;
}
1915
/* Build the STE tag from the four misc4 programmable sample fields.
 * (Note: "felx" in the name is a typo for "flex"; kept unchanged because
 * the flex_parser_0/1 init functions reference this symbol.)
 */
static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
	struct mlx5dr_match_misc4 *misc4 = &value->misc4;

	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_0,
				  &misc4->prog_sample_field_value_0,
				  parser_is_used, tag);
	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_1,
				  &misc4->prog_sample_field_value_1,
				  parser_is_used, tag);
	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_2,
				  &misc4->prog_sample_field_value_2,
				  parser_is_used, tag);
	dr_ste_v1_set_flex_parser(&misc4->prog_sample_field_id_3,
				  &misc4->prog_sample_field_value_3,
				  parser_is_used, tag);

	return 0;
}
1941
dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1942 void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1943 struct mlx5dr_match_param *mask)
1944 {
1945 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1946 dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1947 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1948 sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1949 }
1950
dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1951 void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1952 struct mlx5dr_match_param *mask)
1953 {
1954 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1955 dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1956 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1957 sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1958 }
1959
1960 static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1961 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
1962 struct mlx5dr_ste_build *sb,
1963 u8 *tag)
1964 {
1965 struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1966 u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
1967 u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1968
1969 MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
1970 misc3->geneve_tlv_option_0_data);
1971 misc3->geneve_tlv_option_0_data = 0;
1972
1973 return 0;
1974 }
1975
1976 void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)1977 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1978 struct mlx5dr_match_param *mask)
1979 {
1980 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1981
1982 /* STEs with lookup type FLEX_PARSER_{0/1} includes
1983 * flex parsers_{0-3}/{4-7} respectively.
1984 */
1985 sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
1986 DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1987 DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1988
1989 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1990 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
1991 }
1992
1993 static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)1994 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
1995 struct mlx5dr_ste_build *sb,
1996 u8 *tag)
1997 {
1998 u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
1999 struct mlx5dr_match_misc *misc = &value->misc;
2000
2001 if (misc->geneve_tlv_option_0_exist) {
2002 MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
2003 misc->geneve_tlv_option_0_exist = 0;
2004 }
2005
2006 return 0;
2007 }
2008
2009 void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2010 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
2011 struct mlx5dr_match_param *mask)
2012 {
2013 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
2014 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
2015 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2016 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
2017 }
2018
/* Build the STE tag for matching on the GTP-U tunnel header. */
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
						    struct mlx5dr_ste_build *sb,
						    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc_3, gtpu_msg_flags);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc_3, gtpu_msg_type);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc_3, gtpu_teid);

	return 0;
}
2031
dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2032 void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
2033 struct mlx5dr_match_param *mask)
2034 {
2035 dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
2036
2037 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
2038 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2039 sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
2040 }
2041
2042 static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)2043 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
2044 struct mlx5dr_ste_build *sb,
2045 u8 *tag)
2046 {
2047 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
2048 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
2049 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
2050 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
2051 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
2052 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
2053 if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
2054 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
2055 return 0;
2056 }
2057
2058 void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2059 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
2060 struct mlx5dr_match_param *mask)
2061 {
2062 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
2063
2064 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2065 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2066 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
2067 }
2068
2069 static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)2070 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
2071 struct mlx5dr_ste_build *sb,
2072 u8 *tag)
2073 {
2074 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
2075 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
2076 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
2077 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
2078 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
2079 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
2080 if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
2081 DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
2082 return 0;
2083 }
2084
2085 void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build * sb,struct mlx5dr_match_param * mask)2086 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
2087 struct mlx5dr_match_param *mask)
2088 {
2089 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
2090
2091 sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
2092 sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2093 sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
2094 }
2095
dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action * action)2096 int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
2097 {
2098 struct mlx5dr_ptrn_mgr *ptrn_mgr;
2099 int ret;
2100
2101 ptrn_mgr = action->rewrite->dmn->ptrn_mgr;
2102 if (!ptrn_mgr)
2103 return -EOPNOTSUPP;
2104
2105 action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr,
2106 action->rewrite->num_of_actions,
2107 action->rewrite->data);
2108 if (!action->rewrite->arg) {
2109 mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n");
2110 return -EAGAIN;
2111 }
2112
2113 action->rewrite->ptrn =
2114 mlx5dr_ptrn_cache_get_pattern(ptrn_mgr,
2115 action->rewrite->num_of_actions,
2116 action->rewrite->data);
2117 if (!action->rewrite->ptrn) {
2118 mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n");
2119 ret = -EAGAIN;
2120 goto put_arg;
2121 }
2122
2123 return 0;
2124
2125 put_arg:
2126 mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
2127 action->rewrite->arg);
2128 return ret;
2129 }
2130
/* Release the pattern cache entry and the argument object taken by
 * dr_ste_v1_alloc_modify_hdr_ptrn_arg().
 */
void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
{
	mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr,
				      action->rewrite->ptrn);
	mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
			   action->rewrite->arg);
}
2138
/* STEv1 context: dispatch table of builders, STE accessors and action
 * setters for devices using the version-1 STE format. Returned to the
 * generic steering layer by mlx5dr_ste_get_ctx_v1().
 */
static struct mlx5dr_ste_ctx ste_ctx_v1 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v1_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v1_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v1_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v1_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v1_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v1_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v1_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v1_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v1_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v1_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v1_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v1_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v1_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v1_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v1_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v1_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
	.build_register_0_init		= &dr_ste_v1_build_register_0_init,
	.build_register_1_init		= &dr_ste_v1_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v1_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v1_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v1_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
	.build_tnl_header_0_1_init	= &dr_ste_v1_build_tnl_header_0_1_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v1_get_next_lu_type,
	.is_miss_addr_set		= &dr_ste_v1_is_miss_addr_set,
	.set_miss_addr			= &dr_ste_v1_set_miss_addr,
	.get_miss_addr			= &dr_ste_v1_get_miss_addr,
	.set_hit_addr			= &dr_ste_v1_set_hit_addr,
	.set_byte_mask			= &dr_ste_v1_set_byte_mask,
	.get_byte_mask			= &dr_ste_v1_get_byte_mask,
	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_TX_POP |
					  DR_STE_CTX_ACTION_CAP_RX_PUSH |
					  DR_STE_CTX_ACTION_CAP_RX_ENCAP |
					  DR_STE_CTX_ACTION_CAP_POP_MDFY,
	.set_actions_rx			= &dr_ste_v1_set_actions_rx,
	.set_actions_tx			= &dr_ste_v1_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v1_action_modify_field_arr,
	.set_action_set			= &dr_ste_v1_set_action_set,
	.set_action_add			= &dr_ste_v1_set_action_add,
	.set_action_copy		= &dr_ste_v1_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
	.alloc_modify_hdr_chunk		= &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
	.dealloc_modify_hdr_chunk	= &dr_ste_v1_free_modify_hdr_ptrn_arg,
	/* Actions bit set */
	.set_encap			= &dr_ste_v1_set_encap,
	.set_push_vlan			= &dr_ste_v1_set_push_vlan,
	.set_pop_vlan			= &dr_ste_v1_set_pop_vlan,
	.set_rx_decap			= &dr_ste_v1_set_rx_decap,
	.set_encap_l3			= &dr_ste_v1_set_encap_l3,
	.set_insert_hdr			= &dr_ste_v1_set_insert_hdr,
	.set_remove_hdr			= &dr_ste_v1_set_remove_hdr,
	/* Send */
	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
};
2208
/* Return the static STEv1 context singleton. */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
{
	return &ste_ctx_v1;
}
2213