DPDK  24.03.0
rte_swx_pipeline_internal.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
6 
7 #include <inttypes.h>
8 #include <string.h>
9 #include <sys/queue.h>
10 
11 #include <rte_bitops.h>
12 #include <rte_byteorder.h>
13 #include <rte_common.h>
14 #include <rte_cycles.h>
15 #include <rte_prefetch.h>
16 #include <rte_meter.h>
17 
18 #include <rte_swx_table_selector.h>
19 #include <rte_swx_table_learner.h>
20 #include <rte_swx_pipeline.h>
21 #include <rte_swx_ctl.h>
22 
23 #ifndef TRACE_LEVEL
24 #define TRACE_LEVEL 0
25 #endif
26 
27 #if TRACE_LEVEL
28 #define TRACE(...) printf(__VA_ARGS__)
29 #else
30 #define TRACE(...)
31 #endif
32 
33 /*
34  * Environment.
35  */
36 #define ntoh64(x) rte_be_to_cpu_64(x)
37 #define hton64(x) rte_cpu_to_be_64(x)
38 
39 /*
40  * Struct.
41  */
/* One field of a struct type (header or meta-data struct). */
struct field {
	char name[RTE_SWX_NAME_SIZE]; /* Field name. */
	uint32_t n_bits; /* Field size in bits. */
	uint32_t offset; /* Field offset within its parent struct. */
	int var_size; /* Non-zero when the field has variable size. */
};
48 
/* Named struct type: ordered array of fields plus total size bookkeeping.
 * n_bits_min can differ from n_bits when a variable-size field is present.
 */
struct struct_type {
	TAILQ_ENTRY(struct_type) node; /* Membership in struct_type_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct field *fields; /* Array of n_fields elements. */
	uint32_t n_fields;
	uint32_t n_bits; /* Total struct size in bits. */
	uint32_t n_bits_min; /* Minimum struct size in bits. */
	int var_size; /* Non-zero when any field has variable size. */
};

TAILQ_HEAD(struct_type_tailq, struct_type);
60 
61 /*
62  * Input port.
63  */
/* Registered input port type: name plus the ops vtable used to drive it. */
struct port_in_type {
	TAILQ_ENTRY(port_in_type) node; /* Membership in port_in_type_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct rte_swx_port_in_ops ops;
};

TAILQ_HEAD(port_in_type_tailq, port_in_type);

/* Instantiated input port: its type, its opaque per-port object and its ID. */
struct port_in {
	TAILQ_ENTRY(port_in) node; /* Membership in port_in_tailq. */
	struct port_in_type *type;
	void *obj; /* Opaque object created by the port type. */
	uint32_t id;
};

TAILQ_HEAD(port_in_tailq, port_in);
80 
/* Data-path view of an input port, indexed by port ID.
 * NOTE(review): upstream DPDK also declares the pkt_rx callback here; this
 * copy appears to have lost that line during extraction — confirm.
 */
struct port_in_runtime {
	void *obj; /* Opaque object passed back to the port callbacks. */
};
85 
86 /*
87  * Output port.
88  */
/* Registered output port type: name plus the ops vtable used to drive it. */
struct port_out_type {
	TAILQ_ENTRY(port_out_type) node; /* Membership in port_out_type_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct rte_swx_port_out_ops ops;
};

TAILQ_HEAD(port_out_type_tailq, port_out_type);

/* Instantiated output port: its type, its opaque per-port object and its ID. */
struct port_out {
	TAILQ_ENTRY(port_out) node; /* Membership in port_out_tailq. */
	struct port_out_type *type;
	void *obj; /* Opaque object created by the port type. */
	uint32_t id;
};

TAILQ_HEAD(port_out_tailq, port_out);
105 
/* Data-path view of an output port, indexed by port ID.
 * NOTE(review): upstream DPDK also declares the pkt_tx and flush callbacks
 * here; this copy appears to have lost those lines during extraction —
 * confirm.
 */
struct port_out_runtime {
	rte_swx_port_out_pkt_fast_clone_tx_t pkt_fast_clone_tx; /* Fast-clone TX (mirroring). */
	rte_swx_port_out_pkt_clone_tx_t pkt_clone_tx; /* Full-clone TX (mirroring). */
	void *obj; /* Opaque object passed back to the port callbacks. */
};
113 
114 /*
115  * Packet mirroring.
116  */
/* Packet mirroring session configuration. */
struct mirroring_session {
	uint32_t port_id; /* Output port the mirror copy is sent to. */
	int fast_clone; /* Non-zero to use the fast-clone TX path. */
	uint32_t truncation_length; /* Max length of the mirror copy. */
};
122 
123 /*
124  * Extern object.
125  */
/* Member function of an extern object type.
 * NOTE(review): upstream DPDK also declares the function pointer here; this
 * copy appears to have lost that line during extraction — confirm.
 */
struct extern_type_member_func {
	TAILQ_ENTRY(extern_type_member_func) node; /* Membership in funcs list. */
	char name[RTE_SWX_NAME_SIZE];
	uint32_t id; /* Function ID within its extern type. */
};

TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
134 
/* Extern object type: mailbox struct layout plus the member function list.
 * NOTE(review): upstream DPDK also declares constructor/destructor callbacks
 * here; this copy appears to have lost those lines during extraction —
 * confirm.
 */
struct extern_type {
	TAILQ_ENTRY(extern_type) node; /* Membership in extern_type_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *mailbox_struct_type; /* Layout of the mailbox. */
	struct extern_type_member_func_tailq funcs; /* Member function list. */
	uint32_t n_funcs;
};

TAILQ_HEAD(extern_type_tailq, extern_type);
146 
/* Instantiated extern object. */
struct extern_obj {
	TAILQ_ENTRY(extern_obj) node; /* Membership in extern_obj_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct extern_type *type;
	void *obj; /* Opaque object created by the extern type. */
	uint32_t struct_id; /* ID of the mailbox struct in the thread struct array. */
	uint32_t id;
};

TAILQ_HEAD(extern_obj_tailq, extern_obj);
157 
/* Max member functions per extern type; overridable at build time. */
#ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
#define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
#endif

/* Data-path view of an extern object: flat function table for direct
 * indexing by function ID, plus the per-thread mailbox.
 */
struct extern_obj_runtime {
	void *obj; /* Opaque object passed to the member functions. */
	uint8_t *mailbox; /* Per-thread mailbox storage. */
	rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
};
167 
168 /*
169  * Extern function.
170  */
/* Registered extern (free-standing) function.
 * NOTE(review): upstream DPDK also declares the function pointer here; this
 * copy appears to have lost that line during extraction — confirm.
 */
struct extern_func {
	TAILQ_ENTRY(extern_func) node; /* Membership in extern_func_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *mailbox_struct_type; /* Layout of the mailbox. */
	uint32_t struct_id; /* ID of the mailbox struct in the thread struct array. */
	uint32_t id;
};

TAILQ_HEAD(extern_func_tailq, extern_func);
181 
/* Data-path view of an extern function.
 * NOTE(review): upstream DPDK also declares the function pointer here; this
 * copy appears to have lost that line during extraction — confirm.
 */
struct extern_func_runtime {
	uint8_t *mailbox; /* Per-thread mailbox storage. */
};
186 
187 /*
188  * Hash function.
189  */
/* Registered hash function. */
struct hash_func {
	TAILQ_ENTRY(hash_func) node; /* Membership in hash_func_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	rte_swx_hash_func_t func;
	uint32_t id;
};

TAILQ_HEAD(hash_func_tailq, hash_func);

/* Data-path view of a hash function: just the function pointer. */
struct hash_func_runtime {
	rte_swx_hash_func_t func;
};
202 
203 /*
204  * RSS.
205  */
/* Registered RSS object. */
struct rss {
	TAILQ_ENTRY(rss) node; /* Membership in rss_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	uint32_t id;
};

TAILQ_HEAD(rss_tailq, rss);

/* Data-path view of an RSS object: the hash key, stored inline. */
struct rss_runtime {
	uint32_t key_size; /* key size in bytes. */
	uint8_t key[]; /* key. */
};
218 
219 /*
220  * Header.
221  */
/* Declared packet header: an instance of a struct type. */
struct header {
	TAILQ_ENTRY(header) node; /* Membership in header_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *st; /* Header layout. */
	uint32_t struct_id; /* ID of this header in the thread struct array. */
	uint32_t id;
};

TAILQ_HEAD(header_tailq, header);

/* Data-path state of an extracted or generated header. */
struct header_runtime {
	uint8_t *ptr0; /* Start of the header storage. */
	uint32_t n_bytes; /* Current header size in bytes. */
};

/* Data-path state of an emitted header. */
struct header_out_runtime {
	uint8_t *ptr0; /* Start of the emit storage. */
	uint8_t *ptr; /* Current write position. */
	uint32_t n_bytes; /* Number of bytes emitted so far. */
};
242 
243 /*
244  * Instruction.
245  */
246 
247 /* Operand endianness conventions:
248  *
249  * Case 1: Small fields (i.e. fields with size <= 64 bits)
250  *
251  * Packet headers are always in Network Byte Order (NBO), i.e. big endian.
252  * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
253  * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
254  * when transferred to packet meta-data and in NBO when transferred to packet
255  * headers.
256  *
257  * Notation conventions:
258  * -Header field: H = h.header.field (dst/src)
259  * -Meta-data field: M = m.field (dst/src)
260  * -Extern object mailbox field: E = e.field (dst/src)
261  * -Extern function mailbox field: F = f.field (dst/src)
262  * -Table action data field: T = t.field (src only)
263  * -Immediate value: I = 32-bit unsigned value (src only)
264  *
265  * Case 2: Big fields (i.e. fields with size > 64 bits)
266  *
267  * The big fields are allowed in both headers and meta-data, but they are always
268  * stored in NBO. This is why the few instructions that accept a big field
269  * operand require that the other operand, in case it is a small operand, be
270  * stored in NBO as well, i.e. the small operand must be a header field
271  * (i.e. meta-data field not allowed in this case).
272  *
273  * Notation conventions:
274  * -Header or meta-data big field: HM-NBO.
275  */
276 
/* Opcode space of the SWX pipeline instruction set. The operand letter codes
 * (H, M, E, F, T, I) are defined in the "Operand endianness conventions"
 * comment above. Many instructions have several opcodes, one per operand
 * endianness combination, selected at translation time. INSTR_CUSTOM_0 marks
 * the start of the custom (user-registered) instruction opcodes.
 */
enum instruction_type {
	/* rx m.port_in */
	INSTR_RX,

	/* tx port_out
	 * port_out = MI
	 */
	INSTR_TX, /* port_out = M */
	INSTR_TX_I, /* port_out = I */
	INSTR_DROP,

	/*
	 * mirror slot_id session_id
	 * slot_id = MEFT
	 * session_id = MEFT
	 */
	INSTR_MIRROR,

	/* recirculate
	 */
	INSTR_RECIRCULATE,

	/* recircid m.recirc_pass_id
	 * Read the internal recirculation pass ID into the specified meta-data field.
	 */
	INSTR_RECIRCID,

	/* extract h.header */
	INSTR_HDR_EXTRACT,
	INSTR_HDR_EXTRACT2,
	INSTR_HDR_EXTRACT3,
	INSTR_HDR_EXTRACT4,
	INSTR_HDR_EXTRACT5,
	INSTR_HDR_EXTRACT6,
	INSTR_HDR_EXTRACT7,
	INSTR_HDR_EXTRACT8,

	/* extract h.header m.last_field_size */
	INSTR_HDR_EXTRACT_M,

	/* lookahead h.header */
	INSTR_HDR_LOOKAHEAD,

	/* emit h.header */
	INSTR_HDR_EMIT,
	INSTR_HDR_EMIT_TX,
	INSTR_HDR_EMIT2_TX,
	INSTR_HDR_EMIT3_TX,
	INSTR_HDR_EMIT4_TX,
	INSTR_HDR_EMIT5_TX,
	INSTR_HDR_EMIT6_TX,
	INSTR_HDR_EMIT7_TX,
	INSTR_HDR_EMIT8_TX,

	/* validate h.header */
	INSTR_HDR_VALIDATE,

	/* invalidate h.header */
	INSTR_HDR_INVALIDATE,

	/* mov dst src
	 * dst = src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_MOV, /* dst = MEF, src = MEFT; size(dst) <= 64 bits, size(src) <= 64 bits. */
	INSTR_MOV_MH, /* dst = MEF, src = H; size(dst) <= 64 bits, size(src) <= 64 bits. */
	INSTR_MOV_HM, /* dst = H, src = MEFT; size(dst) <= 64 bits, size(src) <= 64 bits. */
	INSTR_MOV_HH, /* dst = H, src = H; size(dst) <= 64 bits, size(src) <= 64 bits. */
	INSTR_MOV_DMA, /* dst and src in NBO format. */
	INSTR_MOV_128, /* dst and src in NBO format, size(dst) = size(src) = 128 bits. */
	INSTR_MOV_128_64, /* dst and src in NBO format, size(dst) = 128 bits, size(src) = 64 b. */
	INSTR_MOV_64_128, /* dst and src in NBO format, size(dst) = 64 bits, size(src) = 128 b. */
	INSTR_MOV_128_32, /* dst and src in NBO format, size(dst) = 128 bits, size(src) = 32 b. */
	INSTR_MOV_32_128, /* dst and src in NBO format, size(dst) = 32 bits, size(src) = 128 b. */
	INSTR_MOV_I, /* dst = HMEF, src = I; size(dst) <= 64 bits. */

	/* movh dst src
	 * Read/write the upper half (i.e. bits 127 .. 64) of a 128-bit field into/from a 64-bit
	 * header field:
	 *
	 * dst64 = src128[127:64], where: dst64 = H, src128 = HM-NBO.
	 * dst128[127:64] = src64, where: dst128 = HM-NBO, src64 = H.
	 *
	 * Typically required for operations involving IPv6 addresses.
	 */
	INSTR_MOVH,

	/* dma h.header t.field
	 * memcpy(h.header, t.field, sizeof(h.header))
	 */
	INSTR_DMA_HT,
	INSTR_DMA_HT2,
	INSTR_DMA_HT3,
	INSTR_DMA_HT4,
	INSTR_DMA_HT5,
	INSTR_DMA_HT6,
	INSTR_DMA_HT7,
	INSTR_DMA_HT8,

	/* add dst src
	 * dst += src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_ADD, /* dst = MEF, src = MEF */
	INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
	INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
	INSTR_ALU_ADD_HH, /* dst = H, src = H */
	INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
	INSTR_ALU_ADD_HI, /* dst = H, src = I */

	/* sub dst src
	 * dst -= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SUB, /* dst = MEF, src = MEF */
	INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
	INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
	INSTR_ALU_SUB_HH, /* dst = H, src = H */
	INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
	INSTR_ALU_SUB_HI, /* dst = H, src = I */

	/* ckadd dst src
	 * dst = dst '+ src[0:1] '+ src[2:3] '+ ...
	 * dst = H, src = {H, h.header}, '+ = 1's complement addition operator
	 */
	INSTR_ALU_CKADD_FIELD, /* src = H */
	INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 bytes. */
	INSTR_ALU_CKADD_STRUCT, /* src = h.header, with sizeof(header) any 4-byte multiple. */

	/* cksub dst src
	 * dst = dst '- src
	 * dst = H, src = H, '- = 1's complement subtraction operator
	 */
	INSTR_ALU_CKSUB_FIELD,

	/* and dst src
	 * dst &= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_AND, /* dst = MEF, src = MEFT */
	INSTR_ALU_AND_MH, /* dst = MEF, src = H */
	INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
	INSTR_ALU_AND_HH, /* dst = H, src = H */
	INSTR_ALU_AND_I, /* dst = HMEF, src = I */

	/* or dst src
	 * dst |= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_OR, /* dst = MEF, src = MEFT */
	INSTR_ALU_OR_MH, /* dst = MEF, src = H */
	INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
	INSTR_ALU_OR_HH, /* dst = H, src = H */
	INSTR_ALU_OR_I, /* dst = HMEF, src = I */

	/* xor dst src
	 * dst ^= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_XOR, /* dst = MEF, src = MEFT */
	INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
	INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
	INSTR_ALU_XOR_HH, /* dst = H, src = H */
	INSTR_ALU_XOR_I, /* dst = HMEF, src = I */

	/* shl dst src
	 * dst <<= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHL, /* dst = MEF, src = MEF */
	INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHL_HH, /* dst = H, src = H */
	INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHL_HI, /* dst = H, src = I */

	/* shr dst src
	 * dst >>= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHR, /* dst = MEF, src = MEF */
	INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHR_HH, /* dst = H, src = H */
	INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHR_HI, /* dst = H, src = I */

	/* regprefetch REGARRAY index
	 * prefetch REGARRAY[index]
	 * index = HMEFTI
	 */
	INSTR_REGPREFETCH_RH, /* index = H */
	INSTR_REGPREFETCH_RM, /* index = MEFT */
	INSTR_REGPREFETCH_RI, /* index = I */

	/* regrd dst REGARRAY index
	 * dst = REGARRAY[index]
	 * dst = HMEF, index = HMEFTI
	 */
	INSTR_REGRD_HRH, /* dst = H, index = H */
	INSTR_REGRD_HRM, /* dst = H, index = MEFT */
	INSTR_REGRD_HRI, /* dst = H, index = I */
	INSTR_REGRD_MRH, /* dst = MEF, index = H */
	INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
	INSTR_REGRD_MRI, /* dst = MEF, index = I */

	/* regwr REGARRAY index src
	 * REGARRAY[index] = src
	 * index = HMEFTI, src = HMEFTI
	 */
	INSTR_REGWR_RHH, /* index = H, src = H */
	INSTR_REGWR_RHM, /* index = H, src = MEFT */
	INSTR_REGWR_RHI, /* index = H, src = I */
	INSTR_REGWR_RMH, /* index = MEFT, src = H */
	INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
	INSTR_REGWR_RMI, /* index = MEFT, src = I */
	INSTR_REGWR_RIH, /* index = I, src = H */
	INSTR_REGWR_RIM, /* index = I, src = MEFT */
	INSTR_REGWR_RII, /* index = I, src = I */

	/* regadd REGARRAY index src
	 * REGARRAY[index] += src
	 * index = HMEFTI, src = HMEFTI
	 */
	INSTR_REGADD_RHH, /* index = H, src = H */
	INSTR_REGADD_RHM, /* index = H, src = MEFT */
	INSTR_REGADD_RHI, /* index = H, src = I */
	INSTR_REGADD_RMH, /* index = MEFT, src = H */
	INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
	INSTR_REGADD_RMI, /* index = MEFT, src = I */
	INSTR_REGADD_RIH, /* index = I, src = H */
	INSTR_REGADD_RIM, /* index = I, src = MEFT */
	INSTR_REGADD_RII, /* index = I, src = I */

	/* metprefetch METARRAY index
	 * prefetch METARRAY[index]
	 * index = HMEFTI
	 */
	INSTR_METPREFETCH_H, /* index = H */
	INSTR_METPREFETCH_M, /* index = MEFT */
	INSTR_METPREFETCH_I, /* index = I */

	/* meter METARRAY index length color_in color_out
	 * color_out = meter(METARRAY[index], length, color_in)
	 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
	 */
	INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
	INSTR_METER_HHI, /* index = H, length = H, color_in = I */
	INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
	INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
	INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
	INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
	INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
	INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
	INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
	INSTR_METER_IHI, /* index = I, length = H, color_in = I */
	INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
	INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */

	/* table TABLE */
	INSTR_TABLE,
	INSTR_TABLE_AF,
	INSTR_SELECTOR,
	INSTR_LEARNER,
	INSTR_LEARNER_AF,

	/* learn ACTION_NAME [ m.action_first_arg ] m.timeout_id */
	INSTR_LEARNER_LEARN,

	/* rearm [ m.timeout_id ] */
	INSTR_LEARNER_REARM,
	INSTR_LEARNER_REARM_NEW,

	/* forget */
	INSTR_LEARNER_FORGET,

	/* entryid m.table_entry_id
	 * Read the internal table entry ID into the specified meta-data field.
	 */
	INSTR_ENTRYID,

	/* extern e.obj.func */
	INSTR_EXTERN_OBJ,

	/* extern f.func */
	INSTR_EXTERN_FUNC,

	/* hash HASH_FUNC_NAME dst src_first src_last
	 * Compute hash value over range of struct fields.
	 * dst = M
	 * src_first = HMEFT
	 * src_last = HMEFT
	 * src_first and src_last must be fields within the same struct
	 */
	INSTR_HASH_FUNC,

	/* rss RSS_OBJ_NAME dst src_first src_last
	 * Compute the RSS hash value over range of struct fields.
	 * dst = M
	 * src_first = HMEFT
	 * src_last = HMEFT
	 * src_first and src_last must be fields within the same struct
	 */
	INSTR_RSS,

	/* jmp LABEL
	 * Unconditional jump
	 */
	INSTR_JMP,

	/* jmpv LABEL h.header
	 * Jump if header is valid
	 */
	INSTR_JMP_VALID,

	/* jmpnv LABEL h.header
	 * Jump if header is invalid
	 */
	INSTR_JMP_INVALID,

	/* jmph LABEL
	 * Jump if table lookup hit
	 */
	INSTR_JMP_HIT,

	/* jmpnh LABEL
	 * Jump if table lookup miss
	 */
	INSTR_JMP_MISS,

	/* jmpa LABEL ACTION
	 * Jump if action run
	 */
	INSTR_JMP_ACTION_HIT,

	/* jmpna LABEL ACTION
	 * Jump if action not run
	 */
	INSTR_JMP_ACTION_MISS,

	/* jmpeq LABEL a b
	 * Jump if a is equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_EQ, /* a = MEFT, b = MEFT */
	INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
	INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
	INSTR_JMP_EQ_HH, /* a = H, b = H */
	INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmpneq LABEL a b
	 * Jump if a is not equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_NEQ, /* a = MEFT, b = MEFT */
	INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
	INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
	INSTR_JMP_NEQ_HH, /* a = H, b = H */
	INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmplt LABEL a b
	 * Jump if a is less than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_LT, /* a = MEFT, b = MEFT */
	INSTR_JMP_LT_MH, /* a = MEFT, b = H */
	INSTR_JMP_LT_HM, /* a = H, b = MEFT */
	INSTR_JMP_LT_HH, /* a = H, b = H */
	INSTR_JMP_LT_MI, /* a = MEFT, b = I */
	INSTR_JMP_LT_HI, /* a = H, b = I */

	/* jmpgt LABEL a b
	 * Jump if a is greater than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_GT, /* a = MEFT, b = MEFT */
	INSTR_JMP_GT_MH, /* a = MEFT, b = H */
	INSTR_JMP_GT_HM, /* a = H, b = MEFT */
	INSTR_JMP_GT_HH, /* a = H, b = H */
	INSTR_JMP_GT_MI, /* a = MEFT, b = I */
	INSTR_JMP_GT_HI, /* a = H, b = I */

	/* return
	 * Return from action
	 */
	INSTR_RETURN,

	/* Start of custom instructions. */
	INSTR_CUSTOM_0,
};
667 
/* Generic small (<= 64-bit) instruction operand: locates a field within one
 * of the thread's structs (see thread->structs indexing by struct_id).
 */
struct instr_operand {
	uint8_t struct_id; /* Index into the thread struct array. */
	uint8_t n_bits; /* Operand size in bits. */
	uint8_t offset; /* Operand offset within its struct. */
	uint8_t pad;
};
674 
/* Operands of the I/O instructions (rx/tx, extract/emit): the io part holds
 * the port operand (field locator or immediate value, overlaid in a union),
 * the hdr part describes up to 8 headers processed by one instruction.
 */
struct instr_io {
	struct {
		union {
			struct {
				uint8_t offset;
				uint8_t n_bits;
				uint8_t pad[2];
			};

			uint32_t val; /* Immediate port value. */
		};
	} io;

	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
		uint8_t n_bytes[8];
	} hdr;
};
694 
/* Operands of the header validate/invalidate instructions. */
struct instr_hdr_validity {
	uint8_t header_id;
	uint8_t struct_id;
};

/* Operand of the table/selector/learner lookup instructions. */
struct instr_table {
	uint8_t table_id;
};

/* Operands of the learn instruction. */
struct instr_learn {
	uint8_t action_id; /* Action to install for the learned entry. */
	uint8_t mf_first_arg_offset; /* Meta-data offset of the first action argument. */
	uint8_t mf_timeout_id_offset; /* Meta-data offset of the timeout ID field. */
	uint8_t mf_timeout_id_n_bits; /* Size in bits of the timeout ID field. */
};
710 
/* Operands of the extern object member function call instruction. */
struct instr_extern_obj {
	uint8_t ext_obj_id;
	uint8_t func_id;
};

/* Operand of the extern function call instruction. */
struct instr_extern_func {
	uint8_t ext_func_id;
};
719 
/* Operands of the hash instruction: destination meta-data field plus a
 * contiguous source byte range within a single struct.
 */
struct instr_hash_func {
	uint8_t hash_func_id;

	struct {
		uint8_t offset;
		uint8_t n_bits;
	} dst;

	struct {
		uint8_t struct_id;
		uint16_t offset;
		uint16_t n_bytes;
	} src;
};

/* Operands of the rss instruction; same layout as the hash instruction. */
struct instr_rss {
	uint8_t rss_obj_id;

	struct {
		uint8_t offset;
		uint8_t n_bits;
	} dst;

	struct {
		uint8_t struct_id;
		uint16_t offset;
		uint16_t n_bytes;
	} src;
};
749 
/* Operands of two-operand instructions (mov, ALU, mirror): destination field
 * plus either a source field or a 64-bit immediate, overlaid in a union.
 */
struct instr_dst_src {
	struct instr_operand dst;
	union {
		struct instr_operand src;
		uint64_t src_val; /* Immediate source value. */
	};
};
757 
/* Operands of the register array instructions (regprefetch/regrd/regwr/
 * regadd): both index and data operand can be a field or an immediate.
 */
struct instr_regarray {
	uint8_t regarray_id;
	uint8_t pad[3];

	union {
		struct instr_operand idx;
		uint32_t idx_val; /* Immediate index. */
	};

	union {
		struct instr_operand dstsrc;
		uint64_t dstsrc_val; /* Immediate data value. */
	};
};
772 
/* Operands of the meter instruction: index and input color can each be a
 * field or an immediate; length and output color are always fields.
 */
struct instr_meter {
	uint8_t metarray_id;
	uint8_t pad[3];

	union {
		struct instr_operand idx;
		uint32_t idx_val; /* Immediate index. */
	};

	struct instr_operand length;

	union {
		struct instr_operand color_in;
		uint32_t color_in_val; /* Immediate input color. */
	};

	struct instr_operand color_out;
};
791 
/* Operands of the dma instruction: up to 8 header copies per instruction,
 * each described by destination header/struct IDs, source offset and size.
 */
struct instr_dma {
	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
	} dst;

	struct {
		uint8_t offset[8];
	} src;

	uint16_t n_bytes[8];
};
804 
/* Operands of the jump instructions: resolved target instruction pointer,
 * plus the comparison operands (or header/action ID for validity/action
 * jumps) overlaid in unions.
 */
struct instr_jmp {
	struct instruction *ip; /* Jump target. */

	union {
		struct instr_operand a;
		uint8_t header_id; /* For jmpv/jmpnv. */
		uint8_t action_id; /* For jmpa/jmpna. */
	};

	union {
		struct instr_operand b;
		uint64_t b_val; /* Immediate second operand. */
	};
};
819 
/* One translated pipeline instruction: opcode plus per-opcode operands
 * overlaid in a union.
 */
struct instruction {
	enum instruction_type type;
	union {
		struct instr_io io;
		struct instr_dst_src mirror;
		struct instr_hdr_validity valid;
		struct instr_dst_src mov;
		struct instr_regarray regarray;
		struct instr_meter meter;
		struct instr_dma dma;
		struct instr_dst_src alu;
		struct instr_table table;
		struct instr_learn learn;
		struct instr_extern_obj ext_obj;
		struct instr_extern_func ext_func;
		struct instr_hash_func hash_func;
		struct instr_rss rss;
		struct instr_jmp jmp;
	};
};
840 
/* Translation-time companion data for one instruction (labels, liveness). */
struct instruction_data {
	char label[RTE_SWX_NAME_SIZE]; /* Label defined on this instruction, if any. */
	char jmp_label[RTE_SWX_NAME_SIZE]; /* Label this instruction jumps to, if any. */
	uint32_t n_users; /* user = jmp instruction to this instruction. */
	int invalid; /* Non-zero when the instruction has been eliminated. */
};

/* Executor callback for one instruction. */
typedef void (*instr_exec_t)(struct rte_swx_pipeline *);
849 
850 /*
851  * Action.
852  */
/* Executor callback for a complete action. */
typedef void
(*action_func_t)(struct rte_swx_pipeline *p);

/* Table action: argument struct layout plus translated instruction array. */
struct action {
	TAILQ_ENTRY(action) node; /* Membership in action_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *st; /* Layout of the action arguments; may be NULL. */
	int *args_endianness; /* 0 = Host Byte Order (HBO); 1 = Network Byte Order (NBO). */
	struct instruction *instructions; /* Translated action code. */
	struct instruction_data *instruction_data; /* Per-instruction companion data. */
	uint32_t n_instructions;
	uint32_t id;
};

TAILQ_HEAD(action_tailq, action);
868 
869 /*
870  * Table.
871  */
/* Registered table type: match type plus the ops vtable used to drive it. */
struct table_type {
	TAILQ_ENTRY(table_type) node; /* Membership in table_type_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	enum rte_swx_table_match_type match_type;
	struct rte_swx_table_ops ops;
};

TAILQ_HEAD(table_type_tailq, table_type);

/* One match field of a table: its match type and the field it refers to. */
struct match_field {
	enum rte_swx_table_match_type match_type;
	struct field *field;
};
885 
/* Declared match-action table: match fields, allowed actions, defaults. */
struct table {
	TAILQ_ENTRY(table) node; /* Membership in table_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	char args[RTE_SWX_NAME_SIZE]; /* Table creation arguments string. */
	struct table_type *type; /* NULL when n_fields == 0. */

	/* Match. */
	struct match_field *fields; /* Array of n_fields elements. */
	uint32_t n_fields;
	struct header *header; /* Only valid when n_fields > 0. */

	/* Action. */
	struct action **actions; /* Array of n_actions elements. */
	struct action *default_action;
	uint8_t *default_action_data; /* Arguments of the default action. */
	uint32_t n_actions;
	int default_action_is_const; /* Non-zero when the default action cannot change. */
	uint32_t action_data_size_max; /* Biggest action argument struct size. */
	int *action_is_for_table_entries; /* Per-action permission flags. */
	int *action_is_for_default_entry; /* Per-action permission flags. */

	struct hash_func *hf; /* Optional hash function override; may be NULL. */
	uint32_t size; /* Requested number of entries. */
	uint32_t id;
};

TAILQ_HEAD(table_tailq, table);

/* Data-path state of a table lookup.
 * NOTE(review): upstream DPDK also declares the lookup function pointer here;
 * this copy appears to have lost that line during extraction — confirm.
 */
struct table_runtime {
	void *mailbox; /* Per-thread lookup mailbox. */
	uint8_t **key; /* Lookup key pointer(s). */
};

/* Per-table statistics counters. */
struct table_statistics {
	uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
	uint64_t *n_pkts_action; /* Per-action packet counters. */
};
924 
925 /*
926  * Selector.
927  */
/* Declared selector table (group/member selection). */
struct selector {
	TAILQ_ENTRY(selector) node; /* Membership in selector_tailq. */
	char name[RTE_SWX_NAME_SIZE];

	struct field *group_id_field; /* Input: group ID. */
	struct field **selector_fields; /* Fields hashed to pick the member. */
	uint32_t n_selector_fields;
	struct header *selector_header; /* Only valid for header selector fields. */
	struct field *member_id_field; /* Output: selected member ID. */

	uint32_t n_groups_max;
	uint32_t n_members_per_group_max;

	uint32_t id;
};

TAILQ_HEAD(selector_tailq, selector);

/* Data-path state of a selector lookup. */
struct selector_runtime {
	void *mailbox; /* Per-thread lookup mailbox. */
	uint8_t **group_id_buffer;
	uint8_t **selector_buffer;
	uint8_t **member_id_buffer;
};

/* Per-selector statistics counters. */
struct selector_statistics {
	uint64_t n_pkts;
};
956 
957 /*
958  * Learner table.
959  */
/* Declared learner table: like a regular table, but entries can be added,
 * rearmed and deleted from the data path (learn/rearm/forget instructions).
 * NOTE(review): upstream DPDK also declares the timeout array pointer here;
 * this copy appears to have lost that line during extraction — confirm.
 */
struct learner {
	TAILQ_ENTRY(learner) node; /* Membership in learner_tailq. */
	char name[RTE_SWX_NAME_SIZE];

	/* Match. */
	struct field **fields; /* Array of n_fields elements. */
	uint32_t n_fields;
	struct header *header; /* Only valid for header match fields. */

	/* Action. */
	struct action **actions; /* Array of n_actions elements. */
	struct action *default_action;
	uint8_t *default_action_data; /* Arguments of the default action. */
	uint32_t n_actions;
	int default_action_is_const; /* Non-zero when the default action cannot change. */
	uint32_t action_data_size_max; /* Biggest action argument struct size. */
	int *action_is_for_table_entries; /* Per-action permission flags. */
	int *action_is_for_default_entry; /* Per-action permission flags. */

	struct hash_func *hf; /* Optional hash function override; may be NULL. */
	uint32_t size; /* Requested number of entries. */
	uint32_t n_timeouts; /* Number of configured timeout values. */
	uint32_t id;
};

TAILQ_HEAD(learner_tailq, learner);

/* Data-path state of a learner table lookup. */
struct learner_runtime {
	void *mailbox; /* Per-thread lookup mailbox. */
	uint8_t **key; /* Lookup key pointer(s). */
};

/* Per-learner statistics counters. */
struct learner_statistics {
	uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
	uint64_t n_pkts_learn[2]; /* 0 = Learn OK, 1 = Learn error. */
	uint64_t n_pkts_rearm;
	uint64_t n_pkts_forget;
	uint64_t *n_pkts_action; /* Per-action packet counters. */
};
1000 
1001 /*
1002  * Register array.
1003  */
/* Declared register array. */
struct regarray {
	TAILQ_ENTRY(regarray) node; /* Membership in regarray_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	uint64_t init_val; /* Initial value of every register. */
	uint32_t size; /* Number of registers; presumably a power of two (see size_mask) — confirm. */
	uint32_t id;
};

TAILQ_HEAD(regarray_tailq, regarray);

/* Data-path view of a register array. */
struct regarray_runtime {
	uint64_t *regarray; /* Register storage. */
	uint32_t size_mask; /* Index mask (size - 1). */
};
1018 
1019 /*
1020  * Meter array.
1021  */
/* Meter profile: trTCM parameters shared by the meters referencing it. */
struct meter_profile {
	TAILQ_ENTRY(meter_profile) node; /* Membership in meter_profile_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	struct rte_meter_trtcm_params params; /* User-supplied parameters. */
	struct rte_meter_trtcm_profile profile; /* Pre-computed run-time profile. */
	uint32_t n_users; /* Number of meters using this profile. */
};

TAILQ_HEAD(meter_profile_tailq, meter_profile);

/* Declared meter array. */
struct metarray {
	TAILQ_ENTRY(metarray) node; /* Membership in metarray_tailq. */
	char name[RTE_SWX_NAME_SIZE];
	uint32_t size; /* Number of meters. */
	uint32_t id;
};

TAILQ_HEAD(metarray_tailq, metarray);

/* One meter instance: trTCM state, its profile, and per-color statistics.
 * The pad presumably sizes the struct for cache alignment — confirm.
 */
struct meter {
	struct rte_meter_trtcm m; /* trTCM run-time state. */
	struct meter_profile *profile;
	enum rte_color color_mask;
	uint8_t pad[20];

	uint64_t n_pkts[RTE_COLORS]; /* Per-output-color packet counters. */
	uint64_t n_bytes[RTE_COLORS]; /* Per-output-color byte counters. */
};

/* Data-path view of a meter array. */
struct metarray_runtime {
	struct meter *metarray; /* Meter storage. */
	uint32_t size_mask; /* Index mask (size - 1). */
};
1055 
1056 /*
1057  * Pipeline.
1058  */
/* Complete per-thread run-time state of the pipeline: the packet being
 * processed, its headers and meta-data, and the lookup/extern/instruction
 * context. All instruction executors operate on this struct.
 */
struct thread {
	/* Packet. */
	struct rte_swx_pkt pkt; /* Packet currently owned by this thread. */
	uint8_t *ptr; /* Current parse position within the packet. */
	uint32_t *mirroring_slots; /* Mirror session ID per mirroring slot. */
	uint64_t mirroring_slots_mask; /* Bit i set = mirroring slot i in use. */
	int recirculate; /* Non-zero when the packet is to be recirculated. */
	uint32_t recirc_pass_id; /* Current recirculation pass. */

	/* Structures. */
	uint8_t **structs; /* All structs (headers, meta-data, mailboxes), by struct_id. */

	/* Packet headers. */
	struct header_runtime *headers; /* Extracted or generated headers. */
	struct header_out_runtime *headers_out; /* Emitted headers. */
	uint8_t *header_storage;
	uint8_t *header_out_storage;
	uint64_t valid_headers; /* Bit i set = header i is valid (see HEADER_VALID). */
	uint32_t n_headers_out;

	/* Packet meta-data. */
	uint8_t *metadata;

	/* Tables. */
	struct table_runtime *tables;
	struct selector_runtime *selectors;
	struct learner_runtime *learners;
	struct rte_swx_table_state *table_state;
	uint64_t action_id; /* Action selected by the latest lookup. */
	size_t entry_id; /* Entry ID of the latest lookup hit. */
	int hit; /* 0 = Miss, 1 = Hit. */
	uint32_t learner_id; /* Learner involved in the latest lookup. */
	uint64_t time;

	/* Extern objects and functions. */
	struct extern_obj_runtime *extern_objs;
	struct extern_func_runtime *extern_funcs;

	/* Instructions. */
	struct instruction *ip; /* Current instruction pointer. */
	struct instruction *ret; /* Return address for the return instruction. */
};
1101 
/* 64-bit mask bit helpers; pos must be < 64 (shift by >= 64 is undefined). */
#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))

/* Non-zero when the given header is currently valid for the given thread. */
#define HEADER_VALID(thread, header_id) \
	MASK64_BIT_GET((thread)->valid_headers, header_id)
1108 
1109 static inline uint64_t
1110 instr_operand_hbo(struct thread *t, const struct instr_operand *x)
1111 {
1112  uint8_t *x_struct = t->structs[x->struct_id];
1113  uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1114  uint64_t x64 = *x64_ptr;
1115  uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
1116 
1117  return x64 & x64_mask;
1118 }
1119 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read a small (<= 64-bit) operand stored in Network Byte Order: the value
 * occupies the most significant n_bits of the big-endian 64-bit word, so
 * byte-swap and shift right to obtain the host-order value.
 */
static inline uint64_t
instr_operand_nbo(struct thread *t, const struct instr_operand *x)
{
	uint8_t *x_struct = t->structs[x->struct_id];
	uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
	uint64_t x64 = *x64_ptr;

	return ntoh64(x64) >> (64 - x->n_bits);
}

#else

/* On big-endian CPUs the NBO and HBO reads are identical. */
#define instr_operand_nbo instr_operand_hbo

#endif
1137 
/* ALU operation with both operands in Host Byte Order: extract dst and src
 * under their bit masks, apply the operator, then merge the result back into
 * the destination word, preserving the bits outside the dst mask.
 */
#define ALU(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}
1156 
1157 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1158 
1159 #define ALU_MH(thread, ip, operator) \
1160 { \
1161  uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1162  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1163  uint64_t dst64 = *dst64_ptr; \
1164  uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1165  uint64_t dst = dst64 & dst64_mask; \
1166  \
1167  uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1168  uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1169  uint64_t src64 = *src64_ptr; \
1170  uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1171  \
1172  uint64_t result = dst operator src; \
1173  \
1174  *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1175 }
1176 
1177 #define ALU_HM(thread, ip, operator) \
1178 { \
1179  uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1180  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1181  uint64_t dst64 = *dst64_ptr; \
1182  uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1183  uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1184  \
1185  uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1186  uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1187  uint64_t src64 = *src64_ptr; \
1188  uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1189  uint64_t src = src64 & src64_mask; \
1190  \
1191  uint64_t result = dst operator src; \
1192  result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1193  \
1194  *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1195 }
1196 
/* Fast variant of ALU_HM: instead of converting the destination to host order
 * and back, the source is byte-swapped into the destination's alignment, so
 * the result needs no further conversion. Only usable for operand/operator
 * combinations where this shortcut is exact (selection happens elsewhere, at
 * instruction translation time).
 */
#define ALU_HM_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
1215 
/* ALU operation with both destination and source being header fields (network
 * byte order): both operands are byte-swapped and right-aligned for the
 * computation; the result is shifted back and byte-swapped for the write-back.
 */
#define ALU_HH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
	\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
1234 
/* Fast variant of ALU_HH: no byte swap at all; the source is left-aligned and
 * then shifted into the destination's alignment, so both operands stay in
 * network byte order. Only usable where this shortcut is exact (selection
 * happens elsewhere, at instruction translation time).
 */
#define ALU_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint64_t result = dst operator src; \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
1252 
/* Big-endian CPU: header fields and non-header fields already share the same
 * byte order, so all byte-order-aware ALU flavors collapse to the generic ALU.
 */
#else

#define ALU_MH ALU
#define ALU_HM ALU
#define ALU_HM_FAST ALU
#define ALU_HH ALU
#define ALU_HH_FAST ALU

#endif
1262 
/* ALU operation with a non-header destination (host byte order) and an
 * immediate value source: dst = dst operator src_val. Only the lowest
 * dst.n_bits bits of the result are written back; the remaining bits of the
 * underlying 64-bit word are preserved.
 */
#define ALU_I(thread, ip, operator) \
{ \
	uint8_t *dst_base = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst_word_ptr = (uint64_t *)&dst_base[(ip)->alu.dst.offset]; \
	uint64_t dst_word = *dst_word_ptr; \
	uint64_t dst_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst_val = dst_word & dst_mask; \
	\
	uint64_t imm_val = (ip)->alu.src_val; \
	\
	uint64_t res = dst_val operator imm_val; \
	\
	*dst_word_ptr = (dst_word & ~dst_mask) | (res & dst_mask); \
}
1277 
/* Non-header destination with immediate source: identical to ALU_I. */
#define ALU_MI ALU_I
1279 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* ALU operation with a header field destination (network byte order) and an
 * immediate value source: dst = dst operator src_val. The destination is
 * converted to host order for the computation and converted back for the
 * write-back.
 */
#define ALU_HI(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	\
	uint64_t src = (ip)->alu.src_val; \
	\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

/* Big-endian CPU: header fields are already in host byte order. */
#define ALU_HI ALU_I

#endif
1303 
/* mov between two fields stored in the same byte order: the source field is
 * masked to src.n_bits and the lowest dst.n_bits bits are written into the
 * destination; the remaining bits of the destination word are preserved.
 */
#define MOV(thread, ip) \
{ \
	uint8_t *dst_base = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst_word_ptr = (uint64_t *)&dst_base[(ip)->mov.dst.offset]; \
	uint64_t dst_word = *dst_word_ptr; \
	uint64_t dst_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_base = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src_word_ptr = (uint64_t *)&src_base[(ip)->mov.src.offset]; \
	uint64_t src_word = *src_word_ptr; \
	uint64_t src_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src_val = src_word & src_mask; \
	\
	*dst_word_ptr = (dst_word & ~dst_mask) | (src_val & dst_mask); \
}
1319 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* mov with a non-header destination (host byte order) and a header field
 * source (network byte order): the source is byte-swapped and right-aligned
 * before being masked into the destination.
 */
#define MOV_MH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
	\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

/* mov with a header field destination (network byte order) and a non-header
 * source (host byte order): the source is masked to src.n_bits, then
 * byte-swapped and shifted to the destination field position.
 */
#define MOV_HM(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	\
	src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}

/* mov between two header fields (both in network byte order): no byte swap is
 * needed; the source is left-aligned, then shifted into the destination
 * alignment.
 */
#define MOV_HH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	\
	uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
	src = src >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}

#else

/* Big-endian CPU: header fields and non-header fields share the same byte
 * order, so all mov flavors collapse to the generic mov.
 */
#define MOV_MH MOV
#define MOV_HM MOV
#define MOV_HH MOV

#endif
1377 
/* mov with an immediate value source: only the lowest dst.n_bits bits of
 * src_val are stored into the destination field; the remaining bits of the
 * underlying 64-bit word are preserved.
 */
#define MOV_I(thread, ip) \
{ \
	uint8_t *dst_base = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst_word_ptr = (uint64_t *)&dst_base[(ip)->mov.dst.offset]; \
	uint64_t dst_word = *dst_word_ptr; \
	uint64_t dst_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	\
	uint64_t imm_val = (ip)->mov.src_val; \
	\
	*dst_word_ptr = (dst_word & ~dst_mask) | (imm_val & dst_mask); \
}
1389 
/* Conditional jump comparing two non-header operands (host byte order): the
 * thread instruction pointer is set to the jump target when the comparison
 * holds, otherwise to the next instruction.
 */
#define JMP_CMP(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}
1406 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Conditional jump flavors for header (H) vs non-header (M) operand
 * combinations on little-endian CPUs: header operands are byte-swapped and
 * right-aligned before the comparison. The _FAST flavor skips the byte swap
 * and compares both operands left-aligned instead; it is only usable where
 * that comparison is equivalent (selection happens elsewhere, at instruction
 * translation time).
 */
#define JMP_CMP_MH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HM(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
	\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

/* Big-endian CPU: header fields and non-header fields share the same byte
 * order, so all conditional jump flavors collapse to the generic JMP_CMP.
 */
#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP
#define JMP_CMP_HH_FAST JMP_CMP

#endif
1479 
1480 #define JMP_CMP_I(thread, ip, operator) \
1481 { \
1482  uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1483  uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1484  uint64_t a64 = *a64_ptr; \
1485  uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1486  uint64_t a = a64 & a64_mask; \
1487  \
1488  uint64_t b = (ip)->jmp.b_val; \
1489  \
1490  (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1491 }
1492 
/* Non-header operand vs immediate value: identical to JMP_CMP_I. */
#define JMP_CMP_MI JMP_CMP_I
1494 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Conditional jump comparing a header field operand (network byte order)
 * against an immediate value: the header operand is byte-swapped and
 * right-aligned before the comparison.
 */
#define JMP_CMP_HI(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	\
	uint64_t b = (ip)->jmp.b_val; \
	\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

/* Big-endian CPU: header fields are already in host byte order. */
#define JMP_CMP_HI JMP_CMP_I

#endif
1514 
/* Read the n_bits-wide (1 .. 64) meta-data field located at the given byte
 * offset; the value is returned right-aligned, in host byte order. Implemented
 * as a GNU statement expression so it can be used where a value is expected.
 */
#define METADATA_READ(thread, offset, n_bits) \
__extension__ ({ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	(m64 & m64_mask); \
})
1522 
/* Write the n_bits-wide (1 .. 64) meta-data field located at the given byte
 * offset: only the lowest n_bits bits of value are stored; the remaining bits
 * of the underlying 64-bit word are preserved.
 */
#define METADATA_WRITE(thread, offset, n_bits, value) \
{ \
	uint64_t *md64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t md64 = *md64_ptr; \
	uint64_t md64_mask = UINT64_MAX >> (64 - (n_bits)); \
	\
	uint64_t md_new = value; \
	\
	*md64_ptr = (md64 & ~md64_mask) | (md_new & md64_mask); \
}
1533 
/* Number of packet processing threads per pipeline. Must be a power of two,
 * since the thread ID is advanced with a bit-wise AND mask (see
 * thread_yield()).
 */
#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif

/* Default upper limit for the size of the pipeline instruction table. */
#ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX
#define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1024
#endif
1541 
/* Pipeline: configuration-time object lists plus the run-time data structures
 * derived from them at build time.
 */
struct rte_swx_pipeline {
	char name[RTE_SWX_NAME_SIZE]; /* Pipeline name. */

	/* Pipeline objects registered at configuration time. */
	struct struct_type_tailq struct_types;
	struct port_in_type_tailq port_in_types;
	struct port_in_tailq ports_in;
	struct port_out_type_tailq port_out_types;
	struct port_out_tailq ports_out;
	struct extern_type_tailq extern_types;
	struct extern_obj_tailq extern_objs;
	struct extern_func_tailq extern_funcs;
	struct hash_func_tailq hash_funcs;
	struct rss_tailq rss;
	struct header_tailq headers;
	struct struct_type *metadata_st; /* Type of the meta-data structure. */
	uint32_t metadata_struct_id; /* Struct ID of the meta-data structure. */
	struct action_tailq actions;
	struct table_type_tailq table_types;
	struct table_tailq tables;
	struct selector_tailq selectors;
	struct learner_tailq learners;
	struct regarray_tailq regarrays;
	struct meter_profile_tailq meter_profiles;
	struct metarray_tailq metarrays;

	/* Run-time data structures, instantiated at build time. */
	struct port_in_runtime *in;
	struct port_out_runtime *out;
	struct mirroring_session *mirroring_sessions;
	struct instruction **action_instructions; /* Per-action first instruction. */
	action_func_t *action_funcs;
	struct rte_swx_table_state *table_state;
	struct table_statistics *table_stats;
	struct selector_statistics *selector_stats;
	struct learner_statistics *learner_stats;
	struct hash_func_runtime *hash_func_runtime;
	struct rss_runtime **rss_runtime;
	struct regarray_runtime *regarray_runtime;
	struct metarray_runtime *metarray_runtime;
	struct instruction *instructions; /* Pipeline program. */
	struct instruction_data *instruction_data;
	instr_exec_t *instruction_table;
	struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
	void *lib; /* NOTE(review): opaque handle, presumably a loaded shared library — confirm. */

	/* Element counts and pipeline state. */
	uint32_t n_structs;
	uint32_t n_ports_in;
	uint32_t n_ports_out;
	uint32_t n_mirroring_slots;
	uint32_t n_mirroring_sessions;
	uint32_t n_extern_objs;
	uint32_t n_extern_funcs;
	uint32_t n_hash_funcs;
	uint32_t n_rss;
	uint32_t n_actions;
	uint32_t n_tables;
	uint32_t n_selectors;
	uint32_t n_learners;
	uint32_t n_regarrays;
	uint32_t n_metarrays;
	uint32_t n_headers;
	uint32_t thread_id; /* Current thread ID (round-robin, see thread_yield()). */
	uint32_t port_id; /* Current input port ID (round-robin, see pipeline_port_inc()). */
	uint32_t n_instructions;
	int build_done; /* Non-zero once the pipeline build has completed. */
	int numa_node;
};
1608 
1609 /*
1610  * Instruction.
1611  */
1612 static inline void
1613 pipeline_port_inc(struct rte_swx_pipeline *p)
1614 {
1615  uint32_t port_id = p->port_id;
1616 
1617  port_id++;
1618  if (port_id == p->n_ports_in)
1619  port_id = 0;
1620 
1621  p->port_id = port_id;
1622 }
1623 
static inline void
thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
{
	/* Rewind the thread to the first instruction of the pipeline program. */
	t->ip = p->instructions;
}
1629 
static inline void
thread_ip_set(struct thread *t, struct instruction *ip)
{
	/* Set the thread's instruction pointer to the given instruction. */
	t->ip = ip;
}
1635 
static inline void
thread_ip_action_call(struct rte_swx_pipeline *p,
		      struct thread *t,
		      uint32_t action_id)
{
	/* Save the return address (next instruction), then jump to the first
	 * instruction of the given action.
	 */
	t->ret = t->ip + 1;
	t->ip = p->action_instructions[action_id];
}
1644 
1645 static inline void
1646 thread_ip_inc(struct rte_swx_pipeline *p);
1647 
1648 static inline void
1649 thread_ip_inc(struct rte_swx_pipeline *p)
1650 {
1651  struct thread *t = &p->threads[p->thread_id];
1652 
1653  t->ip++;
1654 }
1655 
static inline void
thread_ip_inc_cond(struct thread *t, int cond)
{
	/* Advance the instruction pointer by cond instructions; used with a 0/1
	 * condition as a branchless "increment if".
	 */
	t->ip += cond;
}
1661 
1662 static inline void
1663 thread_yield(struct rte_swx_pipeline *p)
1664 {
1665  p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1666 }
1667 
1668 static inline void
1669 thread_yield_cond(struct rte_swx_pipeline *p, int cond)
1670 {
1671  p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1672 }
1673 
1674 /*
1675  * rx.
1676  */
/*
 * rx instruction: get the next packet for the current thread.
 *
 * On recirculation, the current packet is kept (no port access), the pass
 * counter is incremented and the header/table state is reset. Otherwise a
 * packet is pulled from the current input port, the port ID is written to the
 * meta-data field designated by the instruction, and the pipeline moves on to
 * the next input port (round-robin).
 *
 * Returns 1 when a packet is available for processing, 0 otherwise.
 */
static inline int
__instr_rx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct port_in_runtime *port = &p->in[p->port_id];
	struct rte_swx_pkt *pkt = &t->pkt;
	int pkt_received;

	/* Recirculation: keep the current packet. */
	if (t->recirculate) {
		TRACE("[Thread %2u] rx - recirculate (pass %u)\n",
		      p->thread_id,
		      t->recirc_pass_id + 1);

		/* Packet. */
		t->ptr = &pkt->pkt[pkt->offset];
		t->mirroring_slots_mask = 0;
		t->recirculate = 0;
		t->recirc_pass_id++;

		/* Headers. */
		t->valid_headers = 0;
		t->n_headers_out = 0;

		/* Tables. */
		t->table_state = p->table_state;

		return 1;
	}

	/* Packet. */
	pkt_received = port->pkt_rx(port->obj, pkt);
	t->ptr = &pkt->pkt[pkt->offset];
	rte_prefetch0(t->ptr);

	TRACE("[Thread %2u] rx %s from port %u\n",
	      p->thread_id,
	      pkt_received ? "1 pkt" : "0 pkts",
	      p->port_id);

	t->mirroring_slots_mask = 0;
	t->recirc_pass_id = 0;

	/* Headers. */
	t->valid_headers = 0;
	t->n_headers_out = 0;

	/* Meta-data: store the input port ID. */
	METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);

	/* Tables. */
	t->table_state = p->table_state;

	/* Thread. */
	pipeline_port_inc(p);

	return pkt_received;
}
1734 
1735 static inline void
1736 instr_rx_exec(struct rte_swx_pipeline *p)
1737 {
1738  struct thread *t = &p->threads[p->thread_id];
1739  struct instruction *ip = t->ip;
1740  int pkt_received;
1741 
1742  /* Packet. */
1743  pkt_received = __instr_rx_exec(p, t, ip);
1744 
1745  /* Thread. */
1746  thread_ip_inc_cond(t, pkt_received);
1747  thread_yield(p);
1748 }
1749 
1750 /*
1751  * tx.
1752  */
1753 static inline void
1754 emit_handler(struct thread *t)
1755 {
1756  struct header_out_runtime *h0 = &t->headers_out[0];
1757  struct header_out_runtime *h1 = &t->headers_out[1];
1758  uint32_t offset = 0, i;
1759 
1760  /* No header change or header decapsulation. */
1761  if ((t->n_headers_out == 1) &&
1762  (h0->ptr + h0->n_bytes == t->ptr)) {
1763  TRACE("Emit handler: no header change or header decap.\n");
1764 
1765  t->pkt.offset -= h0->n_bytes;
1766  t->pkt.length += h0->n_bytes;
1767 
1768  return;
1769  }
1770 
1771  /* Header encapsulation (optionally, with prior header decapsulation). */
1772  if ((t->n_headers_out == 2) &&
1773  (h1->ptr + h1->n_bytes == t->ptr) &&
1774  (h0->ptr == h0->ptr0)) {
1775  uint32_t offset;
1776 
1777  TRACE("Emit handler: header encapsulation.\n");
1778 
1779  offset = h0->n_bytes + h1->n_bytes;
1780  memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1781  t->pkt.offset -= offset;
1782  t->pkt.length += offset;
1783 
1784  return;
1785  }
1786 
1787  /* For any other case. */
1788  TRACE("Emit handler: complex case.\n");
1789 
1790  for (i = 0; i < t->n_headers_out; i++) {
1791  struct header_out_runtime *h = &t->headers_out[i];
1792 
1793  memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1794  offset += h->n_bytes;
1795  }
1796 
1797  if (offset) {
1798  memcpy(t->ptr - offset, t->header_out_storage, offset);
1799  t->pkt.offset -= offset;
1800  t->pkt.length += offset;
1801  }
1802 }
1803 
/*
 * Transmit a clone of the given packet for every mirroring slot armed in the
 * thread's slot mask. Each armed slot holds a session ID; the session selects
 * the output port and whether the fast clone path or the truncating clone
 * path is used.
 */
static inline void
mirroring_handler(struct rte_swx_pipeline *p, struct thread *t, struct rte_swx_pkt *pkt)
{
	uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
	uint32_t slot_id;

	/* Iterate until all armed slots have been served and cleared. */
	for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
		if (slot_mask & slots_mask) {
			struct port_out_runtime *port;
			struct mirroring_session *session;
			uint32_t port_id, session_id;

			session_id = t->mirroring_slots[slot_id];
			session = &p->mirroring_sessions[session_id];

			port_id = session->port_id;
			port = &p->out[port_id];

			if (session->fast_clone)
				port->pkt_fast_clone_tx(port->obj, pkt);
			else
				port->pkt_clone_tx(port->obj, pkt, session->truncation_length);

			slots_mask &= ~slot_mask;
		}
}
1830 
/*
 * tx instruction: transmit the current packet.
 *
 * The output port ID is read from the meta-data field designated by the
 * instruction; an out-of-range port ID is redirected to the last output port,
 * which is the internal drop port. On recirculation, the packet is kept for
 * another pipeline pass instead of being transmitted.
 */
static inline void
__instr_tx_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct rte_swx_pkt *pkt = &t->pkt;
	struct port_out_runtime *port;
	uint64_t port_id;

	/* Recirculation: keep the current packet. */
	if (t->recirculate) {
		TRACE("[Thread %2u]: tx 1 pkt - recirculate\n",
		      p->thread_id);

		/* Headers. */
		emit_handler(t);

		/* Packet. */
		mirroring_handler(p, t, pkt);

		return;
	}

	/* If the output port ID is invalid, then set it to the drop output port that has been set
	 * up internally by the pipeline for this purpose.
	 */
	port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
	if (port_id >= p->n_ports_out)
		port_id = p->n_ports_out - 1;

	port = &p->out[port_id];

	TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
	      p->thread_id,
	      (uint32_t)port_id);

	/* Headers. */
	emit_handler(t);

	/* Packet. */
	mirroring_handler(p, t, pkt);
	port->pkt_tx(port->obj, pkt);
}
1872 
/*
 * tx instruction with immediate output port ID: same as __instr_tx_exec(),
 * except the port ID comes from the instruction itself instead of meta-data.
 */
static inline void
__instr_tx_i_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct rte_swx_pkt *pkt = &t->pkt;
	struct port_out_runtime *port;
	uint64_t port_id;

	/* Recirculation: keep the current packet. */
	if (t->recirculate) {
		TRACE("[Thread %2u]: tx (i) 1 pkt - recirculate\n",
		      p->thread_id);

		/* Headers. */
		emit_handler(t);

		/* Packet. */
		mirroring_handler(p, t, pkt);

		return;
	}

	/* If the output port ID is invalid, then set it to the drop output port that has been set
	 * up internally by the pipeline for this purpose.
	 *
	 * This test cannot be done earlier at instruction translation time, even though the output
	 * port ID is an immediate value, as the number of output ports is only known later at the
	 * pipeline build time.
	 */
	port_id = ip->io.io.val;
	if (port_id >= p->n_ports_out)
		port_id = p->n_ports_out - 1;

	port = &p->out[port_id];

	TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n",
	      p->thread_id,
	      (uint32_t)port_id);

	/* Headers. */
	emit_handler(t);

	/* Packet. */
	mirroring_handler(p, t, pkt);
	port->pkt_tx(port->obj, pkt);
}
1918 
1919 static inline void
1920 __instr_drop_exec(struct rte_swx_pipeline *p,
1921  struct thread *t,
1922  const struct instruction *ip __rte_unused)
1923 {
1924  uint64_t port_id = p->n_ports_out - 1;
1925  struct port_out_runtime *port = &p->out[port_id];
1926  struct rte_swx_pkt *pkt = &t->pkt;
1927 
1928  TRACE("[Thread %2u]: drop 1 pkt\n",
1929  p->thread_id);
1930 
1931  /* Headers. */
1932  emit_handler(t);
1933 
1934  /* Packet. */
1935  mirroring_handler(p, t, pkt);
1936  port->pkt_tx(port->obj, pkt);
1937 }
1938 
/*
 * mirror instruction: arm a mirroring slot with a mirroring session.
 *
 * Both operands are masked to the valid range with a bit-wise AND, which
 * requires the slot and session counts to be powers of two. The actual packet
 * clone is produced later by mirroring_handler() at tx time.
 */
static inline void
__instr_mirror_exec(struct rte_swx_pipeline *p,
		    struct thread *t,
		    const struct instruction *ip)
{
	uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
	uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);

	slot_id &= p->n_mirroring_slots - 1;
	session_id &= p->n_mirroring_sessions - 1;

	TRACE("[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
	      p->thread_id,
	      (uint32_t)slot_id,
	      (uint32_t)session_id);

	t->mirroring_slots[slot_id] = session_id;
	t->mirroring_slots_mask |= 1LLU << slot_id;
}
1958 
static inline void
__instr_recirculate_exec(struct rte_swx_pipeline *p __rte_unused,
			 struct thread *t,
			 const struct instruction *ip __rte_unused)
{
	TRACE("[Thread %2u]: recirculate\n",
	      p->thread_id);

	/* Mark the packet for another pipeline pass; rx/tx check this flag. */
	t->recirculate = 1;
}
1969 
static inline void
__instr_recircid_exec(struct rte_swx_pipeline *p __rte_unused,
		      struct thread *t,
		      const struct instruction *ip)
{
	TRACE("[Thread %2u]: recircid (pass %u)\n",
	      p->thread_id,
	      t->recirc_pass_id);

	/* Meta-data: store the current recirculation pass ID into the field
	 * designated by the instruction.
	 */
	METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
}
1982 
1983 /*
1984  * extract.
1985  */
/*
 * Extract n_extract consecutive headers from the packet: each header is mapped
 * in place (its struct pointer is set into the packet buffer) and marked
 * valid, while the packet offset/length/pointer are advanced past the
 * extracted bytes.
 */
static inline void
__instr_hdr_extract_many_exec(struct rte_swx_pipeline *p __rte_unused,
			      struct thread *t,
			      const struct instruction *ip,
			      uint32_t n_extract)
{
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;
	uint32_t offset = t->pkt.offset;
	uint32_t length = t->pkt.length;
	uint32_t i;

	for (i = 0; i < n_extract; i++) {
		uint32_t header_id = ip->io.hdr.header_id[i];
		uint32_t struct_id = ip->io.hdr.struct_id[i];
		uint32_t n_bytes = ip->io.hdr.n_bytes[i];

		TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
		      p->thread_id,
		      header_id,
		      n_bytes);

		/* Headers. */
		t->structs[struct_id] = ptr;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);

		/* Packet. */
		offset += n_bytes;
		length -= n_bytes;
		ptr += n_bytes;
	}

	/* Headers. */
	t->valid_headers = valid_headers;

	/* Packet. */
	t->pkt.offset = offset;
	t->pkt.length = length;
	t->ptr = ptr;
}
2026 
/*
 * Header extraction wrappers: extract N consecutive headers with a single
 * (fused) instruction. Each wrapper simply invokes the generic many-header
 * routine with the corresponding header count.
 */
static inline void
__instr_hdr_extract_exec(struct rte_swx_pipeline *p,
			 struct thread *t,
			 const struct instruction *ip)
{
	__instr_hdr_extract_many_exec(p, t, ip, 1);
}

static inline void
__instr_hdr_extract2_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 2);
}

static inline void
__instr_hdr_extract3_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 3);
}

static inline void
__instr_hdr_extract4_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 4);
}

static inline void
__instr_hdr_extract5_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 5);
}

static inline void
__instr_hdr_extract6_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 6);
}

static inline void
__instr_hdr_extract7_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 7);
}

static inline void
__instr_hdr_extract8_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_extract_many_exec(p, t, ip, 8);
}
2104 
/*
 * Extract a variable-size header: the size of the trailing variable part (in
 * bytes) is read at run-time from the meta-data field designated by the
 * instruction and added to the header's fixed size. The header runtime size
 * is updated and the packet is advanced past the full header.
 */
static inline void
__instr_hdr_extract_m_exec(struct rte_swx_pipeline *p __rte_unused,
			   struct thread *t,
			   const struct instruction *ip)
{
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;
	uint32_t offset = t->pkt.offset;
	uint32_t length = t->pkt.length;

	uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
	uint32_t header_id = ip->io.hdr.header_id[0];
	uint32_t struct_id = ip->io.hdr.struct_id[0];
	uint32_t n_bytes = ip->io.hdr.n_bytes[0];

	struct header_runtime *h = &t->headers[header_id];

	TRACE("[Thread %2u]: extract header %u (%u + %u bytes)\n",
	      p->thread_id,
	      header_id,
	      n_bytes,
	      n_bytes_last);

	n_bytes += n_bytes_last;

	/* Headers. */
	t->structs[struct_id] = ptr;
	t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
	h->n_bytes = n_bytes;

	/* Packet. */
	t->pkt.offset = offset + n_bytes;
	t->pkt.length = length - n_bytes;
	t->ptr = ptr + n_bytes;
}
2140 
2141 static inline void
2142 __instr_hdr_lookahead_exec(struct rte_swx_pipeline *p __rte_unused,
2143  struct thread *t,
2144  const struct instruction *ip)
2145 {
2146  uint64_t valid_headers = t->valid_headers;
2147  uint8_t *ptr = t->ptr;
2148 
2149  uint32_t header_id = ip->io.hdr.header_id[0];
2150  uint32_t struct_id = ip->io.hdr.struct_id[0];
2151 
2152  TRACE("[Thread %2u]: lookahead header %u\n",
2153  p->thread_id,
2154  header_id);
2155 
2156  /* Headers. */
2157  t->structs[struct_id] = ptr;
2158  t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2159 }
2160 
2161 /*
2162  * emit.
2163  */
/*
 * Emit n_emit consecutive headers: append each valid header to the thread's
 * output header list, coalescing headers that are contiguous in memory into a
 * single output entry. Invalid headers are skipped. The packet itself is not
 * modified here; the actual copy in front of the payload is performed later
 * by emit_handler().
 */
static inline void
__instr_hdr_emit_many_exec(struct rte_swx_pipeline *p __rte_unused,
			   struct thread *t,
			   const struct instruction *ip,
			   uint32_t n_emit)
{
	uint64_t valid_headers = t->valid_headers;
	uint32_t n_headers_out = t->n_headers_out;
	struct header_out_runtime *ho = NULL;
	uint8_t *ho_ptr = NULL;
	uint32_t ho_nbytes = 0, i;

	for (i = 0; i < n_emit; i++) {
		uint32_t header_id = ip->io.hdr.header_id[i];
		uint32_t struct_id = ip->io.hdr.struct_id[i];

		struct header_runtime *hi = &t->headers[header_id];
		uint8_t *hi_ptr0 = hi->ptr0;
		uint32_t n_bytes = hi->n_bytes;

		uint8_t *hi_ptr = t->structs[struct_id];

		/* Skip headers that are not currently valid. */
		if (!MASK64_BIT_GET(valid_headers, header_id)) {
			TRACE("[Thread %2u]: emit header %u (invalid)\n",
			      p->thread_id,
			      header_id);

			continue;
		}

		TRACE("[Thread %2u]: emit header %u (valid)\n",
		      p->thread_id,
		      header_id);

		/* Headers: start a new output entry or pick up the last one. */
		if (!ho) {
			if (!n_headers_out) {
				ho = &t->headers_out[0];

				ho->ptr0 = hi_ptr0;
				ho->ptr = hi_ptr;

				ho_ptr = hi_ptr;
				ho_nbytes = n_bytes;

				n_headers_out = 1;

				continue;
			} else {
				ho = &t->headers_out[n_headers_out - 1];

				ho_ptr = ho->ptr;
				ho_nbytes = ho->n_bytes;
			}
		}

		/* Coalesce with the current output entry when the header is
		 * contiguous with it in memory; otherwise open a new entry.
		 */
		if (ho_ptr + ho_nbytes == hi_ptr) {
			ho_nbytes += n_bytes;
		} else {
			ho->n_bytes = ho_nbytes;

			ho++;
			ho->ptr0 = hi_ptr0;
			ho->ptr = hi_ptr;

			ho_ptr = hi_ptr;
			ho_nbytes = n_bytes;

			n_headers_out++;
		}
	}

	if (ho)
		ho->n_bytes = ho_nbytes;
	t->n_headers_out = n_headers_out;
}
2240 
/*
 * Header emit wrappers: emit one header, or emit N headers and transmit the
 * packet with a single (fused) instruction. Each wrapper invokes the generic
 * many-header emit routine, optionally followed by the tx routine.
 */
static inline void
__instr_hdr_emit_exec(struct rte_swx_pipeline *p,
		      struct thread *t,
		      const struct instruction *ip)
{
	__instr_hdr_emit_many_exec(p, t, ip, 1);
}

static inline void
__instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p,
			 struct thread *t,
			 const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 1);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 2);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 3);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 4);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 5);
	__instr_tx_exec(p, t, ip);
}
2303 
2304 static inline void
2305 __instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p,
2306  struct thread *t,
2307  const struct instruction *ip)
2308 {
2309  TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2310 
2311  __instr_hdr_emit_many_exec(p, t, ip, 6);
2312  __instr_tx_exec(p, t, ip);
2313 }
2314 
2315 static inline void
2316 __instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p,
2317  struct thread *t,
2318  const struct instruction *ip)
2319 {
2320  TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2321 
2322  __instr_hdr_emit_many_exec(p, t, ip, 7);
2323  __instr_tx_exec(p, t, ip);
2324 }
2325 
2326 static inline void
2327 __instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p,
2328  struct thread *t,
2329  const struct instruction *ip)
2330 {
2331  TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
2332 
2333  __instr_hdr_emit_many_exec(p, t, ip, 8);
2334  __instr_tx_exec(p, t, ip);
2335 }
2336 
2337 /*
2338  * validate.
2339  */
2340 static inline void
2341 __instr_hdr_validate_exec(struct rte_swx_pipeline *p __rte_unused,
2342  struct thread *t,
2343  const struct instruction *ip)
2344 {
2345  uint32_t header_id = ip->valid.header_id;
2346  uint32_t struct_id = ip->valid.struct_id;
2347  uint64_t valid_headers = t->valid_headers;
2348  struct header_runtime *h = &t->headers[header_id];
2349 
2350  TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);
2351 
2352  /* If this header is already valid, then its associated t->structs[] element is also valid
2353  * and therefore it should not be modified. It could point to the packet buffer (in case of
2354  * extracted header) and setting it to the default location (h->ptr0) would be incorrect.
2355  */
2356  if (MASK64_BIT_GET(valid_headers, header_id))
2357  return;
2358 
2359  /* Headers. */
2360  t->structs[struct_id] = h->ptr0;
2361  t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2362 }
2363 
2364 /*
2365  * invalidate.
2366  */
2367 static inline void
2368 __instr_hdr_invalidate_exec(struct rte_swx_pipeline *p __rte_unused,
2369  struct thread *t,
2370  const struct instruction *ip)
2371 {
2372  uint32_t header_id = ip->valid.header_id;
2373 
2374  TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2375 
2376  /* Headers. */
2377  t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
2378 }
2379 
2380 /*
2381  * learn.
2382  */
/* learn: add an entry to the learner table currently selected by this thread
 * (t->learner_id). The action ID comes from the instruction, the action
 * arguments from the meta-data starting at mf_first_arg_offset, and the
 * timeout ID is read from a meta-data field. The per-learner statistics
 * counter indexed by the add status is incremented.
 */
static inline void
__instr_learn_exec(struct rte_swx_pipeline *p,
		   struct thread *t,
		   const struct instruction *ip)
{
	uint64_t action_id = ip->learn.action_id;
	uint32_t mf_first_arg_offset = ip->learn.mf_first_arg_offset;
	uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
					    ip->learn.mf_timeout_id_n_bits);
	uint32_t learner_id = t->learner_id;
	/* Learner table states are stored after the regular tables and the
	 * selectors within the table_state array.
	 */
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];
	uint32_t status;

	/* Table. */
	status = rte_swx_table_learner_add(ts->obj,
					   l->mailbox,
					   t->time,
					   action_id,
					   &t->metadata[mf_first_arg_offset],
					   timeout_id);

	/* Per this code, a non-zero status is traced as "ok". */
	TRACE("[Thread %2u] learner %u learn %s\n",
	      p->thread_id,
	      learner_id,
	      status ? "ok" : "error");

	stats->n_pkts_learn[status] += 1;
}
2414 
2415 /*
2416  * rearm.
2417  */
2418 static inline void
2419 __instr_rearm_exec(struct rte_swx_pipeline *p,
2420  struct thread *t,
2421  const struct instruction *ip __rte_unused)
2422 {
2423  uint32_t learner_id = t->learner_id;
2424  struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
2425  p->n_selectors + learner_id];
2426  struct learner_runtime *l = &t->learners[learner_id];
2427  struct learner_statistics *stats = &p->learner_stats[learner_id];
2428 
2429  /* Table. */
2430  rte_swx_table_learner_rearm(ts->obj, l->mailbox, t->time);
2431 
2432  TRACE("[Thread %2u] learner %u rearm\n",
2433  p->thread_id,
2434  learner_id);
2435 
2436  stats->n_pkts_rearm += 1;
2437 }
2438 
2439 static inline void
2440 __instr_rearm_new_exec(struct rte_swx_pipeline *p,
2441  struct thread *t,
2442  const struct instruction *ip)
2443 {
2444  uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2445  ip->learn.mf_timeout_id_n_bits);
2446  uint32_t learner_id = t->learner_id;
2447  struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
2448  p->n_selectors + learner_id];
2449  struct learner_runtime *l = &t->learners[learner_id];
2450  struct learner_statistics *stats = &p->learner_stats[learner_id];
2451 
2452  /* Table. */
2453  rte_swx_table_learner_rearm_new(ts->obj, l->mailbox, t->time, timeout_id);
2454 
2455  TRACE("[Thread %2u] learner %u rearm with timeout ID %u\n",
2456  p->thread_id,
2457  learner_id,
2458  timeout_id);
2459 
2460  stats->n_pkts_rearm += 1;
2461 }
2462 
2463 /*
2464  * forget.
2465  */
2466 static inline void
2467 __instr_forget_exec(struct rte_swx_pipeline *p,
2468  struct thread *t,
2469  const struct instruction *ip __rte_unused)
2470 {
2471  uint32_t learner_id = t->learner_id;
2472  struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
2473  p->n_selectors + learner_id];
2474  struct learner_runtime *l = &t->learners[learner_id];
2475  struct learner_statistics *stats = &p->learner_stats[learner_id];
2476 
2477  /* Table. */
2478  rte_swx_table_learner_delete(ts->obj, l->mailbox);
2479 
2480  TRACE("[Thread %2u] learner %u forget\n",
2481  p->thread_id,
2482  learner_id);
2483 
2484  stats->n_pkts_forget += 1;
2485 }
2486 
2487 /*
2488  * entryid.
2489  */
2490 static inline void
2491 __instr_entryid_exec(struct rte_swx_pipeline *p __rte_unused,
2492  struct thread *t,
2493  const struct instruction *ip)
2494 {
2495  TRACE("[Thread %2u]: entryid\n",
2496  p->thread_id);
2497 
2498  /* Meta-data. */
2499  METADATA_WRITE(t, ip->mov.dst.offset, ip->mov.dst.n_bits, t->entry_id);
2500 }
2501 
2502 /*
2503  * extern.
2504  */
2505 static inline uint32_t
2506 __instr_extern_obj_exec(struct rte_swx_pipeline *p __rte_unused,
2507  struct thread *t,
2508  const struct instruction *ip)
2509 {
2510  uint32_t obj_id = ip->ext_obj.ext_obj_id;
2511  uint32_t func_id = ip->ext_obj.func_id;
2512  struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2513  rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
2514  uint32_t done;
2515 
2516  TRACE("[Thread %2u] extern obj %u member func %u\n",
2517  p->thread_id,
2518  obj_id,
2519  func_id);
2520 
2521  done = func(obj->obj, obj->mailbox);
2522 
2523  return done;
2524 }
2525 
2526 static inline uint32_t
2527 __instr_extern_func_exec(struct rte_swx_pipeline *p __rte_unused,
2528  struct thread *t,
2529  const struct instruction *ip)
2530 {
2531  uint32_t ext_func_id = ip->ext_func.ext_func_id;
2532  struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2533  rte_swx_extern_func_t func = ext_func->func;
2534  uint32_t done;
2535 
2536  TRACE("[Thread %2u] extern func %u\n",
2537  p->thread_id,
2538  ext_func_id);
2539 
2540  done = func(ext_func->mailbox);
2541 
2542  return done;
2543 }
2544 
2545 /*
2546  * hash.
2547  */
2548 static inline void
2549 __instr_hash_func_exec(struct rte_swx_pipeline *p,
2550  struct thread *t,
2551  const struct instruction *ip)
2552 {
2553  uint32_t hash_func_id = ip->hash_func.hash_func_id;
2554  uint32_t dst_offset = ip->hash_func.dst.offset;
2555  uint32_t n_dst_bits = ip->hash_func.dst.n_bits;
2556  uint32_t src_struct_id = ip->hash_func.src.struct_id;
2557  uint32_t src_offset = ip->hash_func.src.offset;
2558  uint32_t n_src_bytes = ip->hash_func.src.n_bytes;
2559 
2560  struct hash_func_runtime *func = &p->hash_func_runtime[hash_func_id];
2561  uint8_t *src_ptr = t->structs[src_struct_id];
2562  uint32_t result;
2563 
2564  TRACE("[Thread %2u] hash %u\n",
2565  p->thread_id,
2566  hash_func_id);
2567 
2568  result = func->func(&src_ptr[src_offset], n_src_bytes, 0);
2569  METADATA_WRITE(t, dst_offset, n_dst_bits, result);
2570 }
2571 
2572 /*
2573  * rss.
2574  */
/* Toeplitz-style RSS hash: for every bit set in the input data, XOR into the
 * hash the 32-bit key window aligned with that bit. Both the key and the data
 * are processed as arrays of 32-bit words (sizes are in bytes and assumed to
 * be multiples of 4; the key indexing wraps around).
 *
 * NOTE(review): the casts assume rss_key and input_data are 4-byte aligned —
 * confirm against callers.
 */
static inline uint32_t
rss_func(void *rss_key, uint32_t rss_key_size, void *input_data, uint32_t input_data_size)
{
	uint32_t *key = (uint32_t *)rss_key;
	uint32_t *data = (uint32_t *)input_data;
	uint32_t key_size = rss_key_size >> 2;
	uint32_t data_size = input_data_size >> 2;
	uint32_t hash_val = 0, i;

	for (i = 0; i < data_size; i++) {
		uint32_t d;

		/* d &= (d - 1) clears the lowest set bit, so the inner loop
		 * iterates once per set bit of data[i].
		 */
		for (d = data[i]; d; d &= (d - 1)) {
			uint32_t key0, key1, pos;

			pos = rte_bsf32(d);
			key0 = key[i % key_size] << (31 - pos);

			/* Fix: when pos == 31, the previous code shifted a
			 * 32-bit value right by 32, which is undefined
			 * behavior (C11 6.5.7); the intended contribution of
			 * the next key word in that case is 0.
			 */
			key1 = (pos < 31) ? (key[(i + 1) % key_size] >> (pos + 1)) : 0;
			hash_val ^= key0 | key1;
		}
	}

	return hash_val;
}
2599 
2600 static inline void
2601 __instr_rss_exec(struct rte_swx_pipeline *p,
2602  struct thread *t,
2603  const struct instruction *ip)
2604 {
2605  uint32_t rss_obj_id = ip->rss.rss_obj_id;
2606  uint32_t dst_offset = ip->rss.dst.offset;
2607  uint32_t n_dst_bits = ip->rss.dst.n_bits;
2608  uint32_t src_struct_id = ip->rss.src.struct_id;
2609  uint32_t src_offset = ip->rss.src.offset;
2610  uint32_t n_src_bytes = ip->rss.src.n_bytes;
2611 
2612  struct rss_runtime *r = p->rss_runtime[rss_obj_id];
2613  uint8_t *src_ptr = t->structs[src_struct_id];
2614  uint32_t result;
2615 
2616  TRACE("[Thread %2u] rss %u\n",
2617  p->thread_id,
2618  rss_obj_id);
2619 
2620  result = rss_func(r->key, r->key_size, &src_ptr[src_offset], n_src_bytes);
2621  METADATA_WRITE(t, dst_offset, n_dst_bits, result);
2622 }
2623 
2624 /*
2625  * mov.
2626  */
2627 static inline void
2628 __instr_mov_exec(struct rte_swx_pipeline *p __rte_unused,
2629  struct thread *t,
2630  const struct instruction *ip)
2631 {
2632  TRACE("[Thread %2u] mov\n", p->thread_id);
2633 
2634  MOV(t, ip);
2635 }
2636 
2637 static inline void
2638 __instr_mov_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2639  struct thread *t,
2640  const struct instruction *ip)
2641 {
2642  TRACE("[Thread %2u] mov (mh)\n", p->thread_id);
2643 
2644  MOV_MH(t, ip);
2645 }
2646 
2647 static inline void
2648 __instr_mov_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2649  struct thread *t,
2650  const struct instruction *ip)
2651 {
2652  TRACE("[Thread %2u] mov (hm)\n", p->thread_id);
2653 
2654  MOV_HM(t, ip);
2655 }
2656 
2657 static inline void
2658 __instr_mov_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2659  struct thread *t,
2660  const struct instruction *ip)
2661 {
2662  TRACE("[Thread %2u] mov (hh)\n", p->thread_id);
2663 
2664  MOV_HH(t, ip);
2665 }
2666 
2667 static inline void
2668 __instr_mov_dma_exec(struct rte_swx_pipeline *p __rte_unused,
2669  struct thread *t,
2670  const struct instruction *ip)
2671 {
2672  uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2673  uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2674 
2675  uint32_t n_dst = ip->mov.dst.n_bits >> 3;
2676  uint32_t n_src = ip->mov.src.n_bits >> 3;
2677 
2678  TRACE("[Thread %2u] mov (dma) %u bytes\n", p->thread_id, n);
2679 
2680  /* Both dst and src are in NBO format. */
2681  if (n_dst > n_src) {
2682  uint32_t n_dst_zero = n_dst - n_src;
2683 
2684  /* Zero padding the most significant bytes in dst. */
2685  memset(dst, 0, n_dst_zero);
2686  dst += n_dst_zero;
2687 
2688  /* Copy src to dst. */
2689  memcpy(dst, src, n_src);
2690  } else {
2691  uint32_t n_src_skipped = n_src - n_dst;
2692 
2693  /* Copy src to dst. */
2694  src += n_src_skipped;
2695  memcpy(dst, src, n_dst);
2696  }
2697 }
2698 
2699 static inline void
2700 __instr_mov_128_exec(struct rte_swx_pipeline *p __rte_unused,
2701  struct thread *t,
2702  const struct instruction *ip)
2703 {
2704  uint8_t *dst_struct = t->structs[ip->mov.dst.struct_id];
2705  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->mov.dst.offset];
2706 
2707  uint8_t *src_struct = t->structs[ip->mov.src.struct_id];
2708  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->mov.src.offset];
2709 
2710  TRACE("[Thread %2u] mov (128)\n", p->thread_id);
2711 
2712  dst64_ptr[0] = src64_ptr[0];
2713  dst64_ptr[1] = src64_ptr[1];
2714 }
2715 
2716 static inline void
2717 __instr_mov_128_64_exec(struct rte_swx_pipeline *p __rte_unused,
2718  struct thread *t,
2719  const struct instruction *ip)
2720 {
2721  uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2722  uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2723 
2724  uint64_t *dst64 = (uint64_t *)dst;
2725  uint64_t *src64 = (uint64_t *)src;
2726 
2727  TRACE("[Thread %2u] mov (128 <- 64)\n", p->thread_id);
2728 
2729  dst64[0] = 0;
2730  dst64[1] = src64[0];
2731 }
2732 
2733 static inline void
2734 __instr_mov_64_128_exec(struct rte_swx_pipeline *p __rte_unused,
2735  struct thread *t,
2736  const struct instruction *ip)
2737 {
2738  uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2739  uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2740 
2741  uint64_t *dst64 = (uint64_t *)dst;
2742  uint64_t *src64 = (uint64_t *)src;
2743 
2744  TRACE("[Thread %2u] mov (64 <- 128)\n", p->thread_id);
2745 
2746  dst64[0] = src64[1];
2747 }
2748 
2749 static inline void
2750 __instr_mov_128_32_exec(struct rte_swx_pipeline *p __rte_unused,
2751  struct thread *t,
2752  const struct instruction *ip)
2753 {
2754  uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2755  uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2756 
2757  uint32_t *dst32 = (uint32_t *)dst;
2758  uint32_t *src32 = (uint32_t *)src;
2759 
2760  TRACE("[Thread %2u] mov (128 <- 32)\n", p->thread_id);
2761 
2762  dst32[0] = 0;
2763  dst32[1] = 0;
2764  dst32[2] = 0;
2765  dst32[3] = src32[0];
2766 }
2767 
2768 static inline void
2769 __instr_mov_32_128_exec(struct rte_swx_pipeline *p __rte_unused,
2770  struct thread *t,
2771  const struct instruction *ip)
2772 {
2773  uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2774  uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2775 
2776  uint32_t *dst32 = (uint32_t *)dst;
2777  uint32_t *src32 = (uint32_t *)src;
2778 
2779  TRACE("[Thread %2u] mov (32 <- 128)\n", p->thread_id);
2780 
2781  dst32[0] = src32[3];
2782 }
2783 
2784 static inline void
2785 __instr_mov_i_exec(struct rte_swx_pipeline *p __rte_unused,
2786  struct thread *t,
2787  const struct instruction *ip)
2788 {
2789  TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", p->thread_id, ip->mov.src_val);
2790 
2791  MOV_I(t, ip);
2792 }
2793 
2794 /*
2795  * movh.
2796  */
2797 static inline void
2798 __instr_movh_exec(struct rte_swx_pipeline *p __rte_unused,
2799  struct thread *t,
2800  const struct instruction *ip)
2801 {
2802  uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2803  uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2804 
2805  uint64_t *dst64 = (uint64_t *)dst;
2806  uint64_t *src64 = (uint64_t *)src;
2807 
2808  TRACE("[Thread %2u] movh\n", p->thread_id);
2809 
2810  dst64[0] = src64[0];
2811 }
2812 
2813 /*
2814  * dma.
2815  */
/* dma h.s t.f (xN): copy n_dma regions of action data (t->structs[0]) into
 * headers, marking each destination header valid. If a header is already
 * valid, the copy goes to its current location (which may be inside the
 * packet buffer); otherwise it goes to the header's default storage (ptr0)
 * and the header's struct pointer is redirected there.
 */
static inline void
__instr_dma_ht_many_exec(struct rte_swx_pipeline *p __rte_unused,
			 struct thread *t,
			 const struct instruction *ip,
			 uint32_t n_dma)
{
	/* Struct slot 0 holds the action data for the current table entry. */
	uint8_t *action_data = t->structs[0];
	uint64_t valid_headers = t->valid_headers;
	uint32_t i;

	for (i = 0; i < n_dma; i++) {
		uint32_t header_id = ip->dma.dst.header_id[i];
		uint32_t struct_id = ip->dma.dst.struct_id[i];
		uint32_t offset = ip->dma.src.offset[i];
		uint32_t n_bytes = ip->dma.n_bytes[i];

		struct header_runtime *h = &t->headers[header_id];
		uint8_t *h_ptr0 = h->ptr0;
		uint8_t *h_ptr = t->structs[struct_id];

		/* Valid header: overwrite in place; invalid header: use the
		 * default storage and validate it below.
		 */
		void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
			h_ptr : h_ptr0;
		void *src = &action_data[offset];

		TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);

		/* Headers. */
		memcpy(dst, src, n_bytes);
		t->structs[struct_id] = dst;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
	}

	t->valid_headers = valid_headers;
}
2850 
/* dma h.s t.f: single-region variant. */
static inline void
__instr_dma_ht_exec(struct rte_swx_pipeline *p,
		    struct thread *t,
		    const struct instruction *ip)
{
	__instr_dma_ht_many_exec(p, t, ip, 1);
}
2856 
2857 static inline void
2858 __instr_dma_ht2_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2859 {
2860  TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2861 
2862  __instr_dma_ht_many_exec(p, t, ip, 2);
2863 }
2864 
2865 static inline void
2866 __instr_dma_ht3_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2867 {
2868  TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2869 
2870  __instr_dma_ht_many_exec(p, t, ip, 3);
2871 }
2872 
2873 static inline void
2874 __instr_dma_ht4_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2875 {
2876  TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2877 
2878  __instr_dma_ht_many_exec(p, t, ip, 4);
2879 }
2880 
2881 static inline void
2882 __instr_dma_ht5_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2883 {
2884  TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2885 
2886  __instr_dma_ht_many_exec(p, t, ip, 5);
2887 }
2888 
2889 static inline void
2890 __instr_dma_ht6_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2891 {
2892  TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2893 
2894  __instr_dma_ht_many_exec(p, t, ip, 6);
2895 }
2896 
2897 static inline void
2898 __instr_dma_ht7_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2899 {
2900  TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2901 
2902  __instr_dma_ht_many_exec(p, t, ip, 7);
2903 }
2904 
2905 static inline void
2906 __instr_dma_ht8_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
2907 {
2908  TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2909 
2910  __instr_dma_ht_many_exec(p, t, ip, 8);
2911 }
2912 
2913 /*
2914  * alu.
2915  */
2916 static inline void
2917 __instr_alu_add_exec(struct rte_swx_pipeline *p __rte_unused,
2918  struct thread *t,
2919  const struct instruction *ip)
2920 {
2921  TRACE("[Thread %2u] add\n", p->thread_id);
2922 
2923  ALU(t, ip, +);
2924 }
2925 
2926 static inline void
2927 __instr_alu_add_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2928  struct thread *t,
2929  const struct instruction *ip)
2930 {
2931  TRACE("[Thread %2u] add (mh)\n", p->thread_id);
2932 
2933  ALU_MH(t, ip, +);
2934 }
2935 
2936 static inline void
2937 __instr_alu_add_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2938  struct thread *t,
2939  const struct instruction *ip)
2940 {
2941  TRACE("[Thread %2u] add (hm)\n", p->thread_id);
2942 
2943  ALU_HM(t, ip, +);
2944 }
2945 
2946 static inline void
2947 __instr_alu_add_hh_exec(struct rte_swx_pipeline *p __rte_unused,
2948  struct thread *t,
2949  const struct instruction *ip)
2950 {
2951  TRACE("[Thread %2u] add (hh)\n", p->thread_id);
2952 
2953  ALU_HH(t, ip, +);
2954 }
2955 
2956 static inline void
2957 __instr_alu_add_mi_exec(struct rte_swx_pipeline *p __rte_unused,
2958  struct thread *t,
2959  const struct instruction *ip)
2960 {
2961  TRACE("[Thread %2u] add (mi)\n", p->thread_id);
2962 
2963  ALU_MI(t, ip, +);
2964 }
2965 
2966 static inline void
2967 __instr_alu_add_hi_exec(struct rte_swx_pipeline *p __rte_unused,
2968  struct thread *t,
2969  const struct instruction *ip)
2970 {
2971  TRACE("[Thread %2u] add (hi)\n", p->thread_id);
2972 
2973  ALU_HI(t, ip, +);
2974 }
2975 
2976 static inline void
2977 __instr_alu_sub_exec(struct rte_swx_pipeline *p __rte_unused,
2978  struct thread *t,
2979  const struct instruction *ip)
2980 {
2981  TRACE("[Thread %2u] sub\n", p->thread_id);
2982 
2983  ALU(t, ip, -);
2984 }
2985 
2986 static inline void
2987 __instr_alu_sub_mh_exec(struct rte_swx_pipeline *p __rte_unused,
2988  struct thread *t,
2989  const struct instruction *ip)
2990 {
2991  TRACE("[Thread %2u] sub (mh)\n", p->thread_id);
2992 
2993  ALU_MH(t, ip, -);
2994 }
2995 
2996 static inline void
2997 __instr_alu_sub_hm_exec(struct rte_swx_pipeline *p __rte_unused,
2998  struct thread *t,
2999  const struct instruction *ip)
3000 {
3001  TRACE("[Thread %2u] sub (hm)\n", p->thread_id);
3002 
3003  ALU_HM(t, ip, -);
3004 }
3005 
3006 static inline void
3007 __instr_alu_sub_hh_exec(struct rte_swx_pipeline *p __rte_unused,
3008  struct thread *t,
3009  const struct instruction *ip)
3010 {
3011  TRACE("[Thread %2u] sub (hh)\n", p->thread_id);
3012 
3013  ALU_HH(t, ip, -);
3014 }
3015 
3016 static inline void
3017 __instr_alu_sub_mi_exec(struct rte_swx_pipeline *p __rte_unused,
3018  struct thread *t,
3019  const struct instruction *ip)
3020 {
3021  TRACE("[Thread %2u] sub (mi)\n", p->thread_id);
3022 
3023  ALU_MI(t, ip, -);
3024 }
3025 
3026 static inline void
3027 __instr_alu_sub_hi_exec(struct rte_swx_pipeline *p __rte_unused,
3028  struct thread *t,
3029  const struct instruction *ip)
3030 {
3031  TRACE("[Thread %2u] sub (hi)\n", p->thread_id);
3032 
3033  ALU_HI(t, ip, -);
3034 }
3035 
3036 static inline void
3037 __instr_alu_shl_exec(struct rte_swx_pipeline *p __rte_unused,
3038  struct thread *t,
3039  const struct instruction *ip)
3040 {
3041  TRACE("[Thread %2u] shl\n", p->thread_id);
3042 
3043  ALU(t, ip, <<);
3044 }
3045 
3046 static inline void
3047 __instr_alu_shl_mh_exec(struct rte_swx_pipeline *p __rte_unused,
3048  struct thread *t,
3049  const struct instruction *ip)
3050 {
3051  TRACE("[Thread %2u] shl (mh)\n", p->thread_id);
3052 
3053  ALU_MH(t, ip, <<);
3054 }
3055 
3056 static inline void
3057 __instr_alu_shl_hm_exec(struct rte_swx_pipeline *p __rte_unused,
3058  struct thread *t,
3059  const struct instruction *ip)
3060 {
3061  TRACE("[Thread %2u] shl (hm)\n", p->thread_id);
3062 
3063  ALU_HM(t, ip, <<);
3064 }
3065 
3066 static inline void
3067 __instr_alu_shl_hh_exec(struct rte_swx_pipeline *p __rte_unused,
3068  struct thread *t,
3069  const struct instruction *ip)
3070 {
3071  TRACE("[Thread %2u] shl (hh)\n", p->thread_id);
3072 
3073  ALU_HH(t, ip, <<);
3074 }
3075 
3076 static inline void
3077 __instr_alu_shl_mi_exec(struct rte_swx_pipeline *p __rte_unused,
3078  struct thread *t,
3079  const struct instruction *ip)
3080 {
3081  TRACE("[Thread %2u] shl (mi)\n", p->thread_id);
3082 
3083  ALU_MI(t, ip, <<);
3084 }
3085 
3086 static inline void
3087 __instr_alu_shl_hi_exec(struct rte_swx_pipeline *p __rte_unused,
3088  struct thread *t,
3089  const struct instruction *ip)
3090 {
3091  TRACE("[Thread %2u] shl (hi)\n", p->thread_id);
3092 
3093  ALU_HI(t, ip, <<);
3094 }
3095 
3096 static inline void
3097 __instr_alu_shr_exec(struct rte_swx_pipeline *p __rte_unused,
3098  struct thread *t,
3099  const struct instruction *ip)
3100 {
3101  TRACE("[Thread %2u] shr\n", p->thread_id);
3102 
3103  ALU(t, ip, >>);
3104 }
3105 
3106 static inline void
3107 __instr_alu_shr_mh_exec(struct rte_swx_pipeline *p __rte_unused,
3108  struct thread *t,
3109  const struct instruction *ip)
3110 {
3111  TRACE("[Thread %2u] shr (mh)\n", p->thread_id);
3112 
3113  ALU_MH(t, ip, >>);
3114 }
3115 
3116 static inline void
3117 __instr_alu_shr_hm_exec(struct rte_swx_pipeline *p __rte_unused,
3118  struct thread *t,
3119  const struct instruction *ip)
3120 {
3121  TRACE("[Thread %2u] shr (hm)\n", p->thread_id);
3122 
3123  ALU_HM(t, ip, >>);
3124 }
3125 
3126 static inline void
3127 __instr_alu_shr_hh_exec(struct rte_swx_pipeline *p __rte_unused,
3128  struct thread *t,
3129  const struct instruction *ip)
3130 {
3131  TRACE("[Thread %2u] shr (hh)\n", p->thread_id);
3132 
3133  ALU_HH(t, ip, >>);
3134 }
3135 
3136 static inline void
3137 __instr_alu_shr_mi_exec(struct rte_swx_pipeline *p __rte_unused,
3138  struct thread *t,
3139  const struct instruction *ip)
3140 {
3141  TRACE("[Thread %2u] shr (mi)\n", p->thread_id);
3142 
3143  /* Structs. */
3144  ALU_MI(t, ip, >>);
3145 }
3146 
3147 static inline void
3148 __instr_alu_shr_hi_exec(struct rte_swx_pipeline *p __rte_unused,
3149  struct thread *t,
3150  const struct instruction *ip)
3151 {
3152  TRACE("[Thread %2u] shr (hi)\n", p->thread_id);
3153 
3154  ALU_HI(t, ip, >>);
3155 }
3156 
3157 static inline void
3158 __instr_alu_and_exec(struct rte_swx_pipeline *p __rte_unused,
3159  struct thread *t,
3160  const struct instruction *ip)
3161 {
3162  TRACE("[Thread %2u] and\n", p->thread_id);
3163 
3164  ALU(t, ip, &);
3165 }
3166 
3167 static inline void
3168 __instr_alu_and_mh_exec(struct rte_swx_pipeline *p __rte_unused,
3169  struct thread *t,
3170  const struct instruction *ip)
3171 {
3172  TRACE("[Thread %2u] and (mh)\n", p->thread_id);
3173 
3174  ALU_MH(t, ip, &);
3175 }
3176 
3177 static inline void
3178 __instr_alu_and_hm_exec(struct rte_swx_pipeline *p __rte_unused,
3179  struct thread *t,
3180  const struct instruction *ip)
3181 {
3182  TRACE("[Thread %2u] and (hm)\n", p->thread_id);
3183 
3184  ALU_HM_FAST(t, ip, &);
3185 }
3186 
3187 static inline void
3188 __instr_alu_and_hh_exec(struct rte_swx_pipeline *p __rte_unused,
3189  struct thread *t,
3190  const struct instruction *ip)
3191 {
3192  TRACE("[Thread %2u] and (hh)\n", p->thread_id);
3193 
3194  ALU_HH_FAST(t, ip, &);
3195 }
3196 
3197 static inline void
3198 __instr_alu_and_i_exec(struct rte_swx_pipeline *p __rte_unused,
3199  struct thread *t,
3200  const struct instruction *ip)
3201 {
3202  TRACE("[Thread %2u] and (i)\n", p->thread_id);
3203 
3204  ALU_I(t, ip, &);
3205 }
3206 
3207 static inline void
3208 __instr_alu_or_exec(struct rte_swx_pipeline *p __rte_unused,
3209  struct thread *t,
3210  const struct instruction *ip)
3211 {
3212  TRACE("[Thread %2u] or\n", p->thread_id);
3213 
3214  ALU(t, ip, |);
3215 }
3216 
3217 static inline void
3218 __instr_alu_or_mh_exec(struct rte_swx_pipeline *p __rte_unused,
3219  struct thread *t,
3220  const struct instruction *ip)
3221 {
3222  TRACE("[Thread %2u] or (mh)\n", p->thread_id);
3223 
3224  ALU_MH(t, ip, |);
3225 }
3226 
3227 static inline void
3228 __instr_alu_or_hm_exec(struct rte_swx_pipeline *p __rte_unused,
3229  struct thread *t,
3230  const struct instruction *ip)
3231 {
3232  TRACE("[Thread %2u] or (hm)\n", p->thread_id);
3233 
3234  ALU_HM_FAST(t, ip, |);
3235 }
3236 
3237 static inline void
3238 __instr_alu_or_hh_exec(struct rte_swx_pipeline *p __rte_unused,
3239  struct thread *t,
3240  const struct instruction *ip)
3241 {
3242  TRACE("[Thread %2u] or (hh)\n", p->thread_id);
3243 
3244  ALU_HH_FAST(t, ip, |);
3245 }
3246 
3247 static inline void
3248 __instr_alu_or_i_exec(struct rte_swx_pipeline *p __rte_unused,
3249  struct thread *t,
3250  const struct instruction *ip)
3251 {
3252  TRACE("[Thread %2u] or (i)\n", p->thread_id);
3253 
3254  ALU_I(t, ip, |);
3255 }
3256 
3257 static inline void
3258 __instr_alu_xor_exec(struct rte_swx_pipeline *p __rte_unused,
3259  struct thread *t,
3260  const struct instruction *ip)
3261 {
3262  TRACE("[Thread %2u] xor\n", p->thread_id);
3263 
3264  ALU(t, ip, ^);
3265 }
3266 
3267 static inline void
3268 __instr_alu_xor_mh_exec(struct rte_swx_pipeline *p __rte_unused,
3269  struct thread *t,
3270  const struct instruction *ip)
3271 {
3272  TRACE("[Thread %2u] xor (mh)\n", p->thread_id);
3273 
3274  ALU_MH(t, ip, ^);
3275 }
3276 
3277 static inline void
3278 __instr_alu_xor_hm_exec(struct rte_swx_pipeline *p __rte_unused,
3279  struct thread *t,
3280  const struct instruction *ip)
3281 {
3282  TRACE("[Thread %2u] xor (hm)\n", p->thread_id);
3283 
3284  ALU_HM_FAST(t, ip, ^);
3285 }
3286 
3287 static inline void
3288 __instr_alu_xor_hh_exec(struct rte_swx_pipeline *p __rte_unused,
3289  struct thread *t,
3290  const struct instruction *ip)
3291 {
3292  TRACE("[Thread %2u] xor (hh)\n", p->thread_id);
3293 
3294  ALU_HH_FAST(t, ip, ^);
3295 }
3296 
3297 static inline void
3298 __instr_alu_xor_i_exec(struct rte_swx_pipeline *p __rte_unused,
3299  struct thread *t,
3300  const struct instruction *ip)
3301 {
3302  TRACE("[Thread %2u] xor (i)\n", p->thread_id);
3303 
3304  ALU_I(t, ip, ^);
3305 }
3306 
/* ckadd (field): incrementally update the 16-bit 1's complement checksum at
 * dst by adding the src field (masked to src.n_bits, at most 64 bits, and
 * folded as two 32-bit halves). The result is stored back to dst, with the
 * all-zero value mapped to 0xFFFF as per 1's complement convention.
 *
 * NOTE(review): the 64-bit read through src64_ptr may be unaligned on some
 * targets — presumably acceptable for the supported platforms; confirm.
 */
static inline void
__instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p __rte_unused,
			     struct thread *t,
			     const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* Keep only the low src.n_bits bits of the 64-bit read. */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Initialize the result with destination 1's complement. */
	r = dst;
	r = ~r & 0xFFFF;

	/* The first input (r) is a 16-bit number. The second and the third
	 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
	 * three numbers (output r) is a 34-bit number.
	 */
	r += (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is an 18-bit
	 * number. In the worst case scenario, the sum of the two numbers is a
	 * 19-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
	 * therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* Apply 1's complement to the result. */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
3364 
3365 static inline void
3366 __instr_alu_cksub_field_exec(struct rte_swx_pipeline *p __rte_unused,
3367  struct thread *t,
3368  const struct instruction *ip)
3369 {
3370  uint8_t *dst_struct, *src_struct;
3371  uint16_t *dst16_ptr, dst;
3372  uint64_t *src64_ptr, src64, src64_mask, src;
3373  uint64_t r;
3374 
3375  TRACE("[Thread %2u] cksub (field)\n", p->thread_id);
3376 
3377  /* Structs. */
3378  dst_struct = t->structs[ip->alu.dst.struct_id];
3379  dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3380  dst = *dst16_ptr;
3381 
3382  src_struct = t->structs[ip->alu.src.struct_id];
3383  src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3384  src64 = *src64_ptr;
3385  src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3386  src = src64 & src64_mask;
3387 
3388  /* Initialize the result with destination 1's complement. */
3389  r = dst;
3390  r = ~r & 0xFFFF;
3391 
3392  /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
3393  * the following sequence of operations in 2's complement arithmetic:
3394  * a '- b = (a - b) % 0xFFFF.
3395  *
3396  * In order to prevent an underflow for the below subtraction, in which
3397  * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
3398  * minuend), we first add a multiple of the 0xFFFF modulus to the
3399  * minuend. The number we add to the minuend needs to be a 34-bit number
3400  * or higher, so for readability reasons we picked the 36-bit multiple.
3401  * We are effectively turning the 16-bit minuend into a 36-bit number:
3402  * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
3403  */
3404  r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */
3405 
3406  /* A 33-bit number is subtracted from a 36-bit number (the input r). The
3407  * result (the output r) is a 36-bit number.
3408  */
3409  r -= (src >> 32) + (src & 0xFFFFFFFF);
3410 
3411  /* The first input is a 16-bit number. The second input is a 20-bit
3412  * number. Their sum is a 21-bit number.
3413  */
3414  r = (r & 0xFFFF) + (r >> 16);
3415 
3416  /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3417  * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
3418  */
3419  r = (r & 0xFFFF) + (r >> 16);
3420 
3421  /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3422  * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3423  * 0x1001E), the output r is (0 .. 31). So no carry bit can be
3424  * generated, therefore the output r is always a 16-bit number.
3425  */
3426  r = (r & 0xFFFF) + (r >> 16);
3427 
3428  /* Apply 1's complement to the result. */
3429  r = ~r & 0xFFFF;
3430  r = r ? r : 0xFFFF;
3431 
3432  *dst16_ptr = (uint16_t)r;
3433 }
3434 
3435 static inline void
3436 __instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p __rte_unused,
3437  struct thread *t,
3438  const struct instruction *ip)
3439 {
3440  uint8_t *dst_struct, *src_struct;
3441  uint16_t *dst16_ptr, dst;
3442  uint32_t *src32_ptr;
3443  uint64_t r0, r1;
3444 
3445  TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3446 
3447  /* Structs. */
3448  dst_struct = t->structs[ip->alu.dst.struct_id];
3449  dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3450  dst = *dst16_ptr;
3451 
3452  src_struct = t->structs[ip->alu.src.struct_id];
3453  src32_ptr = (uint32_t *)&src_struct[0];
3454 
3455  /* Initialize the result with destination 1's complement. */
3456  r0 = dst;
3457  r0 = ~r0 & 0xFFFF;
3458 
3459  r0 += src32_ptr[0]; /* The output r0 is a 33-bit number. */
3460  r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
3461  r0 += src32_ptr[2]; /* The output r0 is a 34-bit number. */
3462  r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
3463  r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */
3464 
3465  /* The first input is a 16-bit number. The second input is a 19-bit
3466  * number. Their sum is a 20-bit number.
3467  */
3468  r0 = (r0 & 0xFFFF) + (r0 >> 16);
3469 
3470  /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3471  * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
3472  */
3473  r0 = (r0 & 0xFFFF) + (r0 >> 16);
3474 
3475  /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3476  * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3477  * 0x1000E), the output r is (0 .. 15). So no carry bit can be
3478  * generated, therefore the output r is always a 16-bit number.
3479  */
3480  r0 = (r0 & 0xFFFF) + (r0 >> 16);
3481 
3482  /* Apply 1's complement to the result. */
3483  r0 = ~r0 & 0xFFFF;
3484  r0 = r0 ? r0 : 0xFFFF;
3485 
3486  *dst16_ptr = (uint16_t)r0;
3487 }
3488 
3489 static inline void
3490 __instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p __rte_unused,
3491  struct thread *t,
3492  const struct instruction *ip)
3493 {
3494  uint32_t src_header_id = ip->alu.src.n_bits; /* The src header ID is stored here. */
3495  uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
3496  uint8_t *dst_struct, *src_struct;
3497  uint16_t *dst16_ptr, dst;
3498  uint32_t *src32_ptr;
3499  uint64_t r;
3500  uint32_t i;
3501 
3502  if (n_src_header_bytes == 20) {
3503  __instr_alu_ckadd_struct20_exec(p, t, ip);
3504  return;
3505  }
3506 
3507  TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);
3508 
3509  /* Structs. */
3510  dst_struct = t->structs[ip->alu.dst.struct_id];
3511  dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3512  dst = *dst16_ptr;
3513 
3514  src_struct = t->structs[ip->alu.src.struct_id];
3515  src32_ptr = (uint32_t *)&src_struct[0];
3516 
3517  /* Initialize the result with destination 1's complement. */
3518  r = dst;
3519  r = ~r & 0xFFFF;
3520 
3521  /* The max number of 32-bit words in a 32K-byte header is 2^13.
3522  * Therefore, in the worst case scenario, a 45-bit number is added to a
3523  * 16-bit number (the input r), so the output r is 46-bit number.
3524  */
3525  for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
3526  r += *src32_ptr;
3527 
3528  /* The first input is a 16-bit number. The second input is a 30-bit
3529  * number. Their sum is a 31-bit number.
3530  */
3531  r = (r & 0xFFFF) + (r >> 16);
3532 
3533  /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
3534  * a 15-bit number (0 .. 0x7FFF). The sum is a 17-bit number (0 .. 0x17FFE).
3535  */
3536  r = (r & 0xFFFF) + (r >> 16);
3537 
3538  /* When the input r is (0 .. 0xFFFF), the output r is equal to the input
3539  * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
3540  * 0x17FFE), the output r is (0 .. 0x7FFF). So no carry bit can be
3541  * generated, therefore the output r is always a 16-bit number.
3542  */
3543  r = (r & 0xFFFF) + (r >> 16);
3544 
3545  /* Apply 1's complement to the result. */
3546  r = ~r & 0xFFFF;
3547  r = r ? r : 0xFFFF;
3548 
3549  *dst16_ptr = (uint16_t)r;
3550 }
3551 
3552 /*
3553  * Register array.
3554  */
3555 static inline uint64_t *
3556 instr_regarray_regarray(struct rte_swx_pipeline *p, const struct instruction *ip)
3557 {
3558  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3559  return r->regarray;
3560 }
3561 
3562 static inline uint64_t
3563 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3564 {
3565  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3566 
3567  uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3568  uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3569  uint64_t idx64 = *idx64_ptr;
3570  uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3571  uint64_t idx = idx64 & idx64_mask & r->size_mask;
3572 
3573  return idx;
3574 }
3575 
3576 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3577 
3578 static inline uint64_t
3579 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3580 {
3581  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3582 
3583  uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3584  uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3585  uint64_t idx64 = *idx64_ptr;
3586  uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
3587 
3588  return idx;
3589 }
3590 
3591 #else
3592 
3593 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
3594 
3595 #endif
3596 
3597 static inline uint64_t
3598 instr_regarray_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
3599 {
3600  struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3601 
3602  uint64_t idx = ip->regarray.idx_val & r->size_mask;
3603 
3604  return idx;
3605 }
3606 
3607 static inline uint64_t
3608 instr_regarray_src_hbo(struct thread *t, const struct instruction *ip)
3609 {
3610  uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3611  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3612  uint64_t src64 = *src64_ptr;
3613  uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3614  uint64_t src = src64 & src64_mask;
3615 
3616  return src;
3617 }
3618 
3619 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3620 
3621 static inline uint64_t
3622 instr_regarray_src_nbo(struct thread *t, const struct instruction *ip)
3623 {
3624  uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3625  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3626  uint64_t src64 = *src64_ptr;
3627  uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
3628 
3629  return src;
3630 }
3631 
3632 #else
3633 
3634 #define instr_regarray_src_nbo instr_regarray_src_hbo
3635 
3636 #endif
3637 
3638 static inline void
3639 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
3640 {
3641  uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3642  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3643  uint64_t dst64 = *dst64_ptr;
3644  uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3645 
3646  *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3647 
3648 }
3649 
3650 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3651 
3652 static inline void
3653 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
3654 {
3655  uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3656  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3657  uint64_t dst64 = *dst64_ptr;
3658  uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3659 
3660  src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
3661  *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3662 }
3663 
3664 #else
3665 
3666 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
3667 
3668 #endif
3669 
3670 static inline void
3671 __instr_regprefetch_rh_exec(struct rte_swx_pipeline *p,
3672  struct thread *t,
3673  const struct instruction *ip)
3674 {
3675  uint64_t *regarray, idx;
3676 
3677  TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3678 
3679  regarray = instr_regarray_regarray(p, ip);
3680  idx = instr_regarray_idx_nbo(p, t, ip);
3681  rte_prefetch0(&regarray[idx]);
3682 }
3683 
3684 static inline void
3685 __instr_regprefetch_rm_exec(struct rte_swx_pipeline *p,
3686  struct thread *t,
3687  const struct instruction *ip)
3688 {
3689  uint64_t *regarray, idx;
3690 
3691  TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3692 
3693  regarray = instr_regarray_regarray(p, ip);
3694  idx = instr_regarray_idx_hbo(p, t, ip);
3695  rte_prefetch0(&regarray[idx]);
3696 }
3697 
3698 static inline void
3699 __instr_regprefetch_ri_exec(struct rte_swx_pipeline *p,
3700  struct thread *t __rte_unused,
3701  const struct instruction *ip)
3702 {
3703  uint64_t *regarray, idx;
3704 
3705  TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3706 
3707  regarray = instr_regarray_regarray(p, ip);
3708  idx = instr_regarray_idx_imm(p, ip);
3709  rte_prefetch0(&regarray[idx]);
3710 }
3711 
3712 static inline void
3713 __instr_regrd_hrh_exec(struct rte_swx_pipeline *p,
3714  struct thread *t,
3715  const struct instruction *ip)
3716 {
3717  uint64_t *regarray, idx;
3718 
3719  TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3720 
3721  regarray = instr_regarray_regarray(p, ip);
3722  idx = instr_regarray_idx_nbo(p, t, ip);
3723  instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3724 }
3725 
3726 static inline void
3727 __instr_regrd_hrm_exec(struct rte_swx_pipeline *p,
3728  struct thread *t,
3729  const struct instruction *ip)
3730 {
3731  uint64_t *regarray, idx;
3732 
3733  TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3734 
3735  /* Structs. */
3736  regarray = instr_regarray_regarray(p, ip);
3737  idx = instr_regarray_idx_hbo(p, t, ip);
3738  instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3739 }
3740 
3741 static inline void
3742 __instr_regrd_mrh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3743 {
3744  uint64_t *regarray, idx;
3745 
3746  TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3747 
3748  regarray = instr_regarray_regarray(p, ip);
3749  idx = instr_regarray_idx_nbo(p, t, ip);
3750  instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3751 }
3752 
3753 static inline void
3754 __instr_regrd_mrm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3755 {
3756  uint64_t *regarray, idx;
3757 
3758  TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3759 
3760  regarray = instr_regarray_regarray(p, ip);
3761  idx = instr_regarray_idx_hbo(p, t, ip);
3762  instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3763 }
3764 
3765 static inline void
3766 __instr_regrd_hri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3767 {
3768  uint64_t *regarray, idx;
3769 
3770  TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3771 
3772  regarray = instr_regarray_regarray(p, ip);
3773  idx = instr_regarray_idx_imm(p, ip);
3774  instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3775 }
3776 
3777 static inline void
3778 __instr_regrd_mri_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3779 {
3780  uint64_t *regarray, idx;
3781 
3782  TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3783 
3784  regarray = instr_regarray_regarray(p, ip);
3785  idx = instr_regarray_idx_imm(p, ip);
3786  instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3787 }
3788 
3789 static inline void
3790 __instr_regwr_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3791 {
3792  uint64_t *regarray, idx, src;
3793 
3794  TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3795 
3796  regarray = instr_regarray_regarray(p, ip);
3797  idx = instr_regarray_idx_nbo(p, t, ip);
3798  src = instr_regarray_src_nbo(t, ip);
3799  regarray[idx] = src;
3800 }
3801 
3802 static inline void
3803 __instr_regwr_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3804 {
3805  uint64_t *regarray, idx, src;
3806 
3807  TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3808 
3809  regarray = instr_regarray_regarray(p, ip);
3810  idx = instr_regarray_idx_nbo(p, t, ip);
3811  src = instr_regarray_src_hbo(t, ip);
3812  regarray[idx] = src;
3813 }
3814 
3815 static inline void
3816 __instr_regwr_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3817 {
3818  uint64_t *regarray, idx, src;
3819 
3820  TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3821 
3822  regarray = instr_regarray_regarray(p, ip);
3823  idx = instr_regarray_idx_hbo(p, t, ip);
3824  src = instr_regarray_src_nbo(t, ip);
3825  regarray[idx] = src;
3826 }
3827 
3828 static inline void
3829 __instr_regwr_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3830 {
3831  uint64_t *regarray, idx, src;
3832 
3833  TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3834 
3835  regarray = instr_regarray_regarray(p, ip);
3836  idx = instr_regarray_idx_hbo(p, t, ip);
3837  src = instr_regarray_src_hbo(t, ip);
3838  regarray[idx] = src;
3839 }
3840 
3841 static inline void
3842 __instr_regwr_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3843 {
3844  uint64_t *regarray, idx, src;
3845 
3846  TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3847 
3848  regarray = instr_regarray_regarray(p, ip);
3849  idx = instr_regarray_idx_nbo(p, t, ip);
3850  src = ip->regarray.dstsrc_val;
3851  regarray[idx] = src;
3852 }
3853 
3854 static inline void
3855 __instr_regwr_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3856 {
3857  uint64_t *regarray, idx, src;
3858 
3859  TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3860 
3861  regarray = instr_regarray_regarray(p, ip);
3862  idx = instr_regarray_idx_hbo(p, t, ip);
3863  src = ip->regarray.dstsrc_val;
3864  regarray[idx] = src;
3865 }
3866 
3867 static inline void
3868 __instr_regwr_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3869 {
3870  uint64_t *regarray, idx, src;
3871 
3872  TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3873 
3874  regarray = instr_regarray_regarray(p, ip);
3875  idx = instr_regarray_idx_imm(p, ip);
3876  src = instr_regarray_src_nbo(t, ip);
3877  regarray[idx] = src;
3878 }
3879 
3880 static inline void
3881 __instr_regwr_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3882 {
3883  uint64_t *regarray, idx, src;
3884 
3885  TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3886 
3887  regarray = instr_regarray_regarray(p, ip);
3888  idx = instr_regarray_idx_imm(p, ip);
3889  src = instr_regarray_src_hbo(t, ip);
3890  regarray[idx] = src;
3891 }
3892 
3893 static inline void
3894 __instr_regwr_rii_exec(struct rte_swx_pipeline *p,
3895  struct thread *t __rte_unused,
3896  const struct instruction *ip)
3897 {
3898  uint64_t *regarray, idx, src;
3899 
3900  TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3901 
3902  regarray = instr_regarray_regarray(p, ip);
3903  idx = instr_regarray_idx_imm(p, ip);
3904  src = ip->regarray.dstsrc_val;
3905  regarray[idx] = src;
3906 }
3907 
3908 static inline void
3909 __instr_regadd_rhh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3910 {
3911  uint64_t *regarray, idx, src;
3912 
3913  TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3914 
3915  regarray = instr_regarray_regarray(p, ip);
3916  idx = instr_regarray_idx_nbo(p, t, ip);
3917  src = instr_regarray_src_nbo(t, ip);
3918  regarray[idx] += src;
3919 }
3920 
3921 static inline void
3922 __instr_regadd_rhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3923 {
3924  uint64_t *regarray, idx, src;
3925 
3926  TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3927 
3928  regarray = instr_regarray_regarray(p, ip);
3929  idx = instr_regarray_idx_nbo(p, t, ip);
3930  src = instr_regarray_src_hbo(t, ip);
3931  regarray[idx] += src;
3932 }
3933 
3934 static inline void
3935 __instr_regadd_rmh_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3936 {
3937  uint64_t *regarray, idx, src;
3938 
3939  TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3940 
3941  regarray = instr_regarray_regarray(p, ip);
3942  idx = instr_regarray_idx_hbo(p, t, ip);
3943  src = instr_regarray_src_nbo(t, ip);
3944  regarray[idx] += src;
3945 }
3946 
3947 static inline void
3948 __instr_regadd_rmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3949 {
3950  uint64_t *regarray, idx, src;
3951 
3952  TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3953 
3954  regarray = instr_regarray_regarray(p, ip);
3955  idx = instr_regarray_idx_hbo(p, t, ip);
3956  src = instr_regarray_src_hbo(t, ip);
3957  regarray[idx] += src;
3958 }
3959 
3960 static inline void
3961 __instr_regadd_rhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3962 {
3963  uint64_t *regarray, idx, src;
3964 
3965  TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3966 
3967  regarray = instr_regarray_regarray(p, ip);
3968  idx = instr_regarray_idx_nbo(p, t, ip);
3969  src = ip->regarray.dstsrc_val;
3970  regarray[idx] += src;
3971 }
3972 
3973 static inline void
3974 __instr_regadd_rmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3975 {
3976  uint64_t *regarray, idx, src;
3977 
3978  TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3979 
3980  regarray = instr_regarray_regarray(p, ip);
3981  idx = instr_regarray_idx_hbo(p, t, ip);
3982  src = ip->regarray.dstsrc_val;
3983  regarray[idx] += src;
3984 }
3985 
3986 static inline void
3987 __instr_regadd_rih_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
3988 {
3989  uint64_t *regarray, idx, src;
3990 
3991  TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3992 
3993  regarray = instr_regarray_regarray(p, ip);
3994  idx = instr_regarray_idx_imm(p, ip);
3995  src = instr_regarray_src_nbo(t, ip);
3996  regarray[idx] += src;
3997 }
3998 
3999 static inline void
4000 __instr_regadd_rim_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4001 {
4002  uint64_t *regarray, idx, src;
4003 
4004  TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
4005 
4006  regarray = instr_regarray_regarray(p, ip);
4007  idx = instr_regarray_idx_imm(p, ip);
4008  src = instr_regarray_src_hbo(t, ip);
4009  regarray[idx] += src;
4010 }
4011 
4012 static inline void
4013 __instr_regadd_rii_exec(struct rte_swx_pipeline *p,
4014  struct thread *t __rte_unused,
4015  const struct instruction *ip)
4016 {
4017  uint64_t *regarray, idx, src;
4018 
4019  TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
4020 
4021  regarray = instr_regarray_regarray(p, ip);
4022  idx = instr_regarray_idx_imm(p, ip);
4023  src = ip->regarray.dstsrc_val;
4024  regarray[idx] += src;
4025 }
4026 
4027 /*
4028  * metarray.
4029  */
4030 static inline struct meter *
4031 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4032 {
4033  struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
4034 
4035  uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
4036  uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
4037  uint64_t idx64 = *idx64_ptr;
4038  uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
4039  uint64_t idx = idx64 & idx64_mask & r->size_mask;
4040 
4041  return &r->metarray[idx];
4042 }
4043 
4044 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4045 
4046 static inline struct meter *
4047 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4048 {
4049  struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
4050 
4051  uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
4052  uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
4053  uint64_t idx64 = *idx64_ptr;
4054  uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
4055 
4056  return &r->metarray[idx];
4057 }
4058 
4059 #else
4060 
4061 #define instr_meter_idx_nbo instr_meter_idx_hbo
4062 
4063 #endif
4064 
4065 static inline struct meter *
4066 instr_meter_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
4067 {
4068  struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
4069 
4070  uint64_t idx = ip->meter.idx_val & r->size_mask;
4071 
4072  return &r->metarray[idx];
4073 }
4074 
4075 static inline uint32_t
4076 instr_meter_length_hbo(struct thread *t, const struct instruction *ip)
4077 {
4078  uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
4079  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
4080  uint64_t src64 = *src64_ptr;
4081  uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
4082  uint64_t src = src64 & src64_mask;
4083 
4084  return (uint32_t)src;
4085 }
4086 
4087 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4088 
4089 static inline uint32_t
4090 instr_meter_length_nbo(struct thread *t, const struct instruction *ip)
4091 {
4092  uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
4093  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
4094  uint64_t src64 = *src64_ptr;
4095  uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
4096 
4097  return (uint32_t)src;
4098 }
4099 
4100 #else
4101 
4102 #define instr_meter_length_nbo instr_meter_length_hbo
4103 
4104 #endif
4105 
4106 static inline enum rte_color
4107 instr_meter_color_in_hbo(struct thread *t, const struct instruction *ip)
4108 {
4109  uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
4110  uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
4111  uint64_t src64 = *src64_ptr;
4112  uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
4113  uint64_t src = src64 & src64_mask;
4114 
4115  return (enum rte_color)src;
4116 }
4117 
4118 static inline void
4119 instr_meter_color_out_hbo_set(struct thread *t,
4120  const struct instruction *ip,
4121  enum rte_color color_out)
4122 {
4123  uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
4124  uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
4125  uint64_t dst64 = *dst64_ptr;
4126  uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
4127 
4128  uint64_t src = (uint64_t)color_out;
4129 
4130  *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
4131 }
4132 
4133 static inline void
4134 __instr_metprefetch_h_exec(struct rte_swx_pipeline *p,
4135  struct thread *t,
4136  const struct instruction *ip)
4137 {
4138  struct meter *m;
4139 
4140  TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id);
4141 
4142  m = instr_meter_idx_nbo(p, t, ip);
4143  rte_prefetch0(m);
4144 }
4145 
4146 static inline void
4147 __instr_metprefetch_m_exec(struct rte_swx_pipeline *p,
4148  struct thread *t,
4149  const struct instruction *ip)
4150 {
4151  struct meter *m;
4152 
4153  TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id);
4154 
4155  m = instr_meter_idx_hbo(p, t, ip);
4156  rte_prefetch0(m);
4157 }
4158 
4159 static inline void
4160 __instr_metprefetch_i_exec(struct rte_swx_pipeline *p,
4161  struct thread *t __rte_unused,
4162  const struct instruction *ip)
4163 {
4164  struct meter *m;
4165 
4166  TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id);
4167 
4168  m = instr_meter_idx_imm(p, ip);
4169  rte_prefetch0(m);
4170 }
4171 
4172 static inline void
4173 __instr_meter_hhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4174 {
4175  struct meter *m;
4176  uint64_t time, n_pkts, n_bytes;
4177  uint32_t length;
4178  enum rte_color color_in, color_out;
4179 
4180  TRACE("[Thread %2u] meter (hhm)\n", p->thread_id);
4181 
4182  m = instr_meter_idx_nbo(p, t, ip);
4183  rte_prefetch0(m->n_pkts);
4184  time = rte_get_tsc_cycles();
4185  length = instr_meter_length_nbo(t, ip);
4186  color_in = instr_meter_color_in_hbo(t, ip);
4187 
4188  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4189  &m->profile->profile,
4190  time,
4191  length,
4192  color_in);
4193 
4194  color_out &= m->color_mask;
4195 
4196  n_pkts = m->n_pkts[color_out];
4197  n_bytes = m->n_bytes[color_out];
4198 
4199  instr_meter_color_out_hbo_set(t, ip, color_out);
4200 
4201  m->n_pkts[color_out] = n_pkts + 1;
4202  m->n_bytes[color_out] = n_bytes + length;
4203 }
4204 
4205 static inline void
4206 __instr_meter_hhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4207 {
4208  struct meter *m;
4209  uint64_t time, n_pkts, n_bytes;
4210  uint32_t length;
4211  enum rte_color color_in, color_out;
4212 
4213  TRACE("[Thread %2u] meter (hhi)\n", p->thread_id);
4214 
4215  m = instr_meter_idx_nbo(p, t, ip);
4216  rte_prefetch0(m->n_pkts);
4217  time = rte_get_tsc_cycles();
4218  length = instr_meter_length_nbo(t, ip);
4219  color_in = (enum rte_color)ip->meter.color_in_val;
4220 
4221  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4222  &m->profile->profile,
4223  time,
4224  length,
4225  color_in);
4226 
4227  color_out &= m->color_mask;
4228 
4229  n_pkts = m->n_pkts[color_out];
4230  n_bytes = m->n_bytes[color_out];
4231 
4232  instr_meter_color_out_hbo_set(t, ip, color_out);
4233 
4234  m->n_pkts[color_out] = n_pkts + 1;
4235  m->n_bytes[color_out] = n_bytes + length;
4236 }
4237 
4238 static inline void
4239 __instr_meter_hmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4240 {
4241  struct meter *m;
4242  uint64_t time, n_pkts, n_bytes;
4243  uint32_t length;
4244  enum rte_color color_in, color_out;
4245 
4246  TRACE("[Thread %2u] meter (hmm)\n", p->thread_id);
4247 
4248  m = instr_meter_idx_nbo(p, t, ip);
4249  rte_prefetch0(m->n_pkts);
4250  time = rte_get_tsc_cycles();
4251  length = instr_meter_length_hbo(t, ip);
4252  color_in = instr_meter_color_in_hbo(t, ip);
4253 
4254  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4255  &m->profile->profile,
4256  time,
4257  length,
4258  color_in);
4259 
4260  color_out &= m->color_mask;
4261 
4262  n_pkts = m->n_pkts[color_out];
4263  n_bytes = m->n_bytes[color_out];
4264 
4265  instr_meter_color_out_hbo_set(t, ip, color_out);
4266 
4267  m->n_pkts[color_out] = n_pkts + 1;
4268  m->n_bytes[color_out] = n_bytes + length;
4269 }
4270 
4271 static inline void
4272 __instr_meter_hmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4273 {
4274  struct meter *m;
4275  uint64_t time, n_pkts, n_bytes;
4276  uint32_t length;
4277  enum rte_color color_in, color_out;
4278 
4279  TRACE("[Thread %2u] meter (hmi)\n", p->thread_id);
4280 
4281  m = instr_meter_idx_nbo(p, t, ip);
4282  rte_prefetch0(m->n_pkts);
4283  time = rte_get_tsc_cycles();
4284  length = instr_meter_length_hbo(t, ip);
4285  color_in = (enum rte_color)ip->meter.color_in_val;
4286 
4287  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4288  &m->profile->profile,
4289  time,
4290  length,
4291  color_in);
4292 
4293  color_out &= m->color_mask;
4294 
4295  n_pkts = m->n_pkts[color_out];
4296  n_bytes = m->n_bytes[color_out];
4297 
4298  instr_meter_color_out_hbo_set(t, ip, color_out);
4299 
4300  m->n_pkts[color_out] = n_pkts + 1;
4301  m->n_bytes[color_out] = n_bytes + length;
4302 }
4303 
4304 static inline void
4305 __instr_meter_mhm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4306 {
4307  struct meter *m;
4308  uint64_t time, n_pkts, n_bytes;
4309  uint32_t length;
4310  enum rte_color color_in, color_out;
4311 
4312  TRACE("[Thread %2u] meter (mhm)\n", p->thread_id);
4313 
4314  m = instr_meter_idx_hbo(p, t, ip);
4315  rte_prefetch0(m->n_pkts);
4316  time = rte_get_tsc_cycles();
4317  length = instr_meter_length_nbo(t, ip);
4318  color_in = instr_meter_color_in_hbo(t, ip);
4319 
4320  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4321  &m->profile->profile,
4322  time,
4323  length,
4324  color_in);
4325 
4326  color_out &= m->color_mask;
4327 
4328  n_pkts = m->n_pkts[color_out];
4329  n_bytes = m->n_bytes[color_out];
4330 
4331  instr_meter_color_out_hbo_set(t, ip, color_out);
4332 
4333  m->n_pkts[color_out] = n_pkts + 1;
4334  m->n_bytes[color_out] = n_bytes + length;
4335 }
4336 
4337 static inline void
4338 __instr_meter_mhi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4339 {
4340  struct meter *m;
4341  uint64_t time, n_pkts, n_bytes;
4342  uint32_t length;
4343  enum rte_color color_in, color_out;
4344 
4345  TRACE("[Thread %2u] meter (mhi)\n", p->thread_id);
4346 
4347  m = instr_meter_idx_hbo(p, t, ip);
4348  rte_prefetch0(m->n_pkts);
4349  time = rte_get_tsc_cycles();
4350  length = instr_meter_length_nbo(t, ip);
4351  color_in = (enum rte_color)ip->meter.color_in_val;
4352 
4353  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4354  &m->profile->profile,
4355  time,
4356  length,
4357  color_in);
4358 
4359  color_out &= m->color_mask;
4360 
4361  n_pkts = m->n_pkts[color_out];
4362  n_bytes = m->n_bytes[color_out];
4363 
4364  instr_meter_color_out_hbo_set(t, ip, color_out);
4365 
4366  m->n_pkts[color_out] = n_pkts + 1;
4367  m->n_bytes[color_out] = n_bytes + length;
4368 }
4369 
4370 static inline void
4371 __instr_meter_mmm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4372 {
4373  struct meter *m;
4374  uint64_t time, n_pkts, n_bytes;
4375  uint32_t length;
4376  enum rte_color color_in, color_out;
4377 
4378  TRACE("[Thread %2u] meter (mmm)\n", p->thread_id);
4379 
4380  m = instr_meter_idx_hbo(p, t, ip);
4381  rte_prefetch0(m->n_pkts);
4382  time = rte_get_tsc_cycles();
4383  length = instr_meter_length_hbo(t, ip);
4384  color_in = instr_meter_color_in_hbo(t, ip);
4385 
4386  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4387  &m->profile->profile,
4388  time,
4389  length,
4390  color_in);
4391 
4392  color_out &= m->color_mask;
4393 
4394  n_pkts = m->n_pkts[color_out];
4395  n_bytes = m->n_bytes[color_out];
4396 
4397  instr_meter_color_out_hbo_set(t, ip, color_out);
4398 
4399  m->n_pkts[color_out] = n_pkts + 1;
4400  m->n_bytes[color_out] = n_bytes + length;
4401 }
4402 
4403 static inline void
4404 __instr_meter_mmi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4405 {
4406  struct meter *m;
4407  uint64_t time, n_pkts, n_bytes;
4408  uint32_t length;
4409  enum rte_color color_in, color_out;
4410 
4411  TRACE("[Thread %2u] meter (mmi)\n", p->thread_id);
4412 
4413  m = instr_meter_idx_hbo(p, t, ip);
4414  rte_prefetch0(m->n_pkts);
4415  time = rte_get_tsc_cycles();
4416  length = instr_meter_length_hbo(t, ip);
4417  color_in = (enum rte_color)ip->meter.color_in_val;
4418 
4419  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4420  &m->profile->profile,
4421  time,
4422  length,
4423  color_in);
4424 
4425  color_out &= m->color_mask;
4426 
4427  n_pkts = m->n_pkts[color_out];
4428  n_bytes = m->n_bytes[color_out];
4429 
4430  instr_meter_color_out_hbo_set(t, ip, color_out);
4431 
4432  m->n_pkts[color_out] = n_pkts + 1;
4433  m->n_bytes[color_out] = n_bytes + length;
4434 }
4435 
4436 static inline void
4437 __instr_meter_ihm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4438 {
4439  struct meter *m;
4440  uint64_t time, n_pkts, n_bytes;
4441  uint32_t length;
4442  enum rte_color color_in, color_out;
4443 
4444  TRACE("[Thread %2u] meter (ihm)\n", p->thread_id);
4445 
4446  m = instr_meter_idx_imm(p, ip);
4447  rte_prefetch0(m->n_pkts);
4448  time = rte_get_tsc_cycles();
4449  length = instr_meter_length_nbo(t, ip);
4450  color_in = instr_meter_color_in_hbo(t, ip);
4451 
4452  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4453  &m->profile->profile,
4454  time,
4455  length,
4456  color_in);
4457 
4458  color_out &= m->color_mask;
4459 
4460  n_pkts = m->n_pkts[color_out];
4461  n_bytes = m->n_bytes[color_out];
4462 
4463  instr_meter_color_out_hbo_set(t, ip, color_out);
4464 
4465  m->n_pkts[color_out] = n_pkts + 1;
4466  m->n_bytes[color_out] = n_bytes + length;
4467 }
4468 
4469 static inline void
4470 __instr_meter_ihi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4471 {
4472  struct meter *m;
4473  uint64_t time, n_pkts, n_bytes;
4474  uint32_t length;
4475  enum rte_color color_in, color_out;
4476 
4477  TRACE("[Thread %2u] meter (ihi)\n", p->thread_id);
4478 
4479  m = instr_meter_idx_imm(p, ip);
4480  rte_prefetch0(m->n_pkts);
4481  time = rte_get_tsc_cycles();
4482  length = instr_meter_length_nbo(t, ip);
4483  color_in = (enum rte_color)ip->meter.color_in_val;
4484 
4485  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4486  &m->profile->profile,
4487  time,
4488  length,
4489  color_in);
4490 
4491  color_out &= m->color_mask;
4492 
4493  n_pkts = m->n_pkts[color_out];
4494  n_bytes = m->n_bytes[color_out];
4495 
4496  instr_meter_color_out_hbo_set(t, ip, color_out);
4497 
4498  m->n_pkts[color_out] = n_pkts + 1;
4499  m->n_bytes[color_out] = n_bytes + length;
4500 }
4501 
4502 static inline void
4503 __instr_meter_imm_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4504 {
4505  struct meter *m;
4506  uint64_t time, n_pkts, n_bytes;
4507  uint32_t length;
4508  enum rte_color color_in, color_out;
4509 
4510  TRACE("[Thread %2u] meter (imm)\n", p->thread_id);
4511 
4512  m = instr_meter_idx_imm(p, ip);
4513  rte_prefetch0(m->n_pkts);
4514  time = rte_get_tsc_cycles();
4515  length = instr_meter_length_hbo(t, ip);
4516  color_in = instr_meter_color_in_hbo(t, ip);
4517 
4518  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4519  &m->profile->profile,
4520  time,
4521  length,
4522  color_in);
4523 
4524  color_out &= m->color_mask;
4525 
4526  n_pkts = m->n_pkts[color_out];
4527  n_bytes = m->n_bytes[color_out];
4528 
4529  instr_meter_color_out_hbo_set(t, ip, color_out);
4530 
4531  m->n_pkts[color_out] = n_pkts + 1;
4532  m->n_bytes[color_out] = n_bytes + length;
4533 }
4534 
4535 static inline void
4536 __instr_meter_imi_exec(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
4537 {
4538  struct meter *m;
4539  uint64_t time, n_pkts, n_bytes;
4540  uint32_t length;
4541  enum rte_color color_in, color_out;
4542 
4543  TRACE("[Thread %2u] meter (imi)\n", p->thread_id);
4544 
4545  m = instr_meter_idx_imm(p, ip);
4546  rte_prefetch0(m->n_pkts);
4547  time = rte_get_tsc_cycles();
4548  length = instr_meter_length_hbo(t, ip);
4549  color_in = (enum rte_color)ip->meter.color_in_val;
4550 
4551  color_out = rte_meter_trtcm_color_aware_check(&m->m,
4552  &m->profile->profile,
4553  time,
4554  length,
4555  color_in);
4556 
4557  color_out &= m->color_mask;
4558 
4559  n_pkts = m->n_pkts[color_out];
4560  n_bytes = m->n_bytes[color_out];
4561 
4562  instr_meter_color_out_hbo_set(t, ip, color_out);
4563 
4564  m->n_pkts[color_out] = n_pkts + 1;
4565  m->n_bytes[color_out] = n_bytes + length;
4566 }
4567 
4568 #endif
uint32_t(* rte_swx_hash_func_t)(const void *key, uint32_t length, uint32_t seed)
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, size_t *entry_id, int *hit)
int(* rte_swx_extern_func_t)(void *mailbox)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
__rte_experimental void rte_swx_table_learner_rearm_new(void *table, void *mailbox, uint64_t time, uint32_t key_timeout_id)
#define __rte_unused
Definition: rte_common.h:156
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data, uint32_t key_timeout_id)
static uint32_t rte_bsf32(uint32_t v)
Definition: rte_bitops.h:553
uint8_t * pkt
Definition: rte_swx_port.h:26
rte_swx_table_match_type
Definition: rte_swx_table.h:25
void(* rte_swx_port_out_flush_t)(void *port)
Definition: rte_swx_port.h:184
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
Definition: rte_meter.h:522
static uint64_t rte_get_tsc_cycles(void)
void(* rte_swx_port_out_pkt_clone_tx_t)(void *port, struct rte_swx_pkt *pkt, uint32_t truncation_length)
Definition: rte_swx_port.h:173
void(* rte_swx_port_out_pkt_fast_clone_tx_t)(void *port, struct rte_swx_pkt *pkt)
Definition: rte_swx_port.h:159
void(* rte_swx_extern_type_destructor_t)(void *object)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
Definition: rte_swx_port.h:147
rte_color
Definition: rte_meter.h:32
uint32_t length
Definition: rte_swx_port.h:32
__rte_experimental void rte_swx_table_learner_rearm(void *table, void *mailbox, uint64_t time)
#define RTE_SWX_TABLE_LEARNER_N_KEY_TIMEOUTS_MAX
uint32_t offset
Definition: rte_swx_port.h:29
#define RTE_SWX_NAME_SIZE
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
static void rte_prefetch0(const volatile void *p)
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)
Definition: rte_swx_port.h:72