@@ -80,6 +80,8 @@ SRCS-y += test_ring.c
SRCS-y += test_ring_perf.c
SRCS-y += test_pmd_perf.c
+#ABI Version Testing
+SRCS-$(CONFIG_RTE_BUILD_SHARED_LIB) += v2.0/test_v20.c
ifeq ($(CONFIG_RTE_LIBRTE_TABLE),y)
SRCS-y += test_table.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test_table_pipeline.c
@@ -109,7 +111,6 @@ SRCS-y += test_logs.c
SRCS-y += test_memcpy.c
SRCS-y += test_memcpy_perf.c
-
SRCS-$(CONFIG_RTE_LIBRTE_MEMBER) += test_member.c
SRCS-$(CONFIG_RTE_LIBRTE_MEMBER) += test_member_perf.c
@@ -124,11 +125,20 @@ SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_multiwriter.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite_lf.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_routes.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6_perf.c
+#LPM ABI Testing
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += v2.0/test_lpm.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += v2.0/test_lpm_perf.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += v2.0/test_lpm6.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += v2.0/test_lpm6_perf.c
+endif
+
SRCS-y += test_debug.c
SRCS-y += test_errno.c
SRCS-y += test_tailq.c
@@ -8,6 +8,7 @@ endif
test_sources = files('commands.c',
'packet_burst_generator.c',
'sample_packet_forward.c',
+ 'v2.0/test_v20.c',
'test.c',
'test_acl.c',
'test_alarm.c',
@@ -68,6 +69,11 @@ test_sources = files('commands.c',
'test_lpm6.c',
'test_lpm6_perf.c',
'test_lpm_perf.c',
+ 'test_lpm_routes.c',
+ 'v2.0/test_lpm.c',
+ 'v2.0/test_lpm_perf.c',
+ 'v2.0/test_lpm6.c',
+ 'v2.0/test_lpm6_perf.c',
'test_malloc.c',
'test_mbuf.c',
'test_member.c',
@@ -162,11 +162,7 @@ int test_set_rxtx_conf(cmdline_fixed_string_t mode);
int test_set_rxtx_anchor(cmdline_fixed_string_t type);
int test_set_rxtx_sc(cmdline_fixed_string_t type);
-#define MAP_ABI_SYMBOL_VERSION(name, abi_version) \
- __asm(".symver "RTE_STR(name)","RTE_STR(name)"@"RTE_STR(abi_version))
-
#define TEST_DPDK_ABI_VERSION_DEFAULT 0
-#define TEST_DPDK_ABI_VERSION_V1604 1
#define TEST_DPDK_ABI_VERSION_V20 2
#define TEST_DPDK_ABI_VERSION_MAX 3
@@ -41,7 +41,7 @@ static int32_t test16(void);
static int32_t test17(void);
static int32_t test18(void);
-rte_lpm_test tests[] = {
+static rte_lpm_test tests[] = {
/* Test Cases */
test0,
test1,
@@ -1277,6 +1277,7 @@ test_lpm(void)
int status, global_status = 0;
for (i = 0; i < NUM_LPM_TESTS; i++) {
+ printf("# test %02d\n", i);
status = tests[i]();
if (status < 0) {
printf("ERROR: LPM Test %u: FAIL\n", i);
@@ -52,7 +52,7 @@ static int32_t test26(void);
static int32_t test27(void);
static int32_t test28(void);
-rte_lpm6_test tests6[] = {
+static rte_lpm6_test tests6[] = {
/* Test Cases */
test0,
test1,
@@ -5,7 +5,6 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
-#include <math.h>
#include <rte_cycles.h>
#include <rte_random.h>
@@ -13,6 +12,7 @@
#include <rte_ip.h>
#include <rte_lpm.h>
+#include "test_lpm_routes.h"
#include "test.h"
#include "test_xmmt_ops.h"
@@ -27,295 +27,6 @@
#define BATCH_SIZE (1 << 12)
#define BULK_SIZE 32
-#define MAX_RULE_NUM (1200000)
-
-struct route_rule {
- uint32_t ip;
- uint8_t depth;
-};
-
-struct route_rule large_route_table[MAX_RULE_NUM];
-
-static uint32_t num_route_entries;
-#define NUM_ROUTE_ENTRIES num_route_entries
-
-enum {
- IP_CLASS_A,
- IP_CLASS_B,
- IP_CLASS_C
-};
-
-/* struct route_rule_count defines the total number of rules in following a/b/c
- * each item in a[]/b[]/c[] is the number of common IP address class A/B/C, not
- * including the ones for private local network.
- */
-struct route_rule_count {
- uint32_t a[RTE_LPM_MAX_DEPTH];
- uint32_t b[RTE_LPM_MAX_DEPTH];
- uint32_t c[RTE_LPM_MAX_DEPTH];
-};
-
-/* All following numbers of each depth of each common IP class are just
- * got from previous large constant table in app/test/test_lpm_routes.h .
- * In order to match similar performance, they keep same depth and IP
- * address coverage as previous constant table. These numbers don't
- * include any private local IP address. As previous large const rule
- * table was just dumped from a real router, there are no any IP address
- * in class C or D.
- */
-static struct route_rule_count rule_count = {
- .a = { /* IP class A in which the most significant bit is 0 */
- 0, /* depth = 1 */
- 0, /* depth = 2 */
- 1, /* depth = 3 */
- 0, /* depth = 4 */
- 2, /* depth = 5 */
- 1, /* depth = 6 */
- 3, /* depth = 7 */
- 185, /* depth = 8 */
- 26, /* depth = 9 */
- 16, /* depth = 10 */
- 39, /* depth = 11 */
- 144, /* depth = 12 */
- 233, /* depth = 13 */
- 528, /* depth = 14 */
- 866, /* depth = 15 */
- 3856, /* depth = 16 */
- 3268, /* depth = 17 */
- 5662, /* depth = 18 */
- 17301, /* depth = 19 */
- 22226, /* depth = 20 */
- 11147, /* depth = 21 */
- 16746, /* depth = 22 */
- 17120, /* depth = 23 */
- 77578, /* depth = 24 */
- 401, /* depth = 25 */
- 656, /* depth = 26 */
- 1107, /* depth = 27 */
- 1121, /* depth = 28 */
- 2316, /* depth = 29 */
- 717, /* depth = 30 */
- 10, /* depth = 31 */
- 66 /* depth = 32 */
- },
- .b = { /* IP class A in which the most 2 significant bits are 10 */
- 0, /* depth = 1 */
- 0, /* depth = 2 */
- 0, /* depth = 3 */
- 0, /* depth = 4 */
- 1, /* depth = 5 */
- 1, /* depth = 6 */
- 1, /* depth = 7 */
- 3, /* depth = 8 */
- 3, /* depth = 9 */
- 30, /* depth = 10 */
- 25, /* depth = 11 */
- 168, /* depth = 12 */
- 305, /* depth = 13 */
- 569, /* depth = 14 */
- 1129, /* depth = 15 */
- 50800, /* depth = 16 */
- 1645, /* depth = 17 */
- 1820, /* depth = 18 */
- 3506, /* depth = 19 */
- 3258, /* depth = 20 */
- 3424, /* depth = 21 */
- 4971, /* depth = 22 */
- 6885, /* depth = 23 */
- 39771, /* depth = 24 */
- 424, /* depth = 25 */
- 170, /* depth = 26 */
- 433, /* depth = 27 */
- 92, /* depth = 28 */
- 366, /* depth = 29 */
- 377, /* depth = 30 */
- 2, /* depth = 31 */
- 200 /* depth = 32 */
- },
- .c = { /* IP class A in which the most 3 significant bits are 110 */
- 0, /* depth = 1 */
- 0, /* depth = 2 */
- 0, /* depth = 3 */
- 0, /* depth = 4 */
- 0, /* depth = 5 */
- 0, /* depth = 6 */
- 0, /* depth = 7 */
- 12, /* depth = 8 */
- 8, /* depth = 9 */
- 9, /* depth = 10 */
- 33, /* depth = 11 */
- 69, /* depth = 12 */
- 237, /* depth = 13 */
- 1007, /* depth = 14 */
- 1717, /* depth = 15 */
- 14663, /* depth = 16 */
- 8070, /* depth = 17 */
- 16185, /* depth = 18 */
- 48261, /* depth = 19 */
- 36870, /* depth = 20 */
- 33960, /* depth = 21 */
- 50638, /* depth = 22 */
- 61422, /* depth = 23 */
- 466549, /* depth = 24 */
- 1829, /* depth = 25 */
- 4824, /* depth = 26 */
- 4927, /* depth = 27 */
- 5914, /* depth = 28 */
- 10254, /* depth = 29 */
- 4905, /* depth = 30 */
- 1, /* depth = 31 */
- 716 /* depth = 32 */
- }
-};
-
-static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)
-{
-/* IP address class A, the most significant bit is 0 */
-#define IP_HEAD_MASK_A 0x00000000
-#define IP_HEAD_BIT_NUM_A 1
-
-/* IP address class B, the most significant 2 bits are 10 */
-#define IP_HEAD_MASK_B 0x80000000
-#define IP_HEAD_BIT_NUM_B 2
-
-/* IP address class C, the most significant 3 bits are 110 */
-#define IP_HEAD_MASK_C 0xC0000000
-#define IP_HEAD_BIT_NUM_C 3
-
- uint32_t class_depth;
- uint32_t range;
- uint32_t mask;
- uint32_t step;
- uint32_t start;
- uint32_t fixed_bit_num;
- uint32_t ip_head_mask;
- uint32_t rule_num;
- uint32_t k;
- struct route_rule *ptr_rule;
-
- if (ip_class == IP_CLASS_A) { /* IP Address class A */
- fixed_bit_num = IP_HEAD_BIT_NUM_A;
- ip_head_mask = IP_HEAD_MASK_A;
- rule_num = rule_count.a[depth - 1];
- } else if (ip_class == IP_CLASS_B) { /* IP Address class B */
- fixed_bit_num = IP_HEAD_BIT_NUM_B;
- ip_head_mask = IP_HEAD_MASK_B;
- rule_num = rule_count.b[depth - 1];
- } else { /* IP Address class C */
- fixed_bit_num = IP_HEAD_BIT_NUM_C;
- ip_head_mask = IP_HEAD_MASK_C;
- rule_num = rule_count.c[depth - 1];
- }
-
- if (rule_num == 0)
- return;
-
- /* the number of rest bits which don't include the most significant
- * fixed bits for this IP address class
- */
- class_depth = depth - fixed_bit_num;
-
- /* range is the maximum number of rules for this depth and
- * this IP address class
- */
- range = 1 << class_depth;
-
- /* only mask the most depth significant generated bits
- * except fixed bits for IP address class
- */
- mask = range - 1;
-
- /* Widen coverage of IP address in generated rules */
- if (range <= rule_num)
- step = 1;
- else
- step = round((double)range / rule_num);
-
- /* Only generate rest bits except the most significant
- * fixed bits for IP address class
- */
- start = lrand48() & mask;
- ptr_rule = &large_route_table[num_route_entries];
- for (k = 0; k < rule_num; k++) {
- ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))
- | ip_head_mask;
- ptr_rule->depth = depth;
- ptr_rule++;
- start = (start + step) & mask;
- }
- num_route_entries += rule_num;
-}
-
-static void insert_rule_in_random_pos(uint32_t ip, uint8_t depth)
-{
- uint32_t pos;
- int try_count = 0;
- struct route_rule tmp;
-
- do {
- pos = lrand48();
- try_count++;
- } while ((try_count < 10) && (pos > num_route_entries));
-
- if ((pos > num_route_entries) || (pos >= MAX_RULE_NUM))
- pos = num_route_entries >> 1;
-
- tmp = large_route_table[pos];
- large_route_table[pos].ip = ip;
- large_route_table[pos].depth = depth;
- if (num_route_entries < MAX_RULE_NUM)
- large_route_table[num_route_entries++] = tmp;
-}
-
-static void generate_large_route_rule_table(void)
-{
- uint32_t ip_class;
- uint8_t depth;
-
- num_route_entries = 0;
- memset(large_route_table, 0, sizeof(large_route_table));
-
- for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
- for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++) {
- generate_random_rule_prefix(ip_class, depth);
- }
- }
-
- /* Add following rules to keep same as previous large constant table,
- * they are 4 rules with private local IP address and 1 all-zeros prefix
- * with depth = 8.
- */
- insert_rule_in_random_pos(RTE_IPV4(0, 0, 0, 0), 8);
- insert_rule_in_random_pos(RTE_IPV4(10, 2, 23, 147), 32);
- insert_rule_in_random_pos(RTE_IPV4(192, 168, 100, 10), 24);
- insert_rule_in_random_pos(RTE_IPV4(192, 168, 25, 100), 24);
- insert_rule_in_random_pos(RTE_IPV4(192, 168, 129, 124), 32);
-}
-
-static void
-print_route_distribution(const struct route_rule *table, uint32_t n)
-{
- unsigned i, j;
-
- printf("Route distribution per prefix width: \n");
- printf("DEPTH QUANTITY (PERCENT)\n");
- printf("--------------------------- \n");
-
- /* Count depths. */
- for (i = 1; i <= 32; i++) {
- unsigned depth_counter = 0;
- double percent_hits;
-
- for (j = 0; j < n; j++)
- if (table[j].depth == (uint8_t) i)
- depth_counter++;
-
- percent_hits = ((double)depth_counter)/((double)n) * 100;
- printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
- }
- printf("\n");
-}
-
static int
test_lpm_perf(void)
{
@@ -375,7 +86,7 @@ test_lpm_perf(void)
(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
printf("Average LPM Add: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
+ (double)total_time / NUM_ROUTE_ENTRIES);
/* Measure single Lookup */
total_time = 0;
new file mode 100644
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#include <math.h>
+
+#include "rte_lpm.h"
+#include "test_lpm_routes.h"
+
+uint32_t num_route_entries;
+struct route_rule large_route_table[MAX_RULE_NUM];
+
+enum {
+ IP_CLASS_A,
+ IP_CLASS_B,
+ IP_CLASS_C
+};
+
+/* struct route_rule_count defines the total number of rules in the following
+ * a/b/c arrays; each item in a[]/b[]/c[] is the number of common IP addresses
+ * of class A/B/C, not including the ones for private local networks.
+ */
+struct route_rule_count {
+ uint32_t a[RTE_LPM_MAX_DEPTH];
+ uint32_t b[RTE_LPM_MAX_DEPTH];
+ uint32_t c[RTE_LPM_MAX_DEPTH];
+};
+
+/* All following numbers for each depth of each common IP class were
+ * taken from the previous large constant table in app/test/test_lpm_routes.h.
+ * In order to match similar performance, they keep the same depth and IP
+ * address coverage as the previous constant table. These numbers don't
+ * include any private local IP addresses. As the previous large const rule
+ * table was dumped from a real router, there are no IP addresses
+ * in class D or E.
+ */
+static struct route_rule_count rule_count = {
+ .a = { /* IP class A in which the most significant bit is 0 */
+ 0, /* depth = 1 */
+ 0, /* depth = 2 */
+ 1, /* depth = 3 */
+ 0, /* depth = 4 */
+ 2, /* depth = 5 */
+ 1, /* depth = 6 */
+ 3, /* depth = 7 */
+ 185, /* depth = 8 */
+ 26, /* depth = 9 */
+ 16, /* depth = 10 */
+ 39, /* depth = 11 */
+ 144, /* depth = 12 */
+ 233, /* depth = 13 */
+ 528, /* depth = 14 */
+ 866, /* depth = 15 */
+ 3856, /* depth = 16 */
+ 3268, /* depth = 17 */
+ 5662, /* depth = 18 */
+ 17301, /* depth = 19 */
+ 22226, /* depth = 20 */
+ 11147, /* depth = 21 */
+ 16746, /* depth = 22 */
+ 17120, /* depth = 23 */
+ 77578, /* depth = 24 */
+ 401, /* depth = 25 */
+ 656, /* depth = 26 */
+ 1107, /* depth = 27 */
+ 1121, /* depth = 28 */
+ 2316, /* depth = 29 */
+ 717, /* depth = 30 */
+ 10, /* depth = 31 */
+ 66 /* depth = 32 */
+ },
+	.b = { /* IP class B in which the most 2 significant bits are 10 */
+ 0, /* depth = 1 */
+ 0, /* depth = 2 */
+ 0, /* depth = 3 */
+ 0, /* depth = 4 */
+ 1, /* depth = 5 */
+ 1, /* depth = 6 */
+ 1, /* depth = 7 */
+ 3, /* depth = 8 */
+ 3, /* depth = 9 */
+ 30, /* depth = 10 */
+ 25, /* depth = 11 */
+ 168, /* depth = 12 */
+ 305, /* depth = 13 */
+ 569, /* depth = 14 */
+ 1129, /* depth = 15 */
+ 50800, /* depth = 16 */
+ 1645, /* depth = 17 */
+ 1820, /* depth = 18 */
+ 3506, /* depth = 19 */
+ 3258, /* depth = 20 */
+ 3424, /* depth = 21 */
+ 4971, /* depth = 22 */
+ 6885, /* depth = 23 */
+ 39771, /* depth = 24 */
+ 424, /* depth = 25 */
+ 170, /* depth = 26 */
+ 433, /* depth = 27 */
+ 92, /* depth = 28 */
+ 366, /* depth = 29 */
+ 377, /* depth = 30 */
+ 2, /* depth = 31 */
+ 200 /* depth = 32 */
+ },
+	.c = { /* IP class C in which the most 3 significant bits are 110 */
+ 0, /* depth = 1 */
+ 0, /* depth = 2 */
+ 0, /* depth = 3 */
+ 0, /* depth = 4 */
+ 0, /* depth = 5 */
+ 0, /* depth = 6 */
+ 0, /* depth = 7 */
+ 12, /* depth = 8 */
+ 8, /* depth = 9 */
+ 9, /* depth = 10 */
+ 33, /* depth = 11 */
+ 69, /* depth = 12 */
+ 237, /* depth = 13 */
+ 1007, /* depth = 14 */
+ 1717, /* depth = 15 */
+ 14663, /* depth = 16 */
+ 8070, /* depth = 17 */
+ 16185, /* depth = 18 */
+ 48261, /* depth = 19 */
+ 36870, /* depth = 20 */
+ 33960, /* depth = 21 */
+ 50638, /* depth = 22 */
+ 61422, /* depth = 23 */
+ 466549, /* depth = 24 */
+ 1829, /* depth = 25 */
+ 4824, /* depth = 26 */
+ 4927, /* depth = 27 */
+ 5914, /* depth = 28 */
+ 10254, /* depth = 29 */
+ 4905, /* depth = 30 */
+ 1, /* depth = 31 */
+ 716 /* depth = 32 */
+ }
+};
+
+static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)
+{
+/* IP address class A, the most significant bit is 0 */
+#define IP_HEAD_MASK_A 0x00000000
+#define IP_HEAD_BIT_NUM_A 1
+
+/* IP address class B, the most significant 2 bits are 10 */
+#define IP_HEAD_MASK_B 0x80000000
+#define IP_HEAD_BIT_NUM_B 2
+
+/* IP address class C, the most significant 3 bits are 110 */
+#define IP_HEAD_MASK_C 0xC0000000
+#define IP_HEAD_BIT_NUM_C 3
+
+ uint32_t class_depth;
+ uint32_t range;
+ uint32_t mask;
+ uint32_t step;
+ uint32_t start;
+ uint32_t fixed_bit_num;
+ uint32_t ip_head_mask;
+ uint32_t rule_num;
+ uint32_t k;
+ struct route_rule *ptr_rule;
+
+ if (ip_class == IP_CLASS_A) { /* IP Address class A */
+ fixed_bit_num = IP_HEAD_BIT_NUM_A;
+ ip_head_mask = IP_HEAD_MASK_A;
+ rule_num = rule_count.a[depth - 1];
+ } else if (ip_class == IP_CLASS_B) { /* IP Address class B */
+ fixed_bit_num = IP_HEAD_BIT_NUM_B;
+ ip_head_mask = IP_HEAD_MASK_B;
+ rule_num = rule_count.b[depth - 1];
+ } else { /* IP Address class C */
+ fixed_bit_num = IP_HEAD_BIT_NUM_C;
+ ip_head_mask = IP_HEAD_MASK_C;
+ rule_num = rule_count.c[depth - 1];
+ }
+
+ if (rule_num == 0)
+ return;
+
+ /* the number of rest bits which don't include the most significant
+ * fixed bits for this IP address class
+ */
+ class_depth = depth - fixed_bit_num;
+
+ /* range is the maximum number of rules for this depth and
+ * this IP address class
+ */
+ range = 1 << class_depth;
+
+ /* only mask the most depth significant generated bits
+ * except fixed bits for IP address class
+ */
+ mask = range - 1;
+
+ /* Widen coverage of IP address in generated rules */
+ if (range <= rule_num)
+ step = 1;
+ else
+ step = round((double)range / rule_num);
+
+ /* Only generate rest bits except the most significant
+ * fixed bits for IP address class
+ */
+ start = lrand48() & mask;
+ ptr_rule = &large_route_table[num_route_entries];
+ for (k = 0; k < rule_num; k++) {
+ ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))
+ | ip_head_mask;
+ ptr_rule->depth = depth;
+ ptr_rule++;
+ start = (start + step) & mask;
+ }
+ num_route_entries += rule_num;
+}
+
+static void insert_rule_in_random_pos(uint32_t ip, uint8_t depth)
+{
+ uint32_t pos;
+ int try_count = 0;
+ struct route_rule tmp;
+
+ do {
+ pos = lrand48();
+ try_count++;
+ } while ((try_count < 10) && (pos > num_route_entries));
+
+ if ((pos > num_route_entries) || (pos >= MAX_RULE_NUM))
+ pos = num_route_entries >> 1;
+
+ tmp = large_route_table[pos];
+ large_route_table[pos].ip = ip;
+ large_route_table[pos].depth = depth;
+ if (num_route_entries < MAX_RULE_NUM)
+ large_route_table[num_route_entries++] = tmp;
+}
+
+void generate_large_route_rule_table(void)
+{
+ uint32_t ip_class;
+ uint8_t depth;
+
+ num_route_entries = 0;
+ memset(large_route_table, 0, sizeof(large_route_table));
+
+ for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
+ for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++)
+ generate_random_rule_prefix(ip_class, depth);
+ }
+
+ /* Add following rules to keep same as previous large constant table,
+ * they are 4 rules with private local IP address and 1 all-zeros prefix
+ * with depth = 8.
+ */
+ insert_rule_in_random_pos(RTE_IPV4(0, 0, 0, 0), 8);
+ insert_rule_in_random_pos(RTE_IPV4(10, 2, 23, 147), 32);
+ insert_rule_in_random_pos(RTE_IPV4(192, 168, 100, 10), 24);
+ insert_rule_in_random_pos(RTE_IPV4(192, 168, 25, 100), 24);
+ insert_rule_in_random_pos(RTE_IPV4(192, 168, 129, 124), 32);
+}
+
+void
+print_route_distribution(const struct route_rule *table, uint32_t n)
+{
+ unsigned int i, j;
+
+ printf("Route distribution per prefix width:\n");
+ printf("DEPTH QUANTITY (PERCENT)\n");
+ printf("---------------------------\n");
+
+ /* Count depths. */
+ for (i = 1; i <= 32; i++) {
+ unsigned int depth_counter = 0;
+ double percent_hits;
+
+ for (j = 0; j < n; j++)
+ if (table[j].depth == (uint8_t) i)
+ depth_counter++;
+
+ percent_hits = ((double)depth_counter)/((double)n) * 100;
+ printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+ }
+ printf("\n");
+}
new file mode 100644
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#ifndef _TEST_LPM_ROUTES_H_
+#define _TEST_LPM_ROUTES_H_
+
+#include <rte_ip.h>
+
+#define MAX_RULE_NUM (1200000)
+
+struct route_rule {
+ uint32_t ip;
+ uint8_t depth;
+};
+
+extern struct route_rule large_route_table[MAX_RULE_NUM];
+
+extern uint32_t num_route_entries;
+#define NUM_ROUTE_ENTRIES num_route_entries
+
+void generate_large_route_rule_table(void);
+void print_route_distribution(const struct route_rule *table, uint32_t n);
+
+#endif
new file mode 100644
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ */
+
+#ifndef _DCOMPAT_H_
+#define _DCOMPAT_H_
+
+#include <rte_compat.h>
+
+#define ABI_VERSION 2.0
+
+#define MAP_ABI_SYMBOL(name) \
+ BIND_VERSION_SYMBOL(name, ABI_VERSION)
+
+MAP_ABI_SYMBOL(rte_lpm_add);
+MAP_ABI_SYMBOL(rte_lpm_find_existing);
+MAP_ABI_SYMBOL(rte_lpm_create);
+MAP_ABI_SYMBOL(rte_lpm_free);
+MAP_ABI_SYMBOL(rte_lpm_is_rule_present);
+MAP_ABI_SYMBOL(rte_lpm_delete);
+MAP_ABI_SYMBOL(rte_lpm_delete_all);
+
+MAP_ABI_SYMBOL(rte_lpm6_add);
+MAP_ABI_SYMBOL(rte_lpm6_is_rule_present);
+MAP_ABI_SYMBOL(rte_lpm6_lookup);
+MAP_ABI_SYMBOL(rte_lpm6_lookup_bulk_func);
+
+#undef MAP_ABI_SYMBOL
+
+#endif
new file mode 100644
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _RTE_LPM_H_
+#define _RTE_LPM_H_
+
+/**
+ * @file
+ * RTE Longest Prefix Match (LPM)
+ */
+
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_memory.h>
+#include <rte_common.h>
+#include <rte_vect.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Max number of characters in LPM name. */
+#define RTE_LPM_NAMESIZE 32
+
+/** Maximum depth value possible for IPv4 LPM. */
+#define RTE_LPM_MAX_DEPTH 32
+
+/** @internal Total number of tbl24 entries. */
+#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
+
+/** @internal Number of entries in a tbl8 group. */
+#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
+
+/** @internal Total number of tbl8 groups in the tbl8. */
+#define RTE_LPM_TBL8_NUM_GROUPS 256
+
+/** @internal Total number of tbl8 entries. */
+#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
+
+/** @internal Macro to enable/disable run-time checks. */
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
+ if (cond) return (retval); \
+} while (0)
+#else
+#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
+#endif
+
+/** @internal bitmask with valid and ext_entry/valid_group fields set */
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
+
+/** Bitmask used to indicate successful lookup */
+#define RTE_LPM_LOOKUP_SUCCESS 0x0100
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+/** @internal Tbl24 entry structure. */
+struct rte_lpm_tbl24_entry {
+ /* Stores Next hop or group index (i.e. gindex)into tbl8. */
+ union {
+ uint8_t next_hop;
+ uint8_t tbl8_gindex;
+ };
+ /* Using single uint8_t to store 3 values. */
+ uint8_t valid :1; /**< Validation flag. */
+ uint8_t ext_entry :1; /**< External entry. */
+ uint8_t depth :6; /**< Rule depth. */
+};
+
+/** @internal Tbl8 entry structure. */
+struct rte_lpm_tbl8_entry {
+ uint8_t next_hop; /**< next hop. */
+ /* Using single uint8_t to store 3 values. */
+ uint8_t valid :1; /**< Validation flag. */
+ uint8_t valid_group :1; /**< Group validation flag. */
+ uint8_t depth :6; /**< Rule depth. */
+};
+#else
+struct rte_lpm_tbl24_entry {
+ uint8_t depth :6;
+ uint8_t ext_entry :1;
+ uint8_t valid :1;
+ union {
+ uint8_t tbl8_gindex;
+ uint8_t next_hop;
+ };
+};
+
+struct rte_lpm_tbl8_entry {
+ uint8_t depth :6;
+ uint8_t valid_group :1;
+ uint8_t valid :1;
+ uint8_t next_hop;
+};
+#endif
+
+/** @internal Rule structure. */
+struct rte_lpm_rule {
+ uint32_t ip; /**< Rule IP address. */
+ uint8_t next_hop; /**< Rule next hop. */
+};
+
+/** @internal Contains metadata about the rules table. */
+struct rte_lpm_rule_info {
+ uint32_t used_rules; /**< Used rules so far. */
+ uint32_t first_rule; /**< Indexes the first rule of a given depth. */
+};
+
+/** @internal LPM structure. */
+struct rte_lpm {
+ /* LPM metadata. */
+ char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
+ uint32_t max_rules; /**< Max. balanced rules per lpm. */
+ struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
+
+ /* LPM Tables. */
+ struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+ __rte_cache_aligned; /**< LPM tbl24 table. */
+ struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+ __rte_cache_aligned; /**< LPM tbl8 table. */
+ struct rte_lpm_rule rules_tbl[0] \
+ __rte_cache_aligned; /**< LPM rules. */
+};
+
+/**
+ * Create an LPM object.
+ *
+ * @param name
+ * LPM object name
+ * @param socket_id
+ * NUMA socket ID for LPM table memory allocation
+ * @param max_rules
+ * Maximum number of LPM rules that can be added
+ * @param flags
+ * This parameter is currently unused
+ * @return
+ * Handle to LPM object on success, NULL otherwise with rte_errno set
+ * to an appropriate values. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - invalid parameter passed to function
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_lpm *
+rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+
+/**
+ * Find an existing LPM object and return a pointer to it.
+ *
+ * @param name
+ * Name of the lpm object as passed to rte_lpm_create()
+ * @return
+ * Pointer to lpm object or NULL if object not found with rte_errno
+ * set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_lpm *
+rte_lpm_find_existing(const char *name);
+
+/**
+ * Free an LPM object.
+ *
+ * @param lpm
+ * LPM object handle
+ * @return
+ * None
+ */
+void
+rte_lpm_free(struct rte_lpm *lpm);
+
+/**
+ * Add a rule to the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be added to the LPM table
+ * @param depth
+ * Depth of the rule to be added to the LPM table
+ * @param next_hop
+ * Next hop of the rule to be added to the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+
+/**
+ * Check if a rule is present in the LPM table,
+ * and provide its next hop if it is.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be searched
+ * @param depth
+ * Depth of the rule to searched
+ * @param next_hop
+ * Next hop of the rule (valid only if it is found)
+ * @return
+ * 1 if the rule exists, 0 if it does not, a negative value on failure
+ */
+int
+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint8_t *next_hop);
+
+/**
+ * Delete a rule from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be deleted from the LPM table
+ * @param depth
+ * Depth of the rule to be deleted from the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+
+/**
+ * Delete all rules from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ */
+void
+rte_lpm_delete_all(struct rte_lpm *lpm);
+
+/**
+ * Lookup an IP into the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP to be looked up in the LPM table
+ * @param next_hop
+ * Next hop of the most specific rule found for IP (valid on lookup hit only)
+ * @return
+ * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
+ */
+static inline int
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+{
+ unsigned tbl24_index = (ip >> 8);
+ uint16_t tbl_entry;
+
+ /* DEBUG: Check user input arguments. */
+ RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
+
+ /* Copy tbl24 entry */
+ tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
+
+ /* Copy tbl8 entry (only if needed) */
+ if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+
+ unsigned tbl8_index = (uint8_t)ip +
+ ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+
+ tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+ }
+
+ *next_hop = (uint8_t)tbl_entry;
+ return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
+}
+
+/**
+ * Lookup multiple IP addresses in an LPM table. This may be implemented as a
+ * macro, so the address of the function should not be used.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ips
+ * Array of IPs to be looked up in the LPM table
+ * @param next_hops
+ * Next hop of the most specific rule found for IP (valid on lookup hit only).
+ * This is an array of two byte values. The most significant byte in each
+ * value says whether the lookup was successful (bitmask
+ * RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
+ * actual next hop.
+ * @param n
+ * Number of elements in ips (and next_hops) array to lookup. This should be a
+ * compile time constant, and divisible by 8 for best performance.
+ * @return
+ * -EINVAL for incorrect arguments, otherwise 0
+ */
+#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
+ rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
+
+static inline int
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
+ uint16_t * next_hops, const unsigned n)
+{
+ unsigned i;
+ unsigned tbl24_indexes[n];
+
+ /* DEBUG: Check user input arguments. */
+ RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
+ (next_hops == NULL)), -EINVAL);
+
+ for (i = 0; i < n; i++) {
+ tbl24_indexes[i] = ips[i] >> 8;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Simply copy tbl24 entry to output */
+ next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
+
+ /* Overwrite output with tbl8 entry if needed */
+ if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+
+ unsigned tbl8_index = (uint8_t)ips[i] +
+ ((uint8_t)next_hops[i] *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+
+ next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+ }
+ }
+ return 0;
+}
+
+/* Mask four results. */
+#define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
+
+/**
+ * Lookup four IP addresses in an LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * Four IPs to be looked up in the LPM table
+ * @param hop
+ * Next hop of the most specific rule found for IP (valid on lookup hit only).
+ * This is an 4 elements array of two byte values.
+ *   If the lookup was successful for the given IP, then the least significant
+ * of the corresponding element is the actual next hop and the most
+ * significant byte is zero.
+ * If the lookup for the given IP failed, then corresponding element would
+ *   contain the default value, see description of the next parameter.
+ * @param defv
+ * Default value to populate into corresponding element of hop[] array,
+ * if lookup would fail.
+ */
+static inline void
+rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
+ uint16_t defv)
+{
+ __m128i i24;
+ rte_xmm_t i8;
+ uint16_t tbl[4];
+ uint64_t idx, pt;
+
+ const __m128i mask8 =
+ _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
+
+ /*
+ * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
+ * as one 64-bit value (0x0300030003000300).
+ */
+ const uint64_t mask_xv =
+ ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
+
+ /*
+ * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
+ * as one 64-bit value (0x0100010001000100).
+ */
+ const uint64_t mask_v =
+ ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
+
+ /* get 4 indexes for tbl24[]. */
+ i24 = _mm_srli_epi32(ip, CHAR_BIT);
+
+ /* extract values from tbl24[] */
+ idx = _mm_cvtsi128_si64(i24);
+ i24 = _mm_srli_si128(i24, sizeof(uint64_t));
+
+ tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
+ tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+
+ idx = _mm_cvtsi128_si64(i24);
+
+ tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
+ tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+
+ /* get 4 indexes for tbl8[]. */
+ i8.x = _mm_and_si128(ip, mask8);
+
+ pt = (uint64_t)tbl[0] |
+ (uint64_t)tbl[1] << 16 |
+ (uint64_t)tbl[2] << 32 |
+ (uint64_t)tbl[3] << 48;
+
+ /* search successfully finished for all 4 IP addresses. */
+ if (likely((pt & mask_xv) == mask_v)) {
+ uintptr_t ph = (uintptr_t)hop;
+ *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
+ return;
+ }
+
+ if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[0] = i8.u32[0] +
+ (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
+ }
+ if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[1] = i8.u32[1] +
+ (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
+ }
+ if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[2] = i8.u32[2] +
+ (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
+ }
+ if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ i8.u32[3] = i8.u32[3] +
+ (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
+ }
+
+ hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
+ hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
+ hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
+ hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LPM_H_ */
new file mode 100644
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+#ifndef _RTE_LPM6_H_
+#define _RTE_LPM6_H_
+
+/**
+ * @file
+ * RTE Longest Prefix Match for IPv6 (LPM6)
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define RTE_LPM6_MAX_DEPTH 128
+#define RTE_LPM6_IPV6_ADDR_SIZE 16
+/** Max number of characters in LPM name. */
+#define RTE_LPM6_NAMESIZE 32
+
+/** LPM structure. */
+struct rte_lpm6;
+
+/** LPM configuration structure. */
+struct rte_lpm6_config {
+ uint32_t max_rules; /**< Max number of rules. */
+ uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
+ int flags; /**< This field is currently unused. */
+};
+
+/**
+ * Create an LPM object.
+ *
+ * @param name
+ * LPM object name
+ * @param socket_id
+ * NUMA socket ID for LPM table memory allocation
+ * @param config
+ * Structure containing the configuration
+ * @return
+ * Handle to LPM object on success, NULL otherwise with rte_errno set
+ * to an appropriate values. Possible rte_errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - invalid parameter passed to function
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_lpm6 *
+rte_lpm6_create(const char *name, int socket_id,
+ const struct rte_lpm6_config *config);
+
+/**
+ * Find an existing LPM object and return a pointer to it.
+ *
+ * @param name
+ * Name of the lpm object as passed to rte_lpm6_create()
+ * @return
+ * Pointer to lpm object or NULL if object not found with rte_errno
+ * set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_lpm6 *
+rte_lpm6_find_existing(const char *name);
+
+/**
+ * Free an LPM object.
+ *
+ * @param lpm
+ * LPM object handle
+ * @return
+ * None
+ */
+void
+rte_lpm6_free(struct rte_lpm6 *lpm);
+
+/**
+ * Add a rule to the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be added to the LPM table
+ * @param depth
+ * Depth of the rule to be added to the LPM table
+ * @param next_hop
+ * Next hop of the rule to be added to the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint8_t next_hop);
+
+/**
+ * Check if a rule is present in the LPM table,
+ * and provide its next hop if it is.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be searched
+ * @param depth
+ * Depth of the rule to searched
+ * @param next_hop
+ * Next hop of the rule (valid only if it is found)
+ * @return
+ * 1 if the rule exists, 0 if it does not, a negative value on failure
+ */
+int
+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+uint8_t *next_hop);
+
+/**
+ * Delete a rule from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP of the rule to be deleted from the LPM table
+ * @param depth
+ * Depth of the rule to be deleted from the LPM table
+ * @return
+ * 0 on success, negative value otherwise
+ */
+int
+rte_lpm6_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth);
+
+/**
+ * Delete a rule from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ips
+ * Array of IPs to be deleted from the LPM table
+ * @param depths
+ * Array of depths of the rules to be deleted from the LPM table
+ * @param n
+ * Number of rules to be deleted from the LPM table
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n);
+
+/**
+ * Delete all rules from the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ */
+void
+rte_lpm6_delete_all(struct rte_lpm6 *lpm);
+
+/**
+ * Lookup an IP into the LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ip
+ * IP to be looked up in the LPM table
+ * @param next_hop
+ * Next hop of the most specific rule found for IP (valid on lookup hit only)
+ * @return
+ * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
+ */
+int
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);
+
+/**
+ * Lookup multiple IP addresses in an LPM table.
+ *
+ * @param lpm
+ * LPM object handle
+ * @param ips
+ * Array of IPs to be looked up in the LPM table
+ * @param next_hops
+ * Next hop of the most specific rule found for IP (valid on lookup hit only).
+ * This is an array of two byte values. The next hop will be stored on
+ * each position on success; otherwise the position will be set to -1.
+ * @param n
+ * Number of elements in ips (and next_hops) array to lookup.
+ * @return
+ * -EINVAL for incorrect arguments, otherwise 0
+ */
+int
+rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int16_t * next_hops, unsigned n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,1139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ *
+ * LPM Autotests from DPDK v2.2.0 for v2.0 ABI compatibility testing.
+ *
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_random.h>
+#include <rte_branch_prediction.h>
+#include <rte_ip.h>
+#include <time.h>
+
+#include "../test.h"
+
+/* remapping of DPDK v2.0 symbols */
+#include "dcompat.h"
+/* backported header from DPDK v2.0 */
+#include "rte_lpm.h"
+#include "../test_lpm_routes.h"
+
+
+/* Assert helper: on failure, print the failing line and abort the test. */
+#define TEST_LPM_ASSERT(cond) do { \
+ if (!(cond)) { \
+ printf("Error at line %d:\n", __LINE__); \
+ return -1; \
+ } \
+} while (0)
+
+typedef int32_t (*rte_lpm_test)(void);
+
+static int32_t test0(void);
+static int32_t test1(void);
+static int32_t test2(void);
+static int32_t test3(void);
+static int32_t test4(void);
+static int32_t test5(void);
+static int32_t test6(void);
+static int32_t test7(void);
+static int32_t test8(void);
+static int32_t test9(void);
+static int32_t test10(void);
+static int32_t test11(void);
+static int32_t test12(void);
+static int32_t test13(void);
+static int32_t test14(void);
+static int32_t test15(void);
+static int32_t test16(void);
+static int32_t test17(void);
+
+static rte_lpm_test tests[] = {
+/* Test cases, executed in order by test_lpm(). */
+	test0,
+	test1,
+	test2,
+	test3,
+	test4,
+	test5,
+	test6,
+	test7,
+	test8,
+	test9,
+	test10,
+	test11,
+	test12,
+	test13,
+	test14,
+	test15,
+	test16,
+	test17,
+};
+
+#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
+#define MAX_DEPTH 32 /* maximum IPv4 prefix depth */
+#define MAX_RULES 256 /* rule capacity used by most test tables */
+#define PASS 0 /* return value of a passing test case */
+
+/*
+ * Check that rte_lpm_create fails gracefully for incorrect user input
+ * arguments
+ */
+int32_t
+test0(void)
+{
+ struct rte_lpm *lpm = NULL;
+
+ /* rte_lpm_create: lpm name == NULL */
+ lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm == NULL);
+
+ /* rte_lpm_create: max_rules = 0 */
+ /* Note: __func__ inserts the function name, in this case "test0". */
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
+ TEST_LPM_ASSERT(lpm == NULL);
+
+ /* socket_id < -1 is invalid */
+ lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm == NULL);
+
+ return PASS;
+}
+
+/*
+ * Create an lpm table, then delete it, 100 times.
+ * Use a slightly smaller max-rules size each iteration.
+ */
+int32_t
+test1(void)
+{
+	struct rte_lpm *lpm = NULL;
+	int32_t i;
+
+	/* Create/free cycle, varying the rule capacity each time. */
+	for (i = 0; i < 100; i++) {
+		lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
+		TEST_LPM_ASSERT(lpm != NULL);
+
+		rte_lpm_free(lpm);
+	}
+
+	/* rte_lpm_free() has no return value, so success is assumed. */
+	return PASS;
+}
+
+/*
+ * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
+ * therefore it is impossible to check for failure but this test is added to
+ * increase function coverage metrics and to validate that freeing null does
+ * not crash.
+ */
+int32_t
+test2(void)
+{
+ struct rte_lpm *lpm = NULL;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ rte_lpm_free(lpm);
+ rte_lpm_free(NULL);
+ return PASS;
+}
+
+/*
+ * Check that rte_lpm_add fails gracefully for incorrect user input arguments
+ */
+int32_t
+test3(void)
+{
+	struct rte_lpm *lpm = NULL;
+	uint32_t ip = RTE_IPV4(0, 0, 0, 0);
+	uint8_t depth = 24, next_hop = 100;
+	int32_t status = 0;
+
+	/* rte_lpm_add: lpm == NULL */
+	status = rte_lpm_add(NULL, ip, depth, next_hop);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm_add: depth < 1 */
+	status = rte_lpm_add(lpm, ip, 0, next_hop);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm_add: depth > MAX_DEPTH */
+	status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Check that rte_lpm_delete fails gracefully for incorrect user input
+ * arguments
+ */
+int32_t
+test4(void)
+{
+	struct rte_lpm *lpm = NULL;
+	uint32_t ip = RTE_IPV4(0, 0, 0, 0);
+	uint8_t depth = 24;
+	int32_t status = 0;
+
+	/* rte_lpm_delete: lpm == NULL */
+	status = rte_lpm_delete(NULL, ip, depth);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm_delete: depth < 1 */
+	status = rte_lpm_delete(lpm, ip, 0);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm_delete: depth > MAX_DEPTH */
+	status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Check that rte_lpm_lookup fails gracefully for incorrect user input
+ * arguments (only compiled in when LPM debug checks are enabled)
+ */
+int32_t
+test5(void)
+{
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+	struct rte_lpm *lpm = NULL;
+	uint32_t ip = RTE_IPV4(0, 0, 0, 0);
+	uint8_t next_hop_return = 0;
+	int32_t status = 0;
+
+	/* rte_lpm_lookup: lpm == NULL */
+	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm_lookup: next_hop == NULL */
+	status = rte_lpm_lookup(lpm, ip, NULL);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm_free(lpm);
+#endif
+	return PASS;
+}
+
+
+
+/*
+ * Call add, lookup and delete for a single rule with depth <= 24
+ */
+int32_t
+test6(void)
+{
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip = RTE_IPV4(0, 0, 0, 0);
+ uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+ int32_t status = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Call add, lookup and delete for a single rule with depth > 24
+ */
+
+int32_t
+test7(void)
+{
+ __m128i ipx4;
+ uint16_t hop[4];
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip = RTE_IPV4(0, 0, 0, 0);
+ uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
+ int32_t status = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+ TEST_LPM_ASSERT(hop[0] == next_hop_add);
+ TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[3] == next_hop_add);
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Use rte_lpm_add to add rules which effect only the second half of the lpm
+ * table. Use all possible depths ranging from 1..32. Set the next hop = to the
+ * depth. Check lookup hit for on every add and check for lookup miss on the
+ * first half of the lpm table after each add. Finally delete all rules going
+ * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
+ * delete. The lookup should return the next_hop_add value related to the
+ * previous depth value (i.e. depth -1).
+ */
+int32_t
+test8(void)
+{
+ __m128i ipx4;
+ uint16_t hop[4];
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip1 = RTE_IPV4(127, 255, 255, 255), ip2 = RTE_IPV4(128, 0, 0, 0);
+ uint8_t depth, next_hop_add, next_hop_return;
+ int32_t status = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Loop with rte_lpm_add. */
+ for (depth = 1; depth <= 32; depth++) {
+ /* Let the next_hop_add value = depth. Just for change. */
+ next_hop_add = depth;
+
+ status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ /* Check IP in first half of tbl24 which should be empty. */
+ status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) &&
+ (next_hop_return == next_hop_add));
+
+ ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+ TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[1] == next_hop_add);
+ TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[3] == next_hop_add);
+ }
+
+ /* Loop with rte_lpm_delete. */
+ for (depth = 32; depth >= 1; depth--) {
+ next_hop_add = (uint8_t) (depth - 1);
+
+ status = rte_lpm_delete(lpm, ip2, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
+
+ if (depth != 1) {
+ TEST_LPM_ASSERT((status == 0) &&
+ (next_hop_return == next_hop_add));
+ }
+ else {
+ TEST_LPM_ASSERT(status == -ENOENT);
+ }
+
+ status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+ if (depth != 1) {
+ TEST_LPM_ASSERT(hop[0] == next_hop_add);
+ TEST_LPM_ASSERT(hop[1] == next_hop_add);
+ } else {
+ TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+ }
+ TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
+ }
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * - Add & lookup to hit invalid TBL24 entry
+ * - Add & lookup to hit valid TBL24 entry not extended
+ * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
+ * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
+ *
+ */
+int32_t
+test9(void)
+{
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip, ip_1, ip_2;
+ uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
+ next_hop_add_2, next_hop_return;
+ int32_t status = 0;
+
+ /* Add & lookup to hit invalid TBL24 entry */
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+ next_hop_add = 100;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Add & lookup to hit valid TBL24 entry not extended */
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 23;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ depth = 24;
+ next_hop_add = 101;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ depth = 24;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ depth = 23;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
+ * entry */
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 32;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ ip = RTE_IPV4(128, 0, 0, 5);
+ depth = 32;
+ next_hop_add = 101;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 32;
+ next_hop_add = 100;
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
+ * entry */
+ ip_1 = RTE_IPV4(128, 0, 0, 0);
+ depth_1 = 25;
+ next_hop_add_1 = 101;
+
+ ip_2 = RTE_IPV4(128, 0, 0, 5);
+ depth_2 = 32;
+ next_hop_add_2 = 102;
+
+ next_hop_return = 0;
+
+ status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+ status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
+
+ status = rte_lpm_delete(lpm, ip_2, depth_2);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+ status = rte_lpm_delete(lpm, ip_1, depth_1);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+
+/*
+ * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
+ * lookup)
+ * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
+ * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
+ * delete & lookup)
+ * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
+ * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
+ * - Delete a rule that is not present in the TBL24 & lookup
+ * - Delete a rule that is not present in the TBL8 & lookup
+ *
+ */
+int32_t
+test10(void)
+{
+
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip;
+ uint8_t depth, next_hop_add, next_hop_return;
+ int32_t status = 0;
+
+ /* Add rule that covers a TBL24 range previously invalid & lookup
+ * (& delete & lookup) */
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 16;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 25;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Add rule that extends a TBL24 valid entry & lookup for both rules
+ * (& delete & lookup) */
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ ip = RTE_IPV4(128, 0, 0, 10);
+ depth = 32;
+ next_hop_add = 101;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ next_hop_add = 100;
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ ip = RTE_IPV4(128, 0, 0, 10);
+ depth = 32;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Add rule that updates the next hop in TBL24 & lookup
+ * (& delete & lookup) */
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ next_hop_add = 101;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Add rule that updates the next hop in TBL8 & lookup
+ * (& delete & lookup) */
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 32;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ next_hop_add = 101;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Delete a rule that is not present in the TBL24 & lookup */
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status < 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_delete_all(lpm);
+
+ /* Delete a rule that is not present in the TBL8 & lookup */
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 32;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status < 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Add two rules, lookup to hit the more specific one, lookup to hit the less
+ * specific one delete the less specific rule and lookup previous values again;
+ * add a more specific rule than the existing rule, lookup again
+ *
+ * */
+int32_t
+test11(void)
+{
+
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip;
+ uint8_t depth, next_hop_add, next_hop_return;
+ int32_t status = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+ next_hop_add = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ ip = RTE_IPV4(128, 0, 0, 10);
+ depth = 32;
+ next_hop_add = 101;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ next_hop_add = 100;
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ ip = RTE_IPV4(128, 0, 0, 10);
+ depth = 32;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
+ * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
+ * and contraction.
+ *
+ */
+
+int32_t
+test12(void)
+{
+	__m128i ipx4;
+	uint16_t hop[4];
+	struct rte_lpm *lpm = NULL;
+	uint32_t ip, i;
+	uint8_t depth, next_hop_add, next_hop_return;
+	int32_t status = 0;
+
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	ip = RTE_IPV4(128, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 100;
+
+	for (i = 0; i < 1000; i++) {
+		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+		TEST_LPM_ASSERT(status == 0);
+
+		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		TEST_LPM_ASSERT((status == 0) &&
+				(next_hop_return == next_hop_add));
+
+		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[1] == next_hop_add);
+		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[3] == next_hop_add);
+
+		status = rte_lpm_delete(lpm, ip, depth);
+		TEST_LPM_ASSERT(status == 0);
+
+		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		TEST_LPM_ASSERT(status == -ENOENT);
+	}
+
+	rte_lpm_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
+ * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
+ * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
+ * extension and contraction.
+ *
+ * */
+
+int32_t
+test13(void)
+{
+ struct rte_lpm *lpm = NULL;
+ uint32_t ip, i;
+ uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
+ int32_t status = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ ip = RTE_IPV4(128, 0, 0, 0);
+ depth = 24;
+ next_hop_add_1 = 100;
+
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+ depth = 32;
+ next_hop_add_2 = 101;
+
+ for (i = 0; i < 1000; i++) {
+ status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) &&
+ (next_hop_return == next_hop_add_2));
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) &&
+ (next_hop_return == next_hop_add_1));
+ }
+
+ depth = 24;
+
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == -ENOENT);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
+ * No more tbl8 extensions will be allowed. Now add one more rule that requires
+ * a tbl8 extension and expect it to fail.
+ */
+int32_t
+test14(void)
+{
+
+	/* We only use depth = 32 in the loop below so we must make sure
+	 * that we have enough storage for all rules at that depth. */
+
+	struct rte_lpm *lpm = NULL;
+	uint32_t ip;
+	uint8_t depth, next_hop_add, next_hop_return;
+	int32_t status = 0;
+
+	/* Add enough space for 256 rules for every depth */
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	depth = 32;
+	next_hop_add = 100;
+	ip = RTE_IPV4(0, 0, 0, 0);
+
+	/* Add 256 rules that require a tbl8 extension */
+	for (; ip <= RTE_IPV4(0, 0, 255, 0); ip += 256) {
+		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+		TEST_LPM_ASSERT(status == 0);
+
+		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		TEST_LPM_ASSERT((status == 0) &&
+				(next_hop_return == next_hop_add));
+	}
+
+	/* All tbl8 extensions have been used above. Try to add one more and
+	 * we get a fail */
+	ip = RTE_IPV4(1, 0, 0, 0);
+	depth = 32;
+
+	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Sequence of operations for find existing lpm table
+ *
+ * - create table
+ * - find existing table: hit
+ * - find non-existing table: miss
+ *
+ */
+int32_t
+test15(void)
+{
+ struct rte_lpm *lpm = NULL, *result = NULL;
+
+ /* Create lpm */
+ lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Try to find existing lpm */
+ result = rte_lpm_find_existing("lpm_find_existing");
+ TEST_LPM_ASSERT(result == lpm);
+
+ /* Try to find non-existing lpm */
+ result = rte_lpm_find_existing("lpm_find_non_existing");
+ TEST_LPM_ASSERT(result == NULL);
+
+ /* Cleanup. */
+ rte_lpm_delete_all(lpm);
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+/*
+ * Test failure condition of overloading the tbl8 so no more will fit.
+ * Check we get an error return value in that case.
+ */
+int32_t
+test16(void)
+{
+	uint32_t ip;
+	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
+			256 * 32, 0);
+
+	/* Creation must succeed before we can exhaust its tbl8 groups;
+	 * the original dereferenced lpm without checking for NULL. */
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* ip loops through all possibilities for top 24 bits of address */
+	for (ip = 0; ip < 0xFFFFFF; ip++) {
+		/* add an entry within a different tbl8 each time, since
+		 * depth >24 and the top 24 bits are different */
+		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
+			break;
+	}
+
+	if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
+		printf("Error, unexpected failure with filling tbl8 groups\n");
+		printf("Failed after %u additions, expected after %u\n",
+				(unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
+		/* Report the failure to the harness instead of silently
+		 * returning success as the original did. */
+		rte_lpm_free(lpm);
+		return -1;
+	}
+
+	rte_lpm_free(lpm);
+	return PASS;
+}
+
+/*
+ * Test for overwriting of tbl8:
+ * - add rule /32 and lookup
+ * - add new rule /24 and lookup
+ * - add third rule /25 and lookup
+ * - lookup /32 and /24 rule to ensure the table has not been overwritten.
+ */
+int32_t
+test17(void)
+{
+ struct rte_lpm *lpm = NULL;
+ const uint32_t ip_10_32 = RTE_IPV4(10, 10, 10, 2);
+ const uint32_t ip_10_24 = RTE_IPV4(10, 10, 10, 0);
+ const uint32_t ip_20_25 = RTE_IPV4(10, 10, 20, 2);
+ const uint8_t d_ip_10_32 = 32,
+ d_ip_10_24 = 24,
+ d_ip_20_25 = 25;
+ const uint8_t next_hop_ip_10_32 = 100,
+ next_hop_ip_10_24 = 105,
+ next_hop_ip_20_25 = 111;
+ uint8_t next_hop_return = 0;
+ int32_t status = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
+ next_hop_ip_10_32)) < 0)
+ return -1;
+
+ status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+ uint8_t test_hop_10_32 = next_hop_return;
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+ if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
+ next_hop_ip_10_24)) < 0)
+ return -1;
+
+ status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+ uint8_t test_hop_10_24 = next_hop_return;
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+ if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
+ next_hop_ip_20_25)) < 0)
+ return -1;
+
+ status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
+ uint8_t test_hop_20_25 = next_hop_return;
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
+
+ if (test_hop_10_32 == test_hop_10_24) {
+ printf("Next hop return equal\n");
+ return -1;
+ }
+
+ if (test_hop_10_24 == test_hop_20_25){
+ printf("Next hop return equal\n");
+ return -1;
+ }
+
+ status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+ status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+ rte_lpm_free(lpm);
+
+ return PASS;
+}
+
+
+/*
+ * Run all unit tests in order; report each failure and return the last
+ * failing status (0 if every test passed).
+ */
+
+static int
+test_lpm(void)
+{
+	unsigned int i;
+	int status, global_status = 0;
+
+	for (i = 0; i < NUM_LPM_TESTS; i++) {
+		status = tests[i]();
+		if (status < 0) {
+			/* RTE_STR(tests[i]) stringifies to the literal text
+			 * "tests[i]" for every entry; print the index so the
+			 * failing test can actually be identified. */
+			printf("ERROR: LPM Test %u: FAIL\n", i);
+			global_status = status;
+		}
+	}
+
+	return global_status;
+}
+
+REGISTER_TEST_COMMAND_VERSION(lpm_autotest,
+			      test_lpm, TEST_DPDK_ABI_VERSION_V20);
new file mode 100644
@@ -0,0 +1,1748 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ *
+ * LPM6 Autotests from DPDK v17.02 for v2.0 abi compatibility testing.
+ *
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <rte_memory.h>
+/* remapping of DPDK v2.0 symbols */
+#include "dcompat.h"
+/* backported header from DPDK v2.0 */
+#include "rte_lpm6.h"
+
+#include "../test.h"
+#include "../test_lpm6_data.h"
+
+/*
+ * Assertion helper for the tests below: on failure, print the source
+ * line of the failed condition and make the enclosing test function
+ * return -1 (test failure).
+ */
+#define TEST_LPM_ASSERT(cond) do { \
+	if (!(cond)) { \
+		printf("Error at line %d: \n", __LINE__); \
+		return -1; \
+	} \
+} while(0)
+
+/* Signature shared by every LPM6 test case below. */
+typedef int32_t (* rte_lpm6_test)(void);
+
+static int32_t test0(void);
+static int32_t test1(void);
+static int32_t test2(void);
+static int32_t test3(void);
+static int32_t test4(void);
+static int32_t test5(void);
+static int32_t test6(void);
+static int32_t test7(void);
+static int32_t test8(void);
+static int32_t test9(void);
+static int32_t test10(void);
+static int32_t test11(void);
+static int32_t test12(void);
+static int32_t test13(void);
+static int32_t test14(void);
+static int32_t test15(void);
+static int32_t test16(void);
+static int32_t test17(void);
+static int32_t test18(void);
+static int32_t test19(void);
+static int32_t test20(void);
+static int32_t test21(void);
+static int32_t test22(void);
+static int32_t test23(void);
+static int32_t test24(void);
+static int32_t test25(void);
+static int32_t test26(void);
+static int32_t test27(void);
+
+/* Dispatch table iterated by the autotest entry point. */
+static rte_lpm6_test tests6[] = {
+/* Test Cases */
+	test0,
+	test1,
+	test2,
+	test3,
+	test4,
+	test5,
+	test6,
+	test7,
+	test8,
+	test9,
+	test10,
+	test11,
+	test12,
+	test13,
+	test14,
+	test15,
+	test16,
+	test17,
+	test18,
+	test19,
+	test20,
+	test21,
+	test22,
+	test23,
+	test24,
+	test25,
+	test26,
+	test27,
+};
+
+/* Number of entries in the tests6[] dispatch table. */
+#define NUM_LPM6_TESTS (sizeof(tests6)/sizeof(tests6[0]))
+/* Maximum IPv6 prefix depth accepted by the LPM6 API. */
+#define MAX_DEPTH 128
+#define MAX_RULES 1000000
+/* Default/maximum tbl8 group counts used when creating tables. */
+#define NUMBER_TBL8S (1 << 16)
+#define MAX_NUM_TBL8S (1 << 21)
+#define PASS 0
+
+/*
+ * Fill the 16-byte IPv6 address buffer 'ip' with the byte arguments
+ * b1..b16, b1 being the most significant (first) byte.
+ */
+static void
+IPv6(uint8_t *ip, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5,
+	uint8_t b6, uint8_t b7, uint8_t b8, uint8_t b9, uint8_t b10,
+	uint8_t b11, uint8_t b12, uint8_t b13, uint8_t b14, uint8_t b15,
+	uint8_t b16)
+{
+	const uint8_t bytes[16] = {
+		b1, b2, b3, b4, b5, b6, b7, b8,
+		b9, b10, b11, b12, b13, b14, b15, b16
+	};
+
+	memcpy(ip, bytes, sizeof(bytes));
+}
+
+/*
+ * Check that rte_lpm6_create fails gracefully for incorrect user input
+ * arguments
+ */
+static int32_t
+test0(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_create: lpm name == NULL */
+	lpm = rte_lpm6_create(NULL, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm == NULL);
+
+	/* rte_lpm6_create: max_rules = 0 */
+	/* Note: __func__ inserts the function name, in this case "test0". */
+	config.max_rules = 0;
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm == NULL);
+
+	/* socket_id < -1 is invalid */
+	config.max_rules = MAX_RULES;
+	lpm = rte_lpm6_create(__func__, -2, &config);
+	TEST_LPM_ASSERT(lpm == NULL);
+
+	/* rte_lpm6_create: number_tbl8s is bigger than the maximum */
+	config.number_tbl8s = MAX_NUM_TBL8S + 1;
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm == NULL);
+
+	/* rte_lpm6_create: config = NULL */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, NULL);
+	TEST_LPM_ASSERT(lpm == NULL);
+
+	return PASS;
+}
+
+/*
+ * Creates two different LPM tables. Tries to create a third one with the same
+ * name as the first one and expects the create function to fail (return
+ * NULL), since duplicate names are rejected.
+ */
+static int32_t
+test1(void)
+{
+	struct rte_lpm6 *lpm1 = NULL, *lpm2 = NULL, *lpm3 = NULL;
+	struct rte_lpm6_config config;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_create: lpm name == LPM1 */
+	lpm1 = rte_lpm6_create("LPM1", SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm1 != NULL);
+
+	/* rte_lpm6_create: lpm name == LPM2 */
+	lpm2 = rte_lpm6_create("LPM2", SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm2 != NULL);
+
+	/* rte_lpm6_create: duplicate name "LPM1" must be rejected */
+	lpm3 = rte_lpm6_create("LPM1", SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm3 == NULL);
+
+	rte_lpm6_free(lpm1);
+	rte_lpm6_free(lpm2);
+
+	return PASS;
+}
+
+/*
+ * Create lpm table then delete lpm table 20 times
+ * Use a slightly different rules size each time
+ */
+static int32_t
+test2(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	int32_t i;
+
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* Create/free cycles with varying max_rules. */
+	for (i = 0; i < 20; i++) {
+		config.max_rules = MAX_RULES - i;
+		lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+		TEST_LPM_ASSERT(lpm != NULL);
+
+		rte_lpm6_free(lpm);
+	}
+
+	/* Can not test free so return success */
+	return PASS;
+}
+
+/*
+ * Call rte_lpm6_free for NULL pointer user input. Note: free has no return and
+ * therefore it is impossible to check for failure but this test is added to
+ * increase function coverage metrics and to validate that freeing null does
+ * not crash.
+ */
+static int32_t
+test3(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	rte_lpm6_free(lpm);
+	rte_lpm6_free(NULL);
+	return PASS;
+}
+
+/*
+ * Check that rte_lpm6_add fails gracefully for incorrect user input arguments
+ */
+static int32_t
+test4(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth = 24, next_hop = 100;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_add: lpm == NULL */
+	status = rte_lpm6_add(NULL, ip, depth, next_hop);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm6_add: depth < 1 */
+	status = rte_lpm6_add(lpm, ip, 0, next_hop);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm6_add: depth > MAX_DEPTH */
+	status = rte_lpm6_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Check that rte_lpm6_delete fails gracefully for incorrect user input
+ * arguments
+ */
+static int32_t
+test5(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth = 24;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_delete: lpm == NULL */
+	status = rte_lpm6_delete(NULL, ip, depth);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm6_delete: depth < 1 */
+	status = rte_lpm6_delete(lpm, ip, 0);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm6_delete: depth > MAX_DEPTH */
+	status = rte_lpm6_delete(lpm, ip, (MAX_DEPTH + 1));
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Check that rte_lpm6_lookup fails gracefully for incorrect user input
+ * arguments
+ */
+static int32_t
+test6(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t next_hop_return = 0;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_lookup: lpm == NULL */
+	status = rte_lpm6_lookup(NULL, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm6_lookup: ip = NULL */
+	status = rte_lpm6_lookup(lpm, NULL, &next_hop_return);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm6_lookup: next_hop = NULL */
+	status = rte_lpm6_lookup(lpm, ip, NULL);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Checks that rte_lpm6_lookup_bulk_func fails gracefully for incorrect user
+ * input arguments
+ */
+static int32_t
+test7(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[10][16];
+	int16_t next_hop_return[10];
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_lookup_bulk_func: lpm == NULL */
+	status = rte_lpm6_lookup_bulk_func(NULL, ip, next_hop_return, 10);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm6_lookup_bulk_func: ips = NULL */
+	status = rte_lpm6_lookup_bulk_func(lpm, NULL, next_hop_return, 10);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm6_lookup_bulk_func: next_hops = NULL */
+	status = rte_lpm6_lookup_bulk_func(lpm, ip, NULL, 10);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Checks that rte_lpm6_delete_bulk_func fails gracefully for incorrect user
+ * input arguments
+ */
+static int32_t
+test8(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[10][16];
+	uint8_t depth[10];
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* rte_lpm6_delete_bulk_func: lpm == NULL */
+	status = rte_lpm6_delete_bulk_func(NULL, ip, depth, 10);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* Create valid lpm to use in rest of test. */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* rte_lpm6_delete_bulk_func: ips = NULL */
+	status = rte_lpm6_delete_bulk_func(lpm, NULL, depth, 10);
+	TEST_LPM_ASSERT(status < 0);
+
+	/* rte_lpm6_delete_bulk_func: depths = NULL */
+	status = rte_lpm6_delete_bulk_func(lpm, ip, NULL, 10);
+	TEST_LPM_ASSERT(status < 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Call add, lookup and delete for a single rule with depth < 24.
+ * Check all the combinations for the third byte that result in a hit.
+ * Delete the rule and check that the same test returns a miss.
+ */
+static int32_t
+test9(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth = 16, next_hop_add = 100, next_hop_return = 0;
+	int32_t status = 0;
+	uint8_t i;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* NOTE(review): loop covers ip[2] = 0..254; 255 is not exercised. */
+	for (i = 0; i < UINT8_MAX; i++) {
+		ip[2] = i;
+		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+	}
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	for (i = 0; i < UINT8_MAX; i++) {
+		ip[2] = i;
+		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		TEST_LPM_ASSERT(status == -ENOENT);
+	}
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Adds max_rules + 1 and expects a failure. Deletes a rule, then adds
+ * another one and expects success.
+ */
+static int32_t
+test10(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth, next_hop_add = 100;
+	int32_t status = 0;
+	int i;
+
+	config.max_rules = 127;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Fill the table: one rule per depth 1..127. */
+	for (i = 1; i < 128; i++) {
+		depth = (uint8_t)i;
+		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		TEST_LPM_ASSERT(status == 0);
+	}
+
+	depth = 128;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == -ENOSPC);
+
+	depth = 127;
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	depth = 128;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Creates an LPM table with a small number of tbl8s and exhaust them in the
+ * middle of the process of creating a rule.
+ */
+static int32_t
+test11(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth, next_hop_add = 100;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = 16;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	depth = 128;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	ip[0] = 1;
+	depth = 25;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	depth = 33;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	depth = 41;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* This depth requires more tbl8s than remain -> out of space. */
+	depth = 49;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == -ENOSPC);
+
+	depth = 41;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Creates an LPM table with a small number of tbl8s and exhaust them in the
+ * middle of the process of adding a rule when there is already an existing rule
+ * in that position and needs to be extended.
+ */
+static int32_t
+test12(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth, next_hop_add = 100;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = 16;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	depth = 128;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	ip[0] = 1;
+	depth = 41;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* Extending the existing rule runs out of tbl8s. */
+	depth = 49;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == -ENOSPC);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Creates an LPM table with max_rules = 2 and tries to add 3 rules.
+ * Delete one of the rules and tries to add the third one again.
+ */
+static int32_t
+test13(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth, next_hop_add = 100;
+	int32_t status = 0;
+
+	config.max_rules = 2;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	depth = 1;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	depth = 2;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* Third rule exceeds max_rules. */
+	depth = 3;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == -ENOSPC);
+
+	depth = 2;
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	depth = 3;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Add 256 routes with different first bytes and depth 25.
+ * Add one more route with the same depth and check that results in a failure.
+ * After that delete the last rule and create the one that was attempted to be
+ * created. This checks tbl8 exhaustion.
+ */
+static int32_t
+test14(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth = 25, next_hop_add = 100;
+	int32_t status = 0;
+	int i;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = 256;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Consume every available tbl8. */
+	for (i = 0; i < 256; i++) {
+		ip[0] = (uint8_t)i;
+		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		TEST_LPM_ASSERT(status == 0);
+	}
+
+	ip[0] = 255;
+	ip[1] = 1;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == -ENOSPC);
+
+	ip[0] = 255;
+	ip[1] = 0;
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	ip[0] = 255;
+	ip[1] = 1;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Call add, lookup and delete for a single rule with depth = 24
+ */
+static int32_t
+test15(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Call add, lookup and delete for a single rule with depth > 24
+ */
+static int32_t
+test16(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {12,12,1,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth = 128, next_hop_add = 100, next_hop_return = 0;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Use rte_lpm6_add to add rules which effect only the second half of the lpm
+ * table. Use all possible depths ranging from 1..16. Set the next hop = to the
+ * depth. Check lookup hit on every add and check for lookup miss on the
+ * first half of the lpm table after each add. Finally delete all rules going
+ * backwards (i.e. from depth = 16..1) and carry out a lookup after each
+ * delete. The lookup should return the next_hop_add value related to the
+ * previous depth value (i.e. depth -1).
+ */
+static int32_t
+test17(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip1[] = {127,255,255,255,255,255,255,255,255,
+			255,255,255,255,255,255,255};
+	uint8_t ip2[] = {128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	uint8_t depth, next_hop_add, next_hop_return;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Loop with rte_lpm6_add. */
+	for (depth = 1; depth <= 16; depth++) {
+		/* Let the next_hop_add value = depth. Just for change. */
+		next_hop_add = depth;
+
+		status = rte_lpm6_add(lpm, ip2, depth, next_hop_add);
+		TEST_LPM_ASSERT(status == 0);
+
+		/* Check IP in first half of tbl24 which should be empty. */
+		status = rte_lpm6_lookup(lpm, ip1, &next_hop_return);
+		TEST_LPM_ASSERT(status == -ENOENT);
+
+		status = rte_lpm6_lookup(lpm, ip2, &next_hop_return);
+		TEST_LPM_ASSERT((status == 0) &&
+			(next_hop_return == next_hop_add));
+	}
+
+	/* Loop with rte_lpm6_delete. */
+	for (depth = 16; depth >= 1; depth--) {
+		next_hop_add = (uint8_t) (depth - 1);
+
+		status = rte_lpm6_delete(lpm, ip2, depth);
+		TEST_LPM_ASSERT(status == 0);
+
+		status = rte_lpm6_lookup(lpm, ip2, &next_hop_return);
+
+		if (depth != 1) {
+			TEST_LPM_ASSERT((status == 0) &&
+				(next_hop_return == next_hop_add));
+		}
+		else {
+			TEST_LPM_ASSERT(status == -ENOENT);
+		}
+
+		status = rte_lpm6_lookup(lpm, ip1, &next_hop_return);
+		TEST_LPM_ASSERT(status == -ENOENT);
+	}
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * - Add & lookup to hit invalid TBL24 entry
+ * - Add & lookup to hit valid TBL24 entry not extended
+ * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
+ * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
+ */
+static int32_t
+test18(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[16], ip_1[16], ip_2[16];
+	uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
+		next_hop_add_2, next_hop_return;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* Add & lookup to hit invalid TBL24 entry */
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+	next_hop_add = 100;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/* Add & lookup to hit valid TBL24 entry not extended */
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 23;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	depth = 24;
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	depth = 24;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	depth = 23;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
+	 * entry.
+	 */
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	IPv6(ip, 128, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 100;
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
+	 * entry
+	 */
+	IPv6(ip_1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth_1 = 25;
+	next_hop_add_1 = 101;
+
+	IPv6(ip_2, 128, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth_2 = 32;
+	next_hop_add_2 = 102;
+
+	next_hop_return = 0;
+
+	status = rte_lpm6_add(lpm, ip_1, depth_1, next_hop_add_1);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip_1, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+	status = rte_lpm6_add(lpm, ip_2, depth_2, next_hop_add_2);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip_2, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
+
+	status = rte_lpm6_delete(lpm, ip_2, depth_2);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* After deleting the /32, the covering /25 must match again. */
+	status = rte_lpm6_lookup(lpm, ip_2, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+	status = rte_lpm6_delete(lpm, ip_1, depth_1);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip_1, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
+ *   lookup)
+ * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
+ * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
+ *   delete & lookup)
+ * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
+ * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
+ * - Delete a rule that is not present in the TBL24 & lookup
+ * - Delete a rule that is not present in the TBL8 & lookup
+ */
+static int32_t
+test19(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[16];
+	uint8_t depth, next_hop_add, next_hop_return;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	/* Add rule that covers a TBL24 range previously invalid & lookup
+	 * (& delete & lookup)
+	 */
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 16;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/* Add rule that extends a TBL24 invalid entry & lookup
+	 * (& delete & lookup)
+	 */
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 25;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	rte_lpm6_delete_all(lpm);
+
+	/*
+	 * Add rule that extends a TBL24 valid entry & lookup for both rules
+	 * (& delete & lookup)
+	 */
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	IPv6(ip, 128, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	next_hop_add = 100;
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	IPv6(ip, 128, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/*
+	 * Add rule that updates the next hop in TBL24 & lookup
+	 * (& delete & lookup)
+	 */
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/*
+	 * Add rule that updates the next hop in TBL8 & lookup
+	 * (& delete & lookup)
+	 */
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/* Delete a rule that is not present in the TBL24 & lookup */
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+	next_hop_add = 100;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status < 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_delete_all(lpm);
+
+	/* Delete a rule that is not present in the TBL8 & lookup */
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 32;
+	next_hop_add = 100;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status < 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Add two rules, lookup to hit the more specific one, lookup to hit the less
+ * specific one, delete the less specific rule and lookup previous values
+ * again; delete the more specific rule and verify both are gone.
+ */
+static int32_t
+test20(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[16];
+	uint8_t depth, next_hop_add, next_hop_return;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+	next_hop_add = 100;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10);
+	depth = 128;
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	next_hop_add = 100;
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth = 24;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	IPv6(ip, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10);
+	depth = 128;
+
+	status = rte_lpm6_delete(lpm, ip, depth);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+	TEST_LPM_ASSERT(status == -ENOENT);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Adds 3 rules and look them up through the lookup_bulk function.
+ * Includes in the lookup a fourth IP address that won't match
+ * and checks that the result is as expected.
+ */
+int32_t
+test21(void)
+{
+	/* Install three /48 routes, then bulk-look-up those three plus one
+	 * address with no covering route; the miss must report -1.
+	 */
+	struct rte_lpm6_config cfg;
+	struct rte_lpm6 *table = NULL;
+	uint8_t addrs[4][16];
+	int16_t hops[4];
+	uint8_t hop;
+	unsigned int n;
+	int32_t rc = 0;
+
+	cfg.max_rules = MAX_RULES;
+	cfg.number_tbl8s = NUMBER_TBL8S;
+	cfg.flags = 0;
+
+	table = rte_lpm6_create(__func__, SOCKET_ID_ANY, &cfg);
+	TEST_LPM_ASSERT(table != NULL);
+
+	/* Three prefixes that will match, one (the fourth) that will not. */
+	IPv6(addrs[0], 128, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	IPv6(addrs[1], 128, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	IPv6(addrs[2], 128, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	IPv6(addrs[3], 128, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+	/* Next hops 100, 101, 102 for the first three addresses. */
+	for (n = 0, hop = 100; n < 3; n++, hop++) {
+		rc = rte_lpm6_add(table, addrs[n], 48, hop);
+		TEST_LPM_ASSERT(rc == 0);
+	}
+
+	rc = rte_lpm6_lookup_bulk_func(table, addrs, hops, 4);
+	TEST_LPM_ASSERT(rc == 0 && hops[0] == 100
+			&& hops[1] == 101 && hops[2] == 102
+			&& hops[3] == -1);
+
+	rte_lpm6_free(table);
+
+	return PASS;
+}
+
+/*
+ * Adds 5 rules and look them up.
+ * Use the delete_bulk function to delete two of them. Lookup again.
+ * Use the delete_bulk function to delete one more. Lookup again.
+ * Use the delete_bulk function to delete two more, one invalid. Lookup again.
+ * Use the delete_bulk function to delete the remaining one. Lookup again.
+ */
+int32_t
+test22(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip_batch[5][16];
+	/* One depth per rule; reused as the depth array for delete_bulk. */
+	uint8_t depth[5], next_hop_add;
+	int16_t next_hop_return[5];
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Adds 5 rules and look them up */
+
+	IPv6(ip_batch[0], 128, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth[0] = 48;
+	next_hop_add = 101;
+
+	status = rte_lpm6_add(lpm, ip_batch[0], depth[0], next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	IPv6(ip_batch[1], 128, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth[1] = 48;
+	next_hop_add = 102;
+
+	status = rte_lpm6_add(lpm, ip_batch[1], depth[1], next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	IPv6(ip_batch[2], 128, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth[2] = 48;
+	next_hop_add = 103;
+
+	status = rte_lpm6_add(lpm, ip_batch[2], depth[2], next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	IPv6(ip_batch[3], 128, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth[3] = 48;
+	next_hop_add = 104;
+
+	status = rte_lpm6_add(lpm, ip_batch[3], depth[3], next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	depth[4] = 48;
+	next_hop_add = 105;
+
+	status = rte_lpm6_add(lpm, ip_batch[4], depth[4], next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
+			next_hop_return, 5);
+	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == 101
+			&& next_hop_return[1] == 102 && next_hop_return[2] == 103
+			&& next_hop_return[3] == 104 && next_hop_return[4] == 105);
+
+	/* Use the delete_bulk function to delete two of them. Lookup again */
+
+	status = rte_lpm6_delete_bulk_func(lpm, &ip_batch[0], depth, 2);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* Deleted entries must now report -1; the rest stay intact. */
+	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
+			next_hop_return, 5);
+	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == -1
+			&& next_hop_return[1] == -1 && next_hop_return[2] == 103
+			&& next_hop_return[3] == 104 && next_hop_return[4] == 105);
+
+	/* Use the delete_bulk function to delete one more. Lookup again */
+
+	status = rte_lpm6_delete_bulk_func(lpm, &ip_batch[2], depth, 1);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
+			next_hop_return, 5);
+	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == -1
+			&& next_hop_return[1] == -1 && next_hop_return[2] == -1
+			&& next_hop_return[3] == 104 && next_hop_return[4] == 105);
+
+	/* Use the delete_bulk function to delete two, one invalid. Lookup again */
+
+	/* Temporarily rewrite slot 4 to an address that was never added so
+	 * the second entry of the bulk delete is invalid.
+	 */
+	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	status = rte_lpm6_delete_bulk_func(lpm, &ip_batch[3], depth, 2);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* Restore slot 4 to the real rule; it must still be present. */
+	IPv6(ip_batch[4], 128, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
+			next_hop_return, 5);
+	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == -1
+			&& next_hop_return[1] == -1 && next_hop_return[2] == -1
+			&& next_hop_return[3] == -1 && next_hop_return[4] == 105);
+
+	/* Use the delete_bulk function to delete the remaining one. Lookup again */
+
+	status = rte_lpm6_delete_bulk_func(lpm, &ip_batch[4], depth, 1);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm6_lookup_bulk_func(lpm, ip_batch,
+			next_hop_return, 5);
+	TEST_LPM_ASSERT(status == 0 && next_hop_return[0] == -1
+			&& next_hop_return[1] == -1 && next_hop_return[2] == -1
+			&& next_hop_return[3] == -1 && next_hop_return[4] == -1);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
+ * lookup (miss) in a for loop of 30 times. This will check tbl8 extension
+ * and contraction.
+ */
+int32_t
+test23(void)
+{
+	/* Repeatedly add, hit, delete and miss a single /128 rule to
+	 * exercise tbl8 extension and contraction across 30 cycles.
+	 */
+	struct rte_lpm6_config cfg;
+	struct rte_lpm6 *table;
+	uint8_t addr[16];
+	uint8_t hop_out;
+	uint32_t round;
+	int32_t rc;
+
+	cfg.max_rules = MAX_RULES;
+	cfg.number_tbl8s = NUMBER_TBL8S;
+	cfg.flags = 0;
+
+	table = rte_lpm6_create(__func__, SOCKET_ID_ANY, &cfg);
+	TEST_LPM_ASSERT(table != NULL);
+
+	IPv6(addr, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+	for (round = 0; round < 30; round++) {
+		/* Add the /128 with next hop 100 and verify the hit. */
+		rc = rte_lpm6_add(table, addr, 128, 100);
+		TEST_LPM_ASSERT(rc == 0);
+
+		rc = rte_lpm6_lookup(table, addr, &hop_out);
+		TEST_LPM_ASSERT((rc == 0) && (hop_out == 100));
+
+		/* Remove it again and verify the miss. */
+		rc = rte_lpm6_delete(table, addr, 128);
+		TEST_LPM_ASSERT(rc == 0);
+
+		rc = rte_lpm6_lookup(table, addr, &hop_out);
+		TEST_LPM_ASSERT(rc == -ENOENT);
+	}
+
+	rte_lpm6_free(table);
+
+	return PASS;
+}
+
+/*
+ * Sequence of operations for find existing lpm table
+ *
+ * - create table
+ * - find existing table: hit
+ * - find non-existing table: miss
+ */
+int32_t
+test24(void)
+{
+	/* find_existing must resolve a created table by name (hit) and
+	 * return NULL for an unknown name (miss).
+	 */
+	struct rte_lpm6_config cfg;
+	struct rte_lpm6 *created, *found;
+
+	cfg.max_rules = 256 * 32;
+	cfg.number_tbl8s = NUMBER_TBL8S;
+	cfg.flags = 0;
+
+	created = rte_lpm6_create("lpm_find_existing", SOCKET_ID_ANY, &cfg);
+	TEST_LPM_ASSERT(created != NULL);
+
+	/* Hit: the name registered above resolves to the same table. */
+	found = rte_lpm6_find_existing("lpm_find_existing");
+	TEST_LPM_ASSERT(found == created);
+
+	/* Miss: an unused name does not resolve. */
+	found = rte_lpm6_find_existing("lpm_find_non_existing");
+	TEST_LPM_ASSERT(found == NULL);
+
+	/* Cleanup. */
+	rte_lpm6_delete_all(created);
+	rte_lpm6_free(created);
+
+	return PASS;
+}
+
+/*
+ * Add a set of random routes with random depths.
+ * Lookup different IP addresses that match the routes previously added.
+ * Checks that the next hop is the expected one.
+ * The routes, IP addresses and expected result for every case have been
+ * precalculated by using a python script and stored in a .h file.
+ */
+int32_t
+test25(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[16];
+	uint32_t i;
+	uint8_t depth, next_hop_add, next_hop_return, next_hop_expected;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Install the first 1000 precomputed routes from test_lpm6_data.h. */
+	for (i = 0; i < 1000; i++) {
+		memcpy(ip, large_route_table[i].ip, 16);
+		depth = large_route_table[i].depth;
+		next_hop_add = large_route_table[i].next_hop;
+		status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+		TEST_LPM_ASSERT(status == 0);
+	}
+
+	/* generate large IPS table and expected next_hops */
+	/* NOTE(review): the argument 1 presumably asks the generator to also
+	 * fill in expected next hops — confirm in test_lpm6_data.h.
+	 */
+	generate_large_ips_table(1);
+
+	/* Every generated lookup must return its precomputed next hop. */
+	for (i = 0; i < 100000; i++) {
+		memcpy(ip, large_ips_table[i].ip, 16);
+		next_hop_expected = large_ips_table[i].next_hop;
+
+		status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+		TEST_LPM_ASSERT((status == 0) &&
+				(next_hop_return == next_hop_expected));
+	}
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Test for overwriting of tbl8:
+ * - add rule /32 and lookup
+ * - add new rule /24 and lookup
+ * - add third rule /25 and lookup
+ * - lookup /32 and /24 rule to ensure the table has not been overwritten.
+ */
+int32_t
+test26(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip_10_32[] = {10, 10, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint8_t ip_10_24[] = {10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint8_t ip_20_25[] = {10, 10, 20, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint8_t d_ip_10_32 = 32;
+	uint8_t d_ip_10_24 = 24;
+	uint8_t d_ip_20_25 = 25;
+	uint8_t next_hop_ip_10_32 = 100;
+	uint8_t next_hop_ip_10_24 = 105;
+	uint8_t next_hop_ip_20_25 = 111;
+	uint8_t test_hop_10_32, test_hop_10_24, test_hop_20_25;
+	uint8_t next_hop_return = 0;
+	int32_t status = 0;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Install the /32 first so its tbl8 entries exist before the
+	 * broader /24 and /25 rules are added.
+	 */
+	status = rte_lpm6_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
+	if (status < 0)
+		goto fail;
+
+	status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
+	test_hop_10_32 = next_hop_return;
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+	status = rte_lpm6_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
+	if (status < 0)
+		goto fail;
+
+	status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
+	test_hop_10_24 = next_hop_return;
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+	status = rte_lpm6_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
+	if (status < 0)
+		goto fail;
+
+	status = rte_lpm6_lookup(lpm, ip_20_25, &next_hop_return);
+	test_hop_20_25 = next_hop_return;
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
+
+	if (test_hop_10_32 == test_hop_10_24) {
+		printf("Next hop return equal\n");
+		goto fail;
+	}
+
+	if (test_hop_10_24 == test_hop_20_25) {
+		printf("Next hop return equal\n");
+		goto fail;
+	}
+
+	/* Re-check the first two rules to prove the later, broader adds
+	 * did not overwrite their tbl8 entries.
+	 */
+	status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+	status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+
+fail:
+	/* Bug fix: the original returned -1 from these paths without
+	 * freeing the table, leaking it on every early exit.
+	 */
+	rte_lpm6_free(lpm);
+	return -1;
+}
+
+/*
+ * Add a rule that reaches the end of the tree.
+ * Add a rule that is more generic than the first one.
+ * Check every possible combination that produces a match for the second rule.
+ * This tests tbl expansion.
+ */
+int32_t
+test27(void)
+{
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint8_t ip[] = {128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,0};
+	/* No initializers: depth/next_hop_add were previously initialized to
+	 * 128/100 and then immediately overwritten below, which was
+	 * misleading dead code.
+	 */
+	uint8_t depth, next_hop_add, next_hop_return;
+	int32_t status = 0;
+	int i, j;
+
+	config.max_rules = MAX_RULES;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Rule at the very bottom of the trie: a full /128, next hop 128. */
+	depth = 128;
+	next_hop_add = 128;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* Broader /112 covering the same prefix, next hop 112. */
+	depth = 112;
+	next_hop_add = 112;
+	status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+	TEST_LPM_ASSERT(status == 0);
+
+	/* Sweep all 65536 addresses under the /112: only the exact /128
+	 * address (i == 0 && j == 0) hits the deeper rule; every other
+	 * combination must resolve through the expanded /112.
+	 */
+	for (i = 0; i < 256; i++) {
+		ip[14] = (uint8_t)i;
+		for (j = 0; j < 256; j++) {
+			ip[15] = (uint8_t)j;
+			status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+			if (i == 0 && j == 0)
+				TEST_LPM_ASSERT(status == 0 && next_hop_return == 128);
+			else
+				TEST_LPM_ASSERT(status == 0 && next_hop_return == 112);
+		}
+	}
+
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Do all unit and performance tests.
+ */
+static int
+test_lpm6(void)
+{
+	/* Run every registered LPM6 unit test; report the last failure
+	 * status while still executing the remaining tests.
+	 */
+	unsigned i;
+	int status = -1, global_status = 0;
+
+	for (i = 0; i < NUM_LPM6_TESTS; i++) {
+		/* %u: i is unsigned (the original %02d mismatched). */
+		printf("# test %02u\n", i);
+		status = tests6[i]();
+
+		if (status < 0) {
+			/* RTE_STR(tests6[i]) expanded to the literal text
+			 * "tests6[i]" for every failure; print the index
+			 * instead so failures are distinguishable.
+			 */
+			printf("ERROR: LPM Test %u: FAIL\n", i);
+			global_status = status;
+		}
+	}
+
+	return global_status;
+}
+
+REGISTER_TEST_COMMAND_VERSION(lpm6_autotest,
+ test_lpm6, TEST_DPDK_ABI_VERSION_V20);
new file mode 100644
@@ -0,0 +1,179 @@
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ *
+ * LPM6 Autotests from DPDK v17.02 for v2.0 abi compatibility testing.
+ *
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_memory.h>
+
+/* remapping of DPDK v2.0 symbols */
+#include "dcompat.h"
+/* backported header from DPDK v2.0 */
+#include "rte_lpm6.h"
+
+#include "../test.h"
+#include "../test_lpm6_data.h"
+
+/* Fail the enclosing test (return -1), reporting the offending source
+ * line, whenever cond evaluates false.
+ */
+#define TEST_LPM_ASSERT(cond) do { \
+	if (!(cond)) { \
+		printf("Error at line %d: \n", __LINE__); \
+		return -1; \
+	} \
+	} while(0)
+
+static int32_t test_lpm6_perf(void);
+
+#define NUMBER_TBL8S (1 << 16)
+#define PASS 0
+
+/*
+ * Lookup performance test
+ */
+
+#define ITERATIONS (1 << 10)
+#define BATCH_SIZE 100000
+
+static void
+print_route_distribution(const struct rules_tbl_entry *table, uint32_t n)
+{
+	/* Print, for each prefix length 1..128, how many of the n table
+	 * entries use it and what share of the table that represents.
+	 */
+	unsigned depth, idx;
+
+	printf("Route distribution per prefix width: \n");
+	printf("DEPTH    QUANTITY (PERCENT)\n");
+	printf("--------------------------- \n");
+
+	for (depth = 1; depth <= 128; depth++) {
+		unsigned hits = 0;
+
+		/* Count entries of exactly this depth. */
+		for (idx = 0; idx < n; idx++) {
+			if (table[idx].depth == (uint8_t)depth)
+				hits++;
+		}
+
+		printf("%.2u%15u (%.2f)\n", depth, hits,
+				((double)hits) / ((double)n) * 100);
+	}
+	printf("\n");
+}
+
+static int32_t
+test_lpm6_perf(void)
+{
+	/* Measure average cycles for LPM6 add, single lookup, bulk lookup
+	 * and delete over the precomputed large route/IP tables.
+	 * Return type is int32_t to match the forward declaration above
+	 * (the original definition said plain int).
+	 */
+	struct rte_lpm6 *lpm = NULL;
+	struct rte_lpm6_config config;
+	uint64_t begin, total_time;
+	unsigned i, j;
+	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+	int status = 0;
+	int64_t count = 0;
+
+	config.max_rules = 1000000;
+	config.number_tbl8s = NUMBER_TBL8S;
+	config.flags = 0;
+
+	rte_srand(rte_rdtsc());
+
+	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
+
+	/* Only generate IPv6 address of each item in large IPS table,
+	 * here next_hop is not needed.
+	 */
+	generate_large_ips_table(0);
+
+	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Measure add. */
+	begin = rte_rdtsc();
+
+	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+		if (rte_lpm6_add(lpm, large_route_table[i].ip,
+				large_route_table[i].depth, next_hop_add) == 0)
+			status++;
+	}
+	/* End Timer. */
+	total_time = rte_rdtsc() - begin;
+
+	printf("Unique added entries = %d\n", status);
+	printf("Average LPM Add: %g cycles\n",
+			(double)total_time / NUM_ROUTE_ENTRIES);
+
+	/* Measure single Lookup */
+	total_time = 0;
+	count = 0;
+
+	/* NOTE(review): the averages below divide by BATCH_SIZE while the
+	 * loops iterate NUM_IPS_ENTRIES — assumes the two are equal; confirm
+	 * against test_lpm6_data.h.
+	 */
+	for (i = 0; i < ITERATIONS; i++) {
+		begin = rte_rdtsc();
+
+		for (j = 0; j < NUM_IPS_ENTRIES; j++) {
+			if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
+					&next_hop_return) != 0)
+				count++;
+		}
+
+		total_time += rte_rdtsc() - begin;
+
+	}
+	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+	/* Measure bulk Lookup */
+	total_time = 0;
+	count = 0;
+
+	uint8_t ip_batch[NUM_IPS_ENTRIES][16];
+	int16_t next_hops[NUM_IPS_ENTRIES];
+
+	for (i = 0; i < NUM_IPS_ENTRIES; i++)
+		memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+
+	for (i = 0; i < ITERATIONS; i++) {
+
+		/* Lookup per batch */
+		begin = rte_rdtsc();
+		rte_lpm6_lookup_bulk_func(lpm, ip_batch, next_hops,
+				NUM_IPS_ENTRIES);
+		total_time += rte_rdtsc() - begin;
+
+		for (j = 0; j < NUM_IPS_ENTRIES; j++)
+			if (next_hops[j] < 0)
+				count++;
+	}
+	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+	/* Delete */
+	status = 0;
+	/* Bug fix: total_time still held the bulk-lookup total here, so the
+	 * reported delete average was inflated. Reset it first.
+	 */
+	total_time = 0;
+	begin = rte_rdtsc();
+
+	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+		/* rte_lpm_delete(lpm, ip, depth) */
+		status += rte_lpm6_delete(lpm, large_route_table[i].ip,
+				large_route_table[i].depth);
+	}
+
+	total_time += rte_rdtsc() - begin;
+
+	printf("Average LPM Delete: %g cycles\n",
+			(double)total_time / NUM_ROUTE_ENTRIES);
+
+	rte_lpm6_delete_all(lpm);
+	rte_lpm6_free(lpm);
+
+	return PASS;
+}
+
+REGISTER_TEST_COMMAND_VERSION(lpm6_perf_autotest,
+ test_lpm6_perf, TEST_DPDK_ABI_VERSION_V20);
new file mode 100644
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2019 Intel Corporation
+ *
+ * LPM Autotests from DPDK v2.2.0 for v2.0 ABI compatibility testing.
+ *
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_random.h>
+#include <rte_branch_prediction.h>
+#include <rte_ip.h>
+#include <time.h>
+
+#include "../test.h"
+
+/* remapping of DPDK v2.0 symbols */
+#include "dcompat.h"
+/* backported header from DPDK v2.0 */
+#include "rte_lpm.h"
+#include "../test_lpm_routes.h"
+
+/* Fail the enclosing test (return -1), reporting the offending source
+ * line, whenever cond evaluates false.
+ */
+#define TEST_LPM_ASSERT(cond) do { \
+	if (!(cond)) { \
+		printf("Error at line %d:\n", __LINE__); \
+		return -1; \
+	} \
+	} while (0)
+
+
+#define PASS 0
+
+/*
+ * Lookup performance test
+ */
+
+#define ITERATIONS (1 << 10)
+#define BATCH_SIZE (1 << 12)
+#define BULK_SIZE 32
+
+static int32_t test_lpm_perf(void);
+
+/* 'static' added to match the forward declaration above; the original
+ * definition dropped it.
+ */
+static int32_t
+test_lpm_perf(void)
+{
+	/* Measure average cycles for LPM add, single lookup, bulk lookup,
+	 * lookupx4 and delete over the generated large route table, plus
+	 * tbl24 occupancy statistics.
+	 */
+	struct rte_lpm *lpm = NULL;
+	uint64_t begin, total_time, lpm_used_entries = 0;
+	unsigned i, j;
+	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+	int status = 0;
+	uint64_t cache_line_counter = 0;
+	int64_t count = 0;
+
+	rte_srand(rte_rdtsc());
+
+	/* (re) generate the routing table */
+	generate_large_route_rule_table();
+
+	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+	print_route_distribution(large_route_table,
+			(uint32_t) NUM_ROUTE_ENTRIES);
+
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Measure add. */
+	begin = rte_rdtsc();
+
+	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+		if (rte_lpm_add(lpm, large_route_table[i].ip,
+				large_route_table[i].depth, next_hop_add) == 0)
+			status++;
+	}
+	/* End Timer. */
+	total_time = rte_rdtsc() - begin;
+
+	printf("Unique added entries = %d\n", status);
+	/* Obtain add statistics: count valid tbl24 entries and estimate how
+	 * many 64-byte cache lines they touch (32 entries per line).
+	 */
+	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
+		if (lpm->tbl24[i].valid)
+			lpm_used_entries++;
+
+		if (i % 32 == 0) {
+			if ((uint64_t)count < lpm_used_entries) {
+				cache_line_counter++;
+				count = lpm_used_entries;
+			}
+		}
+	}
+
+	printf("Used table 24 entries = %u (%g%%)\n",
+			(unsigned) lpm_used_entries,
+			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
+	printf("64 byte Cache entries used = %u (%u bytes)\n",
+			(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
+
+	printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
+
+	/* Measure single Lookup */
+	total_time = 0;
+	count = 0;
+
+	for (i = 0; i < ITERATIONS; i++) {
+		static uint32_t ip_batch[BATCH_SIZE];
+
+		for (j = 0; j < BATCH_SIZE; j++)
+			ip_batch[j] = rte_rand();
+
+		/* Lookup per batch */
+		begin = rte_rdtsc();
+
+		for (j = 0; j < BATCH_SIZE; j++) {
+			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
+				count++;
+		}
+
+		total_time += rte_rdtsc() - begin;
+
+	}
+	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+	/* Measure bulk Lookup */
+	total_time = 0;
+	count = 0;
+	for (i = 0; i < ITERATIONS; i++) {
+		static uint32_t ip_batch[BATCH_SIZE];
+		uint16_t next_hops[BULK_SIZE];
+
+		/* Create array of random IP addresses */
+		for (j = 0; j < BATCH_SIZE; j++)
+			ip_batch[j] = rte_rand();
+
+		/* Lookup per batch */
+		begin = rte_rdtsc();
+		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
+			unsigned k;
+			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
+			for (k = 0; k < BULK_SIZE; k++)
+				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
+					count++;
+		}
+
+		total_time += rte_rdtsc() - begin;
+	}
+	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+	/* Measure LookupX4 */
+	total_time = 0;
+	count = 0;
+	for (i = 0; i < ITERATIONS; i++) {
+		static uint32_t ip_batch[BATCH_SIZE];
+		uint16_t next_hops[4];
+
+		/* Create array of random IP addresses */
+		for (j = 0; j < BATCH_SIZE; j++)
+			ip_batch[j] = rte_rand();
+
+		/* Lookup per batch */
+		begin = rte_rdtsc();
+		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
+			unsigned k;
+			__m128i ipx4;
+
+			/* Bug fix: the original followed this unaligned load
+			 * with 'ipx4 = *(__m128i *)(ip_batch + j);', an
+			 * aligned, strict-aliasing-violating dereference that
+			 * overwrote it. Keep only the loadu intrinsic.
+			 */
+			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
+			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
+			for (k = 0; k < RTE_DIM(next_hops); k++)
+				if (unlikely(next_hops[k] == UINT16_MAX))
+					count++;
+		}
+
+		total_time += rte_rdtsc() - begin;
+	}
+	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
+			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+	/* Delete */
+	status = 0;
+	/* Bug fix: total_time still held the lookupx4 total here, so the
+	 * reported delete average was inflated. Reset it first.
+	 */
+	total_time = 0;
+	begin = rte_rdtsc();
+
+	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+		/* rte_lpm_delete(lpm, ip, depth) */
+		status += rte_lpm_delete(lpm, large_route_table[i].ip,
+				large_route_table[i].depth);
+	}
+
+	total_time += rte_rdtsc() - begin;
+
+	printf("Average LPM Delete: %g cycles\n",
+			(double)total_time / NUM_ROUTE_ENTRIES);
+
+	rte_lpm_delete_all(lpm);
+	rte_lpm_free(lpm);
+
+	return PASS;
+}
+
+REGISTER_TEST_COMMAND_VERSION(lpm_perf_autotest,
+ test_lpm_perf, TEST_DPDK_ABI_VERSION_V20);
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <rte_ip.h>
+#include <rte_lpm.h>
+
+#include "../test.h"
+
+REGISTER_TEST_ABI_VERSION(v20, TEST_DPDK_ABI_VERSION_V20);