[dpdk-dev] [PATCH v2 2/5] lpm: fix compilation on ARM BE

Hemant Agrawal hemant.agrawal at nxp.com
Wed Dec 13 13:52:55 CET 2017


Compiling on ARM BE using the Linaro toolchain caused the following
errors/warnings.

rte_lpm.c: In function ‘add_depth_big_v20’:
rte_lpm.c:911:4: error: braces around scalar initializer [-Werror]
    { .group_idx = (uint8_t)tbl8_group_index, },
    ^
rte_lpm.c:911:4: note: (near initialization for
	‘new_tbl24_entry.depth’)
rte_lpm.c:911:6: error: field name not in record or union initializer
    { .group_idx = (uint8_t)tbl8_group_index, },
      ^
rte_lpm.c:911:6: note: (near initialization for
	‘new_tbl24_entry.depth’)
rte_lpm.c:914:13: error: initialized field overwritten
	[-Werror=override-init]
    .depth = 0,

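The errors come from the fact that struct rte_lpm_tbl_entry_v20 declares
its union and bit-fields in the opposite order for big-endian builds, so a
designated initializer written in little-endian member order no longer
lines up with the declaration. A rough sketch of the relevant layout
(abridged and illustrative; see lib/librte_lpm/rte_lpm.h for the exact
definition):

__extension__ struct rte_lpm_tbl_entry_v20 {
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/* Little endian: union first, then the flag/depth bit-fields. */
	RTE_STD_C11
	union {
		uint8_t next_hop;
		uint8_t group_idx;
	};
	uint8_t valid       :1;
	uint8_t valid_group :1;
	uint8_t depth       :6;
#else
	/* Big endian: members declared in reverse order. An initializer
	 * written for the LE layout makes the braced union initializer
	 * land on the scalar 'depth' field, hence the
	 * "braces around scalar initializer" error above. */
	uint8_t depth       :6;
	uint8_t valid_group :1;
	uint8_t valid       :1;
	RTE_STD_C11
	union {
		uint8_t next_hop;
		uint8_t group_idx;
	};
#endif
};
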
Fixes: dc81ebbacaeb ("lpm: extend IPv4 next hop field")
Cc: Michal Kobylinski <michalx.kobylinski at intel.com>
Cc: stable at dpdk.org

Signed-off-by: Jun Yang <jun.yang at nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
Acked-by: Bruce Richardson <bruce.richardson at intel.com>
---
v2: added endianness checks in the assignments

 lib/librte_lpm/rte_lpm.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index e1f1fad..a47c04f 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -912,10 +912,17 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 			{ .group_idx = (uint8_t)tbl8_group_index, },
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
+#else
+			.depth = 0,
+			.valid_group = 1,
+			.valid = VALID,
+			{ .group_idx = (uint8_t)tbl8_group_index, },
+#endif
 		};
 
 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
@@ -958,10 +965,17 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 				{ .group_idx = (uint8_t)tbl8_group_index, },
 				.valid = VALID,
 				.valid_group = 1,
 				.depth = 0,
+#else
+				.depth = 0,
+				.valid_group = 1,
+				.valid = VALID,
+				{ .group_idx = (uint8_t)tbl8_group_index, },
+#endif
 		};
 
 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
@@ -1365,10 +1379,18 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 		 */
 
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = sub_rule_depth,
+#else
+			.depth = sub_rule_depth,
+			.valid_group = 0,
+			.valid = VALID,
+			{ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, },
+#endif
+
 		};
 
 		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
@@ -1668,10 +1690,17 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = lpm->tbl8[tbl8_recycle_index].depth,
+#else
+			.depth = lpm->tbl8[tbl8_recycle_index].depth,
+			.valid_group = 0,
+			.valid = VALID,
+			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+#endif
 		};
 
 		/* Set tbl24 before freeing tbl8 to avoid race condition. */
-- 
2.7.4