[V3,6/6] net/hns3: remove some unnecessary blank lines

Message ID 1604932142-19900-7-git-send-email-oulijun@huawei.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: bugfix and cleanups for hns3

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/travis-robot success Travis build: passed
ci/iol-mellanox-Performance success Performance Testing PASS

Commit Message

Lijun Ou Nov. 9, 2020, 2:29 p.m. UTC
  According to the rule of the static check tools
that blank lines should be arranged properly to
keep the code compact, remove some unnecessary
blank lines to fix the corresponding warnings.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
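For illustration, a minimal sketch of the pattern the checker flags
(hypothetical code, not taken from the driver): a value that has just been
read and the check that validates it should sit on adjacent lines, with no
blank line in between, as in the hns3_cmd_csq_clean() hunk below.

    #include <stdint.h>
    #include <errno.h>

    /* Hypothetical example of the blank-line rule: keep the read and the
     * check that validates it adjacent instead of separating them with a
     * blank line.
     */
    static int read_and_check(const uint32_t *reg, uint32_t ring_size)
    {
        uint32_t head;

        head = *reg;                /* read the hardware head pointer */
        if (head >= ring_size)      /* validate it immediately */
            return -EINVAL;

        return 0;
    }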
 drivers/net/hns3/hns3_cmd.c    |  1 -
 drivers/net/hns3/hns3_ethdev.c |  6 +-----
 drivers/net/hns3/hns3_flow.c   | 37 ++++++++++++++-----------------------
 drivers/net/hns3/hns3_rxtx.c   |  1 -
 drivers/net/hns3/hns3_stats.c  |  1 -
 5 files changed, 15 insertions(+), 31 deletions(-)
  

Patch

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 4f52ed0..f58f4f7 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -198,7 +198,6 @@  hns3_cmd_csq_clean(struct hns3_hw *hw)
 	int clean;
 
 	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
-
 	if (!is_valid_csq_clean_head(csq, head)) {
 		hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
 			    csq->next_to_use, csq->next_to_clean);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 79c0389..2011378 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -238,7 +238,6 @@  hns3_interrupt_handler(void *param)
 	hns3_pf_disable_irq0(hw);
 
 	event_cause = hns3_check_event_cause(hns, &clearval);
-
 	/* vector 0 interrupt is shared with reset and mailbox source events. */
 	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
 		hns3_warn(hw, "Received err interrupt");
@@ -3556,9 +3555,7 @@  hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,
 	for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {
 		priv = &buf_alloc->priv_buf[i];
 		mask = BIT((uint8_t)i);
-
-		if (hw->hw_tc_map & mask &&
-		    hw->dcb_info.hw_pfc_map & mask) {
+		if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) {
 			/* Reduce the number of pfc TC with private buffer */
 			priv->wl.low = 0;
 			priv->enable = 0;
@@ -3612,7 +3609,6 @@  hns3_only_alloc_priv_buff(struct hns3_hw *hw,
 
 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
 		priv = &buf_alloc->priv_buf[i];
-
 		priv->enable = 0;
 		priv->wl.low = 0;
 		priv->wl.high = 0;
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 2fff157..ee6ec15 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -209,8 +209,7 @@  hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 
 	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
 	if (ret) {
-		rte_flow_error_set(error, -ret,
-				   RTE_FLOW_ERROR_TYPE_HANDLE,
+		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "Read counter fail.");
 		return ret;
 	}
@@ -547,7 +546,6 @@  hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	if (item->mask) {
 		ipv4_mask = item->mask;
-
 		if (ipv4_mask->hdr.total_length ||
 		    ipv4_mask->hdr.packet_id ||
 		    ipv4_mask->hdr.fragment_offset ||
@@ -616,8 +614,8 @@  hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	if (item->mask) {
 		ipv6_mask = item->mask;
-		if (ipv6_mask->hdr.vtc_flow ||
-		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
+		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
+		    ipv6_mask->hdr.hop_limits) {
 			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
@@ -672,12 +670,10 @@  hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	if (item->mask) {
 		tcp_mask = item->mask;
-		if (tcp_mask->hdr.sent_seq ||
-		    tcp_mask->hdr.recv_ack ||
-		    tcp_mask->hdr.data_off ||
-		    tcp_mask->hdr.tcp_flags ||
-		    tcp_mask->hdr.rx_win ||
-		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
+		if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
+		    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
 			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
@@ -776,7 +772,6 @@  hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in SCTP");
-
 		if (sctp_mask->hdr.src_port) {
 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
 			rule->key_conf.mask.src_port =
@@ -1069,8 +1064,7 @@  hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 }
 
 static int
-hns3_parse_normal(const struct rte_flow_item *item,
-		  struct hns3_fdir_rule *rule,
+hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		  struct items_step_mngr *step_mngr,
 		  struct rte_flow_error *error)
 {
@@ -1331,9 +1325,8 @@  hns3_rss_conf_copy(struct hns3_rss_conf *out,
 		.key_len = in->key_len,
 		.queue_num = in->queue_num,
 	};
-	out->conf.queue =
-		memcpy(out->queue, in->queue,
-		       sizeof(*in->queue) * in->queue_num);
+	out->conf.queue = memcpy(out->queue, in->queue,
+				sizeof(*in->queue) * in->queue_num);
 	if (in->key)
 		out->conf.key = memcpy(out->key, in->key, in->key_len);
 
@@ -1783,17 +1776,15 @@  hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 
 	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
 	if (flow == NULL) {
-		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-				   "Failed to allocate flow memory");
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Failed to allocate flow memory");
 		return NULL;
 	}
 	flow_node = rte_zmalloc("hns3 flow node",
 				sizeof(struct hns3_flow_mem), 0);
 	if (flow_node == NULL) {
-		rte_flow_error_set(error, ENOMEM,
-				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-				   "Failed to allocate flow list memory");
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Failed to allocate flow list memory");
 		rte_free(flow);
 		return NULL;
 	}
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 4b88b46..c76e635 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1584,7 +1584,6 @@  hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
 
 	vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
 			RTE_PKTMBUF_HEADROOM);
-
 	if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
 		return -EINVAL;
 
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index c590647..91168ac 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -679,7 +679,6 @@  hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			(*count)++;
 		}
 	}
-
 }
 
 void