[v3,6/9] distributor: remove deprecated code

Message ID 0c5205b372743b7600e4d3d8076f664243fd9b6e.1571245316.git.anatoly.burakov@intel.com (mailing list archive)
State Superseded, archived
Series Implement the new ABI policy and add helper scripts

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Burakov, Anatoly Oct. 16, 2019, 5:03 p.m. UTC
  From: Marcin Baran <marcinx.baran@intel.com>

Remove code for old ABI versions ahead of ABI version bump.

Signed-off-by: Marcin Baran <marcinx.baran@intel.com>
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---

Notes:
    v3:
    - Removed single mode from distributor as per Dave's comments
    
    v2:
    - Moved this to before ABI version bump to avoid compile breakage

 app/test/test_distributor.c                   | 102 ++---
 app/test/test_distributor_perf.c              |  12 -
 lib/librte_distributor/Makefile               |   1 -
 lib/librte_distributor/meson.build            |   2 +-
 lib/librte_distributor/rte_distributor.c      | 126 +-----
 lib/librte_distributor/rte_distributor.h      |   1 -
 .../rte_distributor_private.h                 |  35 --
 .../rte_distributor_v1705.h                   |  61 ---
 lib/librte_distributor/rte_distributor_v20.c  | 402 ------------------
 lib/librte_distributor/rte_distributor_v20.h  | 218 ----------
 .../rte_distributor_version.map               |  16 +-
 11 files changed, 38 insertions(+), 938 deletions(-)
 delete mode 100644 lib/librte_distributor/rte_distributor_v1705.h
 delete mode 100644 lib/librte_distributor/rte_distributor_v20.c
 delete mode 100644 lib/librte_distributor/rte_distributor_v20.h
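
For orientation before the review and diff below: the "code for old ABI versions" being removed is the symbol-versioning boilerplate that let the DPDK 2.0 and 17.05 ABIs of this library co-exist. The sketch that follows is schematic rather than a compilable unit; the macro names and call shapes are taken from the diff itself, function bodies are elided, and rte_distributor_flush() stands in for any of the affected APIs.

    /* Old layout (schematic): each public API had a 2.0 ("v20", single-packet)
     * and a 17.05 ("v1705", burst) implementation, tied together with the
     * rte_compat.h versioning macros.
     */
    int
    rte_distributor_flush_v20(struct rte_distributor_v20 *d)
    {
            /* ... single-packet implementation, body elided ... */
            return 0;
    }
    VERSION_SYMBOL(rte_distributor_flush, _v20, 2.0);

    int
    rte_distributor_flush_v1705(struct rte_distributor *d)
    {
            /* ... burst implementation, body elided ... */
            return 0;
    }
    BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
    MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
                    rte_distributor_flush_v1705);

After the patch, each such pair collapses into a single unversioned definition (rte_distributor_flush() and so on), and the DPDK_2.0 node disappears from rte_distributor_version.map.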
  

Comments

Hunt, David Oct. 17, 2019, 10:53 a.m. UTC | #1
On 16/10/2019 18:03, Anatoly Burakov wrote:
> From: Marcin Baran <marcinx.baran@intel.com>
>
> Remove code for old ABI versions ahead of ABI version bump.
>
> Signed-off-by: Marcin Baran <marcinx.baran@intel.com>
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
> ---
>
> Notes:
>      v3:
>      - Removed single mode from distributor as per Dave's comments


Hi Anatoly,

Having looked at this code more closely, I see that this now breaks the API
when a distributor instance is created with RTE_DIST_ALG_SINGLE.

I now think the better solution would be simply to rename the _v20 structs,
functions, etc. to _single, as you did in the previous patch version. That
way the unit and perf tests should still work unchanged, and the API is
maintained.

Rgds,
Dave.
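
For context (an editorial sketch, not part of the patch or the thread): an application using the single-packet mode typically creates its distributor as shown below, mirroring the calls that the diff below removes from the unit tests. The function name and distributor name are illustrative; only the rte_distributor API calls are taken from the patch. With RTE_DIST_ALG_SINGLE dropped from enum rte_distributor_alg_type, such code no longer compiles, which is the API break referred to above.

    #include <stdio.h>

    #include <rte_distributor.h>
    #include <rte_errno.h>
    #include <rte_lcore.h>

    /* Minimal sketch: create a distributor in single-packet mode, as an
     * existing application may do today. Removing RTE_DIST_ALG_SINGLE from
     * the public enum makes this fail to compile.
     */
    static struct rte_distributor *
    create_single_mode_dist(void)
    {
            struct rte_distributor *d;

            d = rte_distributor_create("app_dist", rte_socket_id(),
                            rte_lcore_count() - 1,
                            RTE_DIST_ALG_SINGLE);
            if (d == NULL)
                    printf("distributor create failed: %d\n", rte_errno);

            return d;
    }

Renaming the _v20 implementation to _single, as suggested above, would keep RTE_DIST_ALG_SINGLE in the public enum and keep such calls working, while still removing the versioned _v20/_v1705 symbols.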
  

Patch

diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 7090b55f88..af42f3a991 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -511,18 +511,9 @@  test_flush_with_worker_shutdown(struct worker_params *wp,
 static
 int test_error_distributor_create_name(void)
 {
-	struct rte_distributor *d = NULL;
 	struct rte_distributor *db = NULL;
 	char *name = NULL;
 
-	d = rte_distributor_create(name, rte_socket_id(),
-			rte_lcore_count() - 1,
-			RTE_DIST_ALG_SINGLE);
-	if (d != NULL || rte_errno != EINVAL) {
-		printf("ERROR: No error on create() with NULL name param\n");
-		return -1;
-	}
-
 	db = rte_distributor_create(name, rte_socket_id(),
 			rte_lcore_count() - 1,
 			RTE_DIST_ALG_BURST);
@@ -538,17 +529,8 @@  int test_error_distributor_create_name(void)
 static
 int test_error_distributor_create_numworkers(void)
 {
-	struct rte_distributor *ds = NULL;
 	struct rte_distributor *db = NULL;
 
-	ds = rte_distributor_create("test_numworkers", rte_socket_id(),
-			RTE_MAX_LCORE + 10,
-			RTE_DIST_ALG_SINGLE);
-	if (ds != NULL || rte_errno != EINVAL) {
-		printf("ERROR: No error on create() with num_workers > MAX\n");
-		return -1;
-	}
-
 	db = rte_distributor_create("test_numworkers", rte_socket_id(),
 			RTE_MAX_LCORE + 10,
 			RTE_DIST_ALG_BURST);
@@ -589,11 +571,8 @@  quit_workers(struct worker_params *wp, struct rte_mempool *p)
 static int
 test_distributor(void)
 {
-	static struct rte_distributor *ds;
 	static struct rte_distributor *db;
-	static struct rte_distributor *dist[2];
 	static struct rte_mempool *p;
-	int i;
 
 	if (rte_lcore_count() < 2) {
 		printf("Not enough cores for distributor_autotest, expecting at least 2\n");
@@ -613,20 +592,6 @@  test_distributor(void)
 		rte_distributor_clear_returns(db);
 	}
 
-	if (ds == NULL) {
-		ds = rte_distributor_create("Test_dist_single",
-				rte_socket_id(),
-				rte_lcore_count() - 1,
-			RTE_DIST_ALG_SINGLE);
-		if (ds == NULL) {
-			printf("Error creating single distributor\n");
-			return -1;
-		}
-	} else {
-		rte_distributor_flush(ds);
-		rte_distributor_clear_returns(ds);
-	}
-
 	const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
 			(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
 	if (p == NULL) {
@@ -638,52 +603,39 @@  test_distributor(void)
 		}
 	}
 
-	dist[0] = ds;
-	dist[1] = db;
-
-	for (i = 0; i < 2; i++) {
-
-		worker_params.dist = dist[i];
-		if (i)
-			strlcpy(worker_params.name, "burst",
-					sizeof(worker_params.name));
-		else
-			strlcpy(worker_params.name, "single",
-					sizeof(worker_params.name));
-
-		rte_eal_mp_remote_launch(handle_work,
-				&worker_params, SKIP_MASTER);
-		if (sanity_test(&worker_params, p) < 0)
+	worker_params.dist = db;
+
+	rte_eal_mp_remote_launch(handle_work,
+			&worker_params, SKIP_MASTER);
+	if (sanity_test(&worker_params, p) < 0)
+		goto err;
+	quit_workers(&worker_params, p);
+
+	rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
+			&worker_params, SKIP_MASTER);
+	if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
+		goto err;
+	quit_workers(&worker_params, p);
+
+	if (rte_lcore_count() > 2) {
+		rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
+				&worker_params,
+				SKIP_MASTER);
+		if (sanity_test_with_worker_shutdown(&worker_params,
+				p) < 0)
 			goto err;
 		quit_workers(&worker_params, p);
 
-		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
-				&worker_params, SKIP_MASTER);
-		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
+		rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
+				&worker_params,
+				SKIP_MASTER);
+		if (test_flush_with_worker_shutdown(&worker_params,
+				p) < 0)
 			goto err;
 		quit_workers(&worker_params, p);
 
-		if (rte_lcore_count() > 2) {
-			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
-					&worker_params,
-					SKIP_MASTER);
-			if (sanity_test_with_worker_shutdown(&worker_params,
-					p) < 0)
-				goto err;
-			quit_workers(&worker_params, p);
-
-			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
-					&worker_params,
-					SKIP_MASTER);
-			if (test_flush_with_worker_shutdown(&worker_params,
-					p) < 0)
-				goto err;
-			quit_workers(&worker_params, p);
-
-		} else {
-			printf("Too few cores to run worker shutdown test\n");
-		}
-
+	} else {
+		printf("Too few cores to run worker shutdown test\n");
 	}
 
 	if (test_error_distributor_create_numworkers() == -1 ||
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index 664530ff9e..a0bbae1a16 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -215,18 +215,6 @@  test_distributor_perf(void)
 	/* first time how long it takes to round-trip a cache line */
 	time_cache_line_switch();
 
-	if (ds == NULL) {
-		ds = rte_distributor_create("Test_perf", rte_socket_id(),
-				rte_lcore_count() - 1,
-				RTE_DIST_ALG_SINGLE);
-		if (ds == NULL) {
-			printf("Error creating distributor\n");
-			return -1;
-		}
-	} else {
-		rte_distributor_clear_returns(ds);
-	}
-
 	if (db == NULL) {
 		db = rte_distributor_create("Test_burst", rte_socket_id(),
 				rte_lcore_count() - 1,
diff --git a/lib/librte_distributor/Makefile b/lib/librte_distributor/Makefile
index 0ef80dcff4..54e9b0cc27 100644
--- a/lib/librte_distributor/Makefile
+++ b/lib/librte_distributor/Makefile
@@ -15,7 +15,6 @@  EXPORT_MAP := rte_distributor_version.map
 LIBABIVER := 1
 
 # all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor_v20.c
 SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor.c
 ifeq ($(CONFIG_RTE_ARCH_X86),y)
 SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor_match_sse.c
diff --git a/lib/librte_distributor/meson.build b/lib/librte_distributor/meson.build
index dba7e3b2aa..d3e2aaa9e0 100644
--- a/lib/librte_distributor/meson.build
+++ b/lib/librte_distributor/meson.build
@@ -1,7 +1,7 @@ 
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation
 
-sources = files('rte_distributor.c', 'rte_distributor_v20.c')
+sources = files('rte_distributor.c')
 if arch_subdir == 'x86'
 	sources += files('rte_distributor_match_sse.c')
 else
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 21eb1fb0a1..d74fa468c8 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -18,8 +18,6 @@ 
 
 #include "rte_distributor_private.h"
 #include "rte_distributor.h"
-#include "rte_distributor_v20.h"
-#include "rte_distributor_v1705.h"
 
 TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
 
@@ -33,7 +31,7 @@  EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
 /**** Burst Packet APIs called by workers ****/
 
 void
-rte_distributor_request_pkt_v1705(struct rte_distributor *d,
+rte_distributor_request_pkt(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **oldpkt,
 		unsigned int count)
 {
@@ -42,12 +40,6 @@  rte_distributor_request_pkt_v1705(struct rte_distributor *d,
 
 	volatile int64_t *retptr64;
 
-	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
-		rte_distributor_request_pkt_v20(d->d_v20,
-			worker_id, oldpkt[0]);
-		return;
-	}
-
 	retptr64 = &(buf->retptr64[0]);
 	/* Spin while handshake bits are set (scheduler clears it) */
 	while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
@@ -78,14 +70,9 @@  rte_distributor_request_pkt_v1705(struct rte_distributor *d,
 	 */
 	*retptr64 |= RTE_DISTRIB_GET_BUF;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
-MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
-		unsigned int worker_id, struct rte_mbuf **oldpkt,
-		unsigned int count),
-		rte_distributor_request_pkt_v1705);
 
 int
-rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
+rte_distributor_poll_pkt(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **pkts)
 {
 	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
@@ -93,11 +80,6 @@  rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
 	int count = 0;
 	unsigned int i;
 
-	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
-		pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
-		return (pkts[0]) ? 1 : 0;
-	}
-
 	/* If bit is set, return */
 	if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
 		return -1;
@@ -119,27 +101,14 @@  rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
 
 	return count;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
-		unsigned int worker_id, struct rte_mbuf **pkts),
-		rte_distributor_poll_pkt_v1705);
 
 int
-rte_distributor_get_pkt_v1705(struct rte_distributor *d,
+rte_distributor_get_pkt(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **pkts,
 		struct rte_mbuf **oldpkt, unsigned int return_count)
 {
 	int count;
 
-	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
-		if (return_count <= 1) {
-			pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
-				worker_id, oldpkt[0]);
-			return (pkts[0]) ? 1 : 0;
-		} else
-			return -EINVAL;
-	}
-
 	rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
 
 	count = rte_distributor_poll_pkt(d, worker_id, pkts);
@@ -153,27 +122,14 @@  rte_distributor_get_pkt_v1705(struct rte_distributor *d,
 	}
 	return count;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
-		unsigned int worker_id, struct rte_mbuf **pkts,
-		struct rte_mbuf **oldpkt, unsigned int return_count),
-		rte_distributor_get_pkt_v1705);
 
 int
-rte_distributor_return_pkt_v1705(struct rte_distributor *d,
+rte_distributor_return_pkt(struct rte_distributor *d,
 		unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
 {
 	struct rte_distributor_buffer *buf = &d->bufs[worker_id];
 	unsigned int i;
 
-	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
-		if (num == 1)
-			return rte_distributor_return_pkt_v20(d->d_v20,
-				worker_id, oldpkt[0]);
-		else
-			return -EINVAL;
-	}
-
 	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
 		/* Switch off the return bit first */
 		buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
@@ -187,10 +143,6 @@  rte_distributor_return_pkt_v1705(struct rte_distributor *d,
 
 	return 0;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
-		unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
-		rte_distributor_return_pkt_v1705);
 
 /**** APIs called on distributor core ***/
 
@@ -336,7 +288,7 @@  release(struct rte_distributor *d, unsigned int wkr)
 
 /* process a set of packets to distribute them to workers */
 int
-rte_distributor_process_v1705(struct rte_distributor *d,
+rte_distributor_process(struct rte_distributor *d,
 		struct rte_mbuf **mbufs, unsigned int num_mbufs)
 {
 	unsigned int next_idx = 0;
@@ -347,11 +299,6 @@  rte_distributor_process_v1705(struct rte_distributor *d,
 	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
 	unsigned int i, j, w, wid;
 
-	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
-		/* Call the old API */
-		return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
-	}
-
 	if (unlikely(num_mbufs == 0)) {
 		/* Flush out all non-full cache-lines to workers. */
 		for (wid = 0 ; wid < d->num_workers; wid++) {
@@ -470,14 +417,10 @@  rte_distributor_process_v1705(struct rte_distributor *d,
 
 	return num_mbufs;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
-		struct rte_mbuf **mbufs, unsigned int num_mbufs),
-		rte_distributor_process_v1705);
 
 /* return to the caller, packets returned from workers */
 int
-rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
+rte_distributor_returned_pkts(struct rte_distributor *d,
 		struct rte_mbuf **mbufs, unsigned int max_mbufs)
 {
 	struct rte_distributor_returned_pkts *returns = &d->returns;
@@ -485,12 +428,6 @@  rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
 			max_mbufs : returns->count;
 	unsigned int i;
 
-	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
-		/* Call the old API */
-		return rte_distributor_returned_pkts_v20(d->d_v20,
-				mbufs, max_mbufs);
-	}
-
 	for (i = 0; i < retval; i++) {
 		unsigned int idx = (returns->start + i) &
 				RTE_DISTRIB_RETURNS_MASK;
@@ -502,10 +439,6 @@  rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
 
 	return retval;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
-		struct rte_mbuf **mbufs, unsigned int max_mbufs),
-		rte_distributor_returned_pkts_v1705);
 
 /*
  * Return the number of packets in-flight in a distributor, i.e. packets
@@ -527,16 +460,11 @@  total_outstanding(const struct rte_distributor *d)
  * queued up.
  */
 int
-rte_distributor_flush_v1705(struct rte_distributor *d)
+rte_distributor_flush(struct rte_distributor *d)
 {
 	unsigned int flushed;
 	unsigned int wkr;
 
-	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
-		/* Call the old API */
-		return rte_distributor_flush_v20(d->d_v20);
-	}
-
 	flushed = total_outstanding(d);
 
 	while (total_outstanding(d) > 0)
@@ -556,33 +484,21 @@  rte_distributor_flush_v1705(struct rte_distributor *d)
 
 	return flushed;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
-		rte_distributor_flush_v1705);
 
 /* clears the internal returns array in the distributor */
 void
-rte_distributor_clear_returns_v1705(struct rte_distributor *d)
+rte_distributor_clear_returns(struct rte_distributor *d)
 {
 	unsigned int wkr;
 
-	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
-		/* Call the old API */
-		rte_distributor_clear_returns_v20(d->d_v20);
-		return;
-	}
-
 	/* throw away returns, so workers can exit */
 	for (wkr = 0; wkr < d->num_workers; wkr++)
 		d->bufs[wkr].retptr64[0] = 0;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
-MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
-		rte_distributor_clear_returns_v1705);
 
 /* creates a distributor instance */
 struct rte_distributor *
-rte_distributor_create_v1705(const char *name,
+rte_distributor_create(const char *name,
 		unsigned int socket_id,
 		unsigned int num_workers,
 		unsigned int alg_type)
@@ -593,8 +509,6 @@  rte_distributor_create_v1705(const char *name,
 	const struct rte_memzone *mz;
 	unsigned int i;
 
-	/* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */
-
 	/* compilation-time checks */
 	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
 	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
@@ -605,23 +519,6 @@  rte_distributor_create_v1705(const char *name,
 		return NULL;
 	}
 
-	if (alg_type == RTE_DIST_ALG_SINGLE) {
-		d = malloc(sizeof(struct rte_distributor));
-		if (d == NULL) {
-			rte_errno = ENOMEM;
-			return NULL;
-		}
-		d->d_v20 = rte_distributor_create_v20(name,
-				socket_id, num_workers);
-		if (d->d_v20 == NULL) {
-			free(d);
-			/* rte_errno will have been set */
-			return NULL;
-		}
-		d->alg_type = alg_type;
-		return d;
-	}
-
 	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
 	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
 	if (mz == NULL) {
@@ -656,8 +553,3 @@  rte_distributor_create_v1705(const char *name,
 
 	return d;
 }
-BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
-MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
-		const char *name, unsigned int socket_id,
-		unsigned int num_workers, unsigned int alg_type),
-		rte_distributor_create_v1705);
diff --git a/lib/librte_distributor/rte_distributor.h b/lib/librte_distributor/rte_distributor.h
index 327c0c4ab2..41c06093ee 100644
--- a/lib/librte_distributor/rte_distributor.h
+++ b/lib/librte_distributor/rte_distributor.h
@@ -20,7 +20,6 @@  extern "C" {
 /* Type of distribution (burst/single) */
 enum rte_distributor_alg_type {
 	RTE_DIST_ALG_BURST = 0,
-	RTE_DIST_ALG_SINGLE,
 	RTE_DIST_NUM_ALG_TYPES
 };
 
diff --git a/lib/librte_distributor/rte_distributor_private.h b/lib/librte_distributor/rte_distributor_private.h
index 33cd89410c..552eecc88f 100644
--- a/lib/librte_distributor/rte_distributor_private.h
+++ b/lib/librte_distributor/rte_distributor_private.h
@@ -48,18 +48,6 @@  extern "C" {
 
 #define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
 
-/**
- * Buffer structure used to pass the pointer data between cores. This is cache
- * line aligned, but to improve performance and prevent adjacent cache-line
- * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
- * the next cache line to worker 0, we pad this out to three cache lines.
- * Only 64-bits of the memory is actually used though.
- */
-union rte_distributor_buffer_v20 {
-	volatile int64_t bufptr64;
-	char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
-
 /*
  * Transfer up to 8 mbufs at a time to/from workers, and
  * flow matching algorithm optimized for 8 flow IDs at a time
@@ -80,27 +68,6 @@  struct rte_distributor_returned_pkts {
 	struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
 };
 
-struct rte_distributor_v20 {
-	TAILQ_ENTRY(rte_distributor_v20) next;    /**< Next in list. */
-
-	char name[RTE_DISTRIBUTOR_NAMESIZE];  /**< Name of the ring. */
-	unsigned int num_workers;             /**< Number of workers polling */
-
-	uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
-		/**< Tracks the tag being processed per core */
-	uint64_t in_flight_bitmask;
-		/**< on/off bits for in-flight tags.
-		 * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
-		 * the bitmask has to expand.
-		 */
-
-	struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
-
-	union rte_distributor_buffer_v20 bufs[RTE_DISTRIB_MAX_WORKERS];
-
-	struct rte_distributor_returned_pkts returns;
-};
-
 /* All different signature compare functions */
 enum rte_distributor_match_function {
 	RTE_DIST_MATCH_SCALAR = 0,
@@ -153,8 +120,6 @@  struct rte_distributor {
 	struct rte_distributor_returned_pkts returns;
 
 	enum rte_distributor_match_function dist_match_fn;
-
-	struct rte_distributor_v20 *d_v20;
 };
 
 void
diff --git a/lib/librte_distributor/rte_distributor_v1705.h b/lib/librte_distributor/rte_distributor_v1705.h
deleted file mode 100644
index df4d9e8150..0000000000
--- a/lib/librte_distributor/rte_distributor_v1705.h
+++ /dev/null
@@ -1,61 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#ifndef _RTE_DISTRIB_V1705_H_
-#define _RTE_DISTRIB_V1705_H_
-
-/**
- * @file
- * RTE distributor
- *
- * The distributor is a component which is designed to pass packets
- * one-at-a-time to workers, with dynamic load balancing.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct rte_distributor *
-rte_distributor_create_v1705(const char *name, unsigned int socket_id,
-		unsigned int num_workers,
-		unsigned int alg_type);
-
-int
-rte_distributor_process_v1705(struct rte_distributor *d,
-		struct rte_mbuf **mbufs, unsigned int num_mbufs);
-
-int
-rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
-		struct rte_mbuf **mbufs, unsigned int max_mbufs);
-
-int
-rte_distributor_flush_v1705(struct rte_distributor *d);
-
-void
-rte_distributor_clear_returns_v1705(struct rte_distributor *d);
-
-int
-rte_distributor_get_pkt_v1705(struct rte_distributor *d,
-	unsigned int worker_id, struct rte_mbuf **pkts,
-	struct rte_mbuf **oldpkt, unsigned int retcount);
-
-int
-rte_distributor_return_pkt_v1705(struct rte_distributor *d,
-	unsigned int worker_id, struct rte_mbuf **oldpkt, int num);
-
-void
-rte_distributor_request_pkt_v1705(struct rte_distributor *d,
-		unsigned int worker_id, struct rte_mbuf **oldpkt,
-		unsigned int count);
-
-int
-rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
-		unsigned int worker_id, struct rte_mbuf **mbufs);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
deleted file mode 100644
index cdc0969a89..0000000000
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ /dev/null
@@ -1,402 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#include <stdio.h>
-#include <sys/queue.h>
-#include <string.h>
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_errno.h>
-#include <rte_compat.h>
-#include <rte_string_fns.h>
-#include <rte_eal_memconfig.h>
-#include <rte_pause.h>
-#include <rte_tailq.h>
-
-#include "rte_distributor_v20.h"
-#include "rte_distributor_private.h"
-
-TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);
-
-static struct rte_tailq_elem rte_distributor_tailq = {
-	.name = "RTE_DISTRIBUTOR",
-};
-EAL_REGISTER_TAILQ(rte_distributor_tailq)
-
-/**** APIs called by workers ****/
-
-void
-rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned worker_id, struct rte_mbuf *oldpkt)
-{
-	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
-	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
-			| RTE_DISTRIB_GET_BUF;
-	while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
-		rte_pause();
-	buf->bufptr64 = req;
-}
-VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);
-
-struct rte_mbuf *
-rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned worker_id)
-{
-	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
-	if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
-		return NULL;
-
-	/* since bufptr64 is signed, this should be an arithmetic shift */
-	int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
-	return (struct rte_mbuf *)((uintptr_t)ret);
-}
-VERSION_SYMBOL(rte_distributor_poll_pkt, _v20, 2.0);
-
-struct rte_mbuf *
-rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned worker_id, struct rte_mbuf *oldpkt)
-{
-	struct rte_mbuf *ret;
-	rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
-	while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
-		rte_pause();
-	return ret;
-}
-VERSION_SYMBOL(rte_distributor_get_pkt, _v20, 2.0);
-
-int
-rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned worker_id, struct rte_mbuf *oldpkt)
-{
-	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
-	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
-			| RTE_DISTRIB_RETURN_BUF;
-	buf->bufptr64 = req;
-	return 0;
-}
-VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);
-
-/**** APIs called on distributor core ***/
-
-/* as name suggests, adds a packet to the backlog for a particular worker */
-static int
-add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
-{
-	if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
-		return -1;
-
-	bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
-			= item;
-	return 0;
-}
-
-/* takes the next packet for a worker off the backlog */
-static int64_t
-backlog_pop(struct rte_distributor_backlog *bl)
-{
-	bl->count--;
-	return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
-}
-
-/* stores a packet returned from a worker inside the returns array */
-static inline void
-store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
-		unsigned *ret_start, unsigned *ret_count)
-{
-	/* store returns in a circular buffer - code is branch-free */
-	d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
-			= (void *)oldbuf;
-	*ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
-	*ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
-}
-
-static inline void
-handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
-{
-	d->in_flight_tags[wkr] = 0;
-	d->in_flight_bitmask &= ~(1UL << wkr);
-	d->bufs[wkr].bufptr64 = 0;
-	if (unlikely(d->backlog[wkr].count != 0)) {
-		/* On return of a packet, we need to move the
-		 * queued packets for this core elsewhere.
-		 * Easiest solution is to set things up for
-		 * a recursive call. That will cause those
-		 * packets to be queued up for the next free
-		 * core, i.e. it will return as soon as a
-		 * core becomes free to accept the first
-		 * packet, as subsequent ones will be added to
-		 * the backlog for that core.
-		 */
-		struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
-		unsigned i;
-		struct rte_distributor_backlog *bl = &d->backlog[wkr];
-
-		for (i = 0; i < bl->count; i++) {
-			unsigned idx = (bl->start + i) &
-					RTE_DISTRIB_BACKLOG_MASK;
-			pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
-					RTE_DISTRIB_FLAG_BITS));
-		}
-		/* recursive call.
-		 * Note that the tags were set before first level call
-		 * to rte_distributor_process.
-		 */
-		rte_distributor_process_v20(d, pkts, i);
-		bl->count = bl->start = 0;
-	}
-}
-
-/* this function is called when process() fn is called without any new
- * packets. It goes through all the workers and clears any returned packets
- * to do a partial flush.
- */
-static int
-process_returns(struct rte_distributor_v20 *d)
-{
-	unsigned wkr;
-	unsigned flushed = 0;
-	unsigned ret_start = d->returns.start,
-			ret_count = d->returns.count;
-
-	for (wkr = 0; wkr < d->num_workers; wkr++) {
-
-		const int64_t data = d->bufs[wkr].bufptr64;
-		uintptr_t oldbuf = 0;
-
-		if (data & RTE_DISTRIB_GET_BUF) {
-			flushed++;
-			if (d->backlog[wkr].count)
-				d->bufs[wkr].bufptr64 =
-						backlog_pop(&d->backlog[wkr]);
-			else {
-				d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
-				d->in_flight_tags[wkr] = 0;
-				d->in_flight_bitmask &= ~(1UL << wkr);
-			}
-			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
-		} else if (data & RTE_DISTRIB_RETURN_BUF) {
-			handle_worker_shutdown(d, wkr);
-			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
-		}
-
-		store_return(oldbuf, d, &ret_start, &ret_count);
-	}
-
-	d->returns.start = ret_start;
-	d->returns.count = ret_count;
-
-	return flushed;
-}
-
-/* process a set of packets to distribute them to workers */
-int
-rte_distributor_process_v20(struct rte_distributor_v20 *d,
-		struct rte_mbuf **mbufs, unsigned num_mbufs)
-{
-	unsigned next_idx = 0;
-	unsigned wkr = 0;
-	struct rte_mbuf *next_mb = NULL;
-	int64_t next_value = 0;
-	uint32_t new_tag = 0;
-	unsigned ret_start = d->returns.start,
-			ret_count = d->returns.count;
-
-	if (unlikely(num_mbufs == 0))
-		return process_returns(d);
-
-	while (next_idx < num_mbufs || next_mb != NULL) {
-
-		int64_t data = d->bufs[wkr].bufptr64;
-		uintptr_t oldbuf = 0;
-
-		if (!next_mb) {
-			next_mb = mbufs[next_idx++];
-			next_value = (((int64_t)(uintptr_t)next_mb)
-					<< RTE_DISTRIB_FLAG_BITS);
-			/*
-			 * User is advocated to set tag value for each
-			 * mbuf before calling rte_distributor_process.
-			 * User defined tags are used to identify flows,
-			 * or sessions.
-			 */
-			new_tag = next_mb->hash.usr;
-
-			/*
-			 * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
-			 * then the size of match has to be expanded.
-			 */
-			uint64_t match = 0;
-			unsigned i;
-			/*
-			 * to scan for a match use "xor" and "not" to get a 0/1
-			 * value, then use shifting to merge to single "match"
-			 * variable, where a one-bit indicates a match for the
-			 * worker given by the bit-position
-			 */
-			for (i = 0; i < d->num_workers; i++)
-				match |= (!(d->in_flight_tags[i] ^ new_tag)
-					<< i);
-
-			/* Only turned-on bits are considered as match */
-			match &= d->in_flight_bitmask;
-
-			if (match) {
-				next_mb = NULL;
-				unsigned worker = __builtin_ctzl(match);
-				if (add_to_backlog(&d->backlog[worker],
-						next_value) < 0)
-					next_idx--;
-			}
-		}
-
-		if ((data & RTE_DISTRIB_GET_BUF) &&
-				(d->backlog[wkr].count || next_mb)) {
-
-			if (d->backlog[wkr].count)
-				d->bufs[wkr].bufptr64 =
-						backlog_pop(&d->backlog[wkr]);
-
-			else {
-				d->bufs[wkr].bufptr64 = next_value;
-				d->in_flight_tags[wkr] = new_tag;
-				d->in_flight_bitmask |= (1UL << wkr);
-				next_mb = NULL;
-			}
-			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
-		} else if (data & RTE_DISTRIB_RETURN_BUF) {
-			handle_worker_shutdown(d, wkr);
-			oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
-		}
-
-		/* store returns in a circular buffer */
-		store_return(oldbuf, d, &ret_start, &ret_count);
-
-		if (++wkr == d->num_workers)
-			wkr = 0;
-	}
-	/* to finish, check all workers for backlog and schedule work for them
-	 * if they are ready */
-	for (wkr = 0; wkr < d->num_workers; wkr++)
-		if (d->backlog[wkr].count &&
-				(d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
-
-			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
-					RTE_DISTRIB_FLAG_BITS;
-			store_return(oldbuf, d, &ret_start, &ret_count);
-
-			d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
-		}
-
-	d->returns.start = ret_start;
-	d->returns.count = ret_count;
-	return num_mbufs;
-}
-VERSION_SYMBOL(rte_distributor_process, _v20, 2.0);
-
-/* return to the caller, packets returned from workers */
-int
-rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
-		struct rte_mbuf **mbufs, unsigned max_mbufs)
-{
-	struct rte_distributor_returned_pkts *returns = &d->returns;
-	unsigned retval = (max_mbufs < returns->count) ?
-			max_mbufs : returns->count;
-	unsigned i;
-
-	for (i = 0; i < retval; i++) {
-		unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
-		mbufs[i] = returns->mbufs[idx];
-	}
-	returns->start += i;
-	returns->count -= i;
-
-	return retval;
-}
-VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
-
-/* return the number of packets in-flight in a distributor, i.e. packets
- * being worked on or queued up in a backlog.
- */
-static inline unsigned
-total_outstanding(const struct rte_distributor_v20 *d)
-{
-	unsigned wkr, total_outstanding;
-
-	total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
-
-	for (wkr = 0; wkr < d->num_workers; wkr++)
-		total_outstanding += d->backlog[wkr].count;
-
-	return total_outstanding;
-}
-
-/* flush the distributor, so that there are no outstanding packets in flight or
- * queued up. */
-int
-rte_distributor_flush_v20(struct rte_distributor_v20 *d)
-{
-	const unsigned flushed = total_outstanding(d);
-
-	while (total_outstanding(d) > 0)
-		rte_distributor_process_v20(d, NULL, 0);
-
-	return flushed;
-}
-VERSION_SYMBOL(rte_distributor_flush, _v20, 2.0);
-
-/* clears the internal returns array in the distributor */
-void
-rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
-{
-	d->returns.start = d->returns.count = 0;
-#ifndef __OPTIMIZE__
-	memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
-#endif
-}
-VERSION_SYMBOL(rte_distributor_clear_returns, _v20, 2.0);
-
-/* creates a distributor instance */
-struct rte_distributor_v20 *
-rte_distributor_create_v20(const char *name,
-		unsigned socket_id,
-		unsigned num_workers)
-{
-	struct rte_distributor_v20 *d;
-	struct rte_distributor_list *distributor_list;
-	char mz_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	/* compilation-time checks */
-	RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
-	RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
-	RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
-				sizeof(d->in_flight_bitmask) * CHAR_BIT);
-
-	if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
-		rte_errno = EINVAL;
-		return NULL;
-	}
-
-	snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
-	mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
-	if (mz == NULL) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-
-	d = mz->addr;
-	strlcpy(d->name, name, sizeof(d->name));
-	d->num_workers = num_workers;
-
-	distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
-					  rte_distributor_list);
-
-	rte_mcfg_tailq_write_lock();
-	TAILQ_INSERT_TAIL(distributor_list, d, next);
-	rte_mcfg_tailq_write_unlock();
-
-	return d;
-}
-VERSION_SYMBOL(rte_distributor_create, _v20, 2.0);
diff --git a/lib/librte_distributor/rte_distributor_v20.h b/lib/librte_distributor/rte_distributor_v20.h
deleted file mode 100644
index 12865658ba..0000000000
--- a/lib/librte_distributor/rte_distributor_v20.h
+++ /dev/null
@@ -1,218 +0,0 @@ 
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_DISTRIB_V20_H_
-#define _RTE_DISTRIB_V20_H_
-
-/**
- * @file
- * RTE distributor
- *
- * The distributor is a component which is designed to pass packets
- * one-at-a-time to workers, with dynamic load balancing.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
-
-struct rte_distributor_v20;
-struct rte_mbuf;
-
-/**
- * Function to create a new distributor instance
- *
- * Reserves the memory needed for the distributor operation and
- * initializes the distributor to work with the configured number of workers.
- *
- * @param name
- *   The name to be given to the distributor instance.
- * @param socket_id
- *   The NUMA node on which the memory is to be allocated
- * @param num_workers
- *   The maximum number of workers that will request packets from this
- *   distributor
- * @return
- *   The newly created distributor instance
- */
-struct rte_distributor_v20 *
-rte_distributor_create_v20(const char *name, unsigned int socket_id,
-		unsigned int num_workers);
-
-/*  *** APIS to be called on the distributor lcore ***  */
-/*
- * The following APIs are the public APIs which are designed for use on a
- * single lcore which acts as the distributor lcore for a given distributor
- * instance. These functions cannot be called on multiple cores simultaneously
- * without using locking to protect access to the internals of the distributor.
- *
- * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
- * for the same distributor instance, otherwise deadlock will result.
- */
-
-/**
- * Process a set of packets by distributing them among workers that request
- * packets. The distributor will ensure that no two packets that have the
- * same flow id, or tag, in the mbuf will be processed at the same time.
- *
- * The user is advocated to set tag for each mbuf before calling this function.
- * If user doesn't set the tag, the tag value can be various values depending on
- * driver implementation and configuration.
- *
- * This is not multi-thread safe and should only be called on a single lcore.
- *
- * @param d
- *   The distributor instance to be used
- * @param mbufs
- *   The mbufs to be distributed
- * @param num_mbufs
- *   The number of mbufs in the mbufs array
- * @return
- *   The number of mbufs processed.
- */
-int
-rte_distributor_process_v20(struct rte_distributor_v20 *d,
-		struct rte_mbuf **mbufs, unsigned int num_mbufs);
-
-/**
- * Get a set of mbufs that have been returned to the distributor by workers
- *
- * This should only be called on the same lcore as rte_distributor_process()
- *
- * @param d
- *   The distributor instance to be used
- * @param mbufs
- *   The mbufs pointer array to be filled in
- * @param max_mbufs
- *   The size of the mbufs array
- * @return
- *   The number of mbufs returned in the mbufs array.
- */
-int
-rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
-		struct rte_mbuf **mbufs, unsigned int max_mbufs);
-
-/**
- * Flush the distributor component, so that there are no in-flight or
- * backlogged packets awaiting processing
- *
- * This should only be called on the same lcore as rte_distributor_process()
- *
- * @param d
- *   The distributor instance to be used
- * @return
- *   The number of queued/in-flight packets that were completed by this call.
- */
-int
-rte_distributor_flush_v20(struct rte_distributor_v20 *d);
-
-/**
- * Clears the array of returned packets used as the source for the
- * rte_distributor_returned_pkts() API call.
- *
- * This should only be called on the same lcore as rte_distributor_process()
- *
- * @param d
- *   The distributor instance to be used
- */
-void
-rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d);
-
-/*  *** APIS to be called on the worker lcores ***  */
-/*
- * The following APIs are the public APIs which are designed for use on
- * multiple lcores which act as workers for a distributor. Each lcore should use
- * a unique worker id when requesting packets.
- *
- * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
- * for the same distributor instance, otherwise deadlock will result.
- */
-
-/**
- * API called by a worker to get a new packet to process. Any previous packet
- * given to the worker is assumed to have completed processing, and may be
- * optionally returned to the distributor via the oldpkt parameter.
- *
- * @param d
- *   The distributor instance to be used
- * @param worker_id
- *   The worker instance number to use - must be less that num_workers passed
- *   at distributor creation time.
- * @param oldpkt
- *   The previous packet, if any, being processed by the worker
- *
- * @return
- *   A new packet to be processed by the worker thread.
- */
-struct rte_mbuf *
-rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned int worker_id, struct rte_mbuf *oldpkt);
-
-/**
- * API called by a worker to return a completed packet without requesting a
- * new packet, for example, because a worker thread is shutting down
- *
- * @param d
- *   The distributor instance to be used
- * @param worker_id
- *   The worker instance number to use - must be less that num_workers passed
- *   at distributor creation time.
- * @param mbuf
- *   The previous packet being processed by the worker
- */
-int
-rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned int worker_id, struct rte_mbuf *mbuf);
-
-/**
- * API called by a worker to request a new packet to process.
- * Any previous packet given to the worker is assumed to have completed
- * processing, and may be optionally returned to the distributor via
- * the oldpkt parameter.
- * Unlike rte_distributor_get_pkt(), this function does not wait for a new
- * packet to be provided by the distributor.
- *
- * NOTE: after calling this function, rte_distributor_poll_pkt() should
- * be used to poll for the packet requested. The rte_distributor_get_pkt()
- * API should *not* be used to try and retrieve the new packet.
- *
- * @param d
- *   The distributor instance to be used
- * @param worker_id
- *   The worker instance number to use - must be less that num_workers passed
- *   at distributor creation time.
- * @param oldpkt
- *   The previous packet, if any, being processed by the worker
- */
-void
-rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned int worker_id, struct rte_mbuf *oldpkt);
-
-/**
- * API called by a worker to check for a new packet that was previously
- * requested by a call to rte_distributor_request_pkt(). It does not wait
- * for the new packet to be available, but returns NULL if the request has
- * not yet been fulfilled by the distributor.
- *
- * @param d
- *   The distributor instance to be used
- * @param worker_id
- *   The worker instance number to use - must be less that num_workers passed
- *   at distributor creation time.
- *
- * @return
- *   A new packet to be processed by the worker thread, or NULL if no
- *   packet is yet available.
- */
-struct rte_mbuf *
-rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
-		unsigned int worker_id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/lib/librte_distributor/rte_distributor_version.map b/lib/librte_distributor/rte_distributor_version.map
index 3a285b394e..5643ab85fb 100644
--- a/lib/librte_distributor/rte_distributor_version.map
+++ b/lib/librte_distributor/rte_distributor_version.map
@@ -1,4 +1,4 @@ 
-DPDK_2.0 {
+DPDK_17.05 {
 	global:
 
 	rte_distributor_clear_returns;
@@ -13,17 +13,3 @@  DPDK_2.0 {
 
 	local: *;
 };
-
-DPDK_17.05 {
-	global:
-
-	rte_distributor_clear_returns;
-	rte_distributor_create;
-	rte_distributor_flush;
-	rte_distributor_get_pkt;
-	rte_distributor_poll_pkt;
-	rte_distributor_process;
-	rte_distributor_request_pkt;
-	rte_distributor_return_pkt;
-	rte_distributor_returned_pkts;
-} DPDK_2.0;