[v2,1/3] hash: add predictable RSS API

Message ID 1617738643-258635-2-git-send-email-vladimir.medvedkin@intel.com (mailing list archive)
State Superseded, archived
Delegated to: David Marchand
Series: Predictable RSS feature

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Vladimir Medvedkin April 6, 2021, 7:50 p.m. UTC
  This patch adds predictable RSS API.
It is based on the idea of searching partial Toeplitz hash collisions.

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
 lib/librte_hash/meson.build |   3 +-
 lib/librte_hash/rte_thash.c |  96 ++++++++++++++++++++++++++++++
 lib/librte_hash/rte_thash.h | 138 ++++++++++++++++++++++++++++++++++++++++++++
 lib/librte_hash/version.map |   7 +++
 4 files changed, 243 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_hash/rte_thash.c
  

Comments

Wang, Yipeng1 April 10, 2021, 12:05 a.m. UTC | #1
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Tuesday, April 6, 2021 12:51 PM
> To: dev@dpdk.org
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; Kinsella, Ray <ray.kinsella@intel.com>; Wang,
> Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>
> Subject: [PATCH v2 1/3] hash: add predictable RSS API
> 
> This patch adds predictable RSS API.
> It is based on the idea of searching partial Toeplitz hash collisions.
> 
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
>  lib/librte_hash/meson.build |   3 +-
>  lib/librte_hash/rte_thash.c |  96 ++++++++++++++++++++++++++++++
>  lib/librte_hash/rte_thash.h | 138 ++++++++++++++++++++++++++++++++++++++++++++
>  lib/librte_hash/version.map |   7 +++
>  4 files changed, 243 insertions(+), 1 deletion(-)
>  create mode 100644 lib/librte_hash/rte_thash.c
> 
> diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build
> index 242859f..3546014 100644
> --- a/lib/librte_hash/meson.build
> +++ b/lib/librte_hash/meson.build
> @@ -8,6 +8,7 @@ headers = files('rte_fbk_hash.h',
>  	'rte_thash.h')
>  indirect_headers += files('rte_crc_arm64.h')
> 
> -sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c')
> +sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c', 'rte_thash.c')
> +deps += ['net']
>  deps += ['ring']
>  deps += ['rcu']
> diff --git a/lib/librte_hash/rte_thash.c b/lib/librte_hash/rte_thash.c
> new file mode 100644
> index 0000000..79e8724
> --- /dev/null
> +++ b/lib/librte_hash/rte_thash.c
> @@ -0,0 +1,96 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 Intel Corporation
> + */
> +
> +#include <rte_thash.h>
> +#include <rte_tailq.h>
> +#include <rte_random.h>
> +#include <rte_memcpy.h>
> +#include <rte_errno.h>
> +#include <rte_eal.h>
> +#include <rte_eal_memconfig.h>
> +#include <rte_malloc.h>
> +
> +#define THASH_NAME_LEN		64
> +
> +struct thash_lfsr {
> +	uint32_t	ref_cnt;
> +	uint32_t	poly;
> +	/**< polynomial associated with the lfsr */
> +	uint32_t	rev_poly;
> +	/**< polynomial to generate the sequence in reverse direction */
> +	uint32_t	state;
> +	/**< current state of the lfsr */
> +	uint32_t	rev_state;
> +	/**< current state of the lfsr for reverse direction */
> +	uint32_t	deg;	/**< polynomial degree*/
> +	uint32_t	bits_cnt;  /**< number of bits generated by lfsr*/
> +};
> +
> +struct rte_thash_subtuple_helper {
> +	char	name[THASH_NAME_LEN];	/** < Name of subtuple
> configuration */
> +	LIST_ENTRY(rte_thash_subtuple_helper)	next;
> +	struct thash_lfsr	*lfsr;
> +	uint32_t	offset;		/** < Offset in bits of the subtuple */
> +	uint32_t	len;		/** < Length in bits of the subtuple
> */
> +	uint32_t	lsb_msk;	/** < (1 << reta_sz_log) - 1 */
> +	__extension__ uint32_t	compl_table[0] __rte_cache_aligned;
> +	/** < Complimentary table */
> +};
> +
> +struct rte_thash_ctx {
> +	char		name[THASH_NAME_LEN];
> +	LIST_HEAD(, rte_thash_subtuple_helper) head;
> +	uint32_t	key_len;	/** < Length of the NIC RSS hash key
> */
> +	uint32_t	reta_sz_log;	/** < size of the RSS ReTa in bits */
> +	uint32_t	subtuples_nb;	/** < number of subtuples */
> +	uint32_t	flags;
> +	uint8_t		hash_key[0];
> +};
> +
> +struct rte_thash_ctx *
> +rte_thash_init_ctx(const char *name __rte_unused,
> +	uint32_t key_len __rte_unused, uint32_t reta_sz __rte_unused,
> +	uint8_t *key __rte_unused, uint32_t flags __rte_unused) {
> +	return NULL;
> +}
> +
> +struct rte_thash_ctx *
> +rte_thash_find_existing(const char *name __rte_unused) {
> +	return NULL;
> +}
> +
> +void
> +rte_thash_free_ctx(struct rte_thash_ctx *ctx __rte_unused) { }
> +
> +int
> +rte_thash_add_helper(struct rte_thash_ctx *ctx __rte_unused,
> +	const char *name __rte_unused, uint32_t len __rte_unused,
> +	uint32_t offset __rte_unused)
> +{
> +	return 0;
> +}
> +
> +struct rte_thash_subtuple_helper *
> +rte_thash_get_helper(struct rte_thash_ctx *ctx __rte_unused,
> +	const char *name __rte_unused)
> +{
> +	return NULL;
> +}
> +
> +uint32_t
> +rte_thash_get_compliment(struct rte_thash_subtuple_helper *h __rte_unused,
> +	uint32_t hash __rte_unused, uint32_t desired_hash __rte_unused) {
> +	return 0;
> +}
> +
> +const uint8_t *
> +rte_thash_get_key(struct rte_thash_ctx *ctx __rte_unused) {
> +	return NULL;
> +}
> diff --git a/lib/librte_hash/rte_thash.h b/lib/librte_hash/rte_thash.h
> index 061efa2..38a641b 100644
> --- a/lib/librte_hash/rte_thash.h
> +++ b/lib/librte_hash/rte_thash.h
> @@ -1,5 +1,6 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
>   * Copyright(c) 2015-2019 Vladimir Medvedkin <medvedkinv@gmail.com>
> + * Copyright(c) 2021 Intel Corporation
>   */
> 
>  #ifndef _RTE_THASH_H
> @@ -222,6 +223,143 @@ rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
>  	return ret;
>  }
> 
> +/**
> + * LFSR will ignore if generated m-sequence has more than 2^n -1 bits
> +*/
[Wang, Yipeng] 
I haven't fully got the significance/reasons behind the two flags.
For the comment above, 2^n is the reta_size right?
If so, it is better to say that than just commenting 2^n.

For the first flag:
What would be the issue for overflow? I understand that multiple helpers may overlap
on the m-sequence, but since they are for different tuples, what would be the issue?

For the second flag: is it always good to keep it minimum for each helper?

The goal is to have the best default values for users who do not understand the algorithm details.
Fewer flags are usually better.

> +#define RTE_THASH_IGNORE_PERIOD_OVERFLOW	0x1
> +/**
> + * Generate minimal required bit (equal to ReTa LSB) sequence into
> + * the hash_key
> + */
> +#define RTE_THASH_MINIMAL_SEQ			0x2
> +
> +/** @internal thash context structure. */
> +struct rte_thash_ctx;
> +/** @internal thash helper structure. */
> +struct rte_thash_subtuple_helper;
> +
> +/**
> + * Create a new thash context.
> + *
> + * @param name
> + *  context name
> + * @param key_len
> + *  length of the toeplitz hash key
> + * @param reta_sz
> + *  logarithm of the NIC's Redirection Table (ReTa) size,
> + *  i.e. number of the LSBs if the hash used to determine
> + *  the reta entry.
> + * @param key
[Wang, Yipeng] The key will be modified by the helper anyway. What is the reason for having
the users specify the key here?

> + *  pointer to the key used to init an internal key state.
> + *  Could be NULL, in this case internal key will be inited with random.
> + * @param flags
> + *  supported flags are:
> + *   RTE_THASH_IGNORE_PERIOD_OVERFLOW
> + *   RTE_THASH_MINIMAL_SEQ
> + * @return
> + *  A pointer to the created context on success
> + *  NULL otherwise
> + */
> +__rte_experimental
> +struct rte_thash_ctx *
> +rte_thash_init_ctx(const char *name, uint32_t key_len, uint32_t reta_sz,
> +	uint8_t *key, uint32_t flags);
> +
> +/**
> + * Find an existing thash context and return a pointer to it.
> + *
> + * @param name
> + *  Name of the thash context
> + * @return
> + *  Pointer to the thash context or NULL if it was not found with
> +rte_errno
> + *  set appropriately. Possible rte_errno values include:
> + *   - ENOENT - required entry not available to return.
> + */
> +__rte_experimental
> +struct rte_thash_ctx *
> +rte_thash_find_existing(const char *name);
> +
> +/**
> + * Free a thash context object
> + *
> + * @param ctx
> + *  thash context
> + * @return
> + *  None
> + */
> +__rte_experimental
> +void
> +rte_thash_free_ctx(struct rte_thash_ctx *ctx);
> +
> +/**
> + * Add a special properties to the toeplitz hash key inside a thash context.
> + * Creates an internal helper struct which has a complimentary table
> + * to calculate toeplitz hash collisions.
> + *
> + * @param ctx
> + *  thash context
> + * @param name
> + *  name of the helper
> + * @param len
[Wang, Yipeng] 
Add the requirement here so users know the expectation,
e.g. len should be no shorter than log(reta_size).

> + *  length in bits of the target subtuple
> + * @param offset
> + *  offset in bits of the subtuple
> + * @return
> + *  0 on success
> + *  negative on error
> + */
[Wang, Yipeng] thread-safety for the APIs?
Better to add thread-safety info in the comments.

> +__rte_experimental
> +int
> +rte_thash_add_helper(struct rte_thash_ctx *ctx, const char *name, uint32_t len,
> +	uint32_t offset);
> +
> +/**
> + * Find a helper in the context by the given name
> + *
> + * @param ctx
> + *  thash context
> + * @param name
> + *  name of the helper
> + * @return
> + *  Pointer to the thash helper or NULL if it was not found.
> + */
> +__rte_experimental
> +struct rte_thash_subtuple_helper *
> +rte_thash_get_helper(struct rte_thash_ctx *ctx, const char *name);
> +
> +/**
> + * Get a complimentary value for the subtuple to produce a
[Wang, Yipeng] 
Should it be complimentary->complementary?  compliment -> complement?

> + * partial toeplitz hash collision. It muxt be XOR'ed with the
[Wang, Yipeng] typo *must be
> + * subtuple to produce the hash value with the desired hash LSB's
> + *
> + * @param h
> + *  Pointer to the helper struct
> + * @param hash
> + *  toeplitz hash value calculated for the given tuple
> + * @param desired_hash
> + *  desired hash value to find a collision for
> + * @return
> + *  A complimentary value which must be xored with the corresponding subtuple
> + */
> +__rte_experimental
> +uint32_t
> +rte_thash_get_compliment(struct rte_thash_subtuple_helper *h,
> +	uint32_t hash, uint32_t desired_hash);
> +
> +/**
> + * Get a pointer to the toeplitz hash contained in the context.
> + * It changes after each addition of a helper. It should be installed to
> + * the NIC.
> + *
> + * @param ctx
> + *  thash context
> + * @return
> + *  A pointer to the toeplitz hash key
> + */
> +__rte_experimental
> +const uint8_t *
> +rte_thash_get_key(struct rte_thash_ctx *ctx);
> +
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/librte_hash/version.map b/lib/librte_hash/version.map
> index c6d7308..93cb230 100644
> --- a/lib/librte_hash/version.map
> +++ b/lib/librte_hash/version.map
> @@ -37,4 +37,11 @@ EXPERIMENTAL {
>  	rte_hash_lookup_with_hash_bulk_data;
>  	rte_hash_max_key_id;
>  	rte_hash_rcu_qsbr_add;
> +	rte_thash_add_helper;
> +	rte_thash_find_existing;
> +	rte_thash_free_ctx;
> +	rte_thash_get_compliment;
> +	rte_thash_get_helper;
> +	rte_thash_get_key;
> +	rte_thash_init_ctx;
>  };
> --
> 2.7.4
  
Vladimir Medvedkin April 11, 2021, 6:52 p.m. UTC | #2
On 10/04/2021 03:05, Wang, Yipeng1 wrote:
>> -----Original Message-----
>> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
>> Sent: Tuesday, April 6, 2021 12:51 PM
>> To: dev@dpdk.org
>> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Chilikin, Andrey
>> <andrey.chilikin@intel.com>; Kinsella, Ray <ray.kinsella@intel.com>; Wang,
>> Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
>> <sameh.gobriel@intel.com>; Richardson, Bruce
>> <bruce.richardson@intel.com>
>> Subject: [PATCH v2 1/3] hash: add predictable RSS API
>>
>> This patch adds predictable RSS API.
>> It is based on the idea of searching partial Toeplitz hash collisions.
>>
>> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
>> ---
>>   lib/librte_hash/meson.build |   3 +-
>>   lib/librte_hash/rte_thash.c |  96 ++++++++++++++++++++++++++++++
>> lib/librte_hash/rte_thash.h | 138
>> ++++++++++++++++++++++++++++++++++++++++++++
>>   lib/librte_hash/version.map |   7 +++
>>   4 files changed, 243 insertions(+), 1 deletion(-)  create mode 100644
>> lib/librte_hash/rte_thash.c
>>

<snip>

>> + * LFSR will ignore if generated m-sequence has more than 2^n -1 bits
>> +*/
> [Wang, Yipeng]
> I haven't fully got the significance/reasons behind the two flags.
> For the comment above, 2^n is the reta_size right?

Here "2^n - 1" is a length of m-sequence - a pseudorandom bit sequence 
which has a number of mathematical properties we need.
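
To illustrate the period, here is a generic maximal-length LFSR sketch. It is 
not the implementation in rte_thash.c (which also keeps reverse state and a 
configurable polynomial); the degree-7 polynomial below is just an example:

#include <stdint.h>
#include <stdio.h>

/*
 * Generic illustration only: a degree-7 Fibonacci LFSR built on the
 * primitive polynomial x^7 + x + 1. Its output is an m-sequence, i.e.
 * it repeats only after 2^7 - 1 = 127 bits, which is the "period"
 * discussed above.
 */
static uint32_t
lfsr_next_bit(uint32_t *state)
{
	uint32_t bit = ((*state >> 6) ^ (*state >> 5)) & 0x1;

	*state = ((*state << 1) | bit) & 0x7f;	/* keep 7 bits of state */
	return bit;
}

int
main(void)
{
	uint32_t state = 1, period = 0;

	do {
		lfsr_next_bit(&state);
		period++;
	} while (state != 1);

	printf("m-sequence period: %u\n", period);	/* prints 127 */
	return 0;
}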

> If so, it is better to say that than just commenting 2^n.
> 
> For the first flag:
> What would be the issue for overflow? I understand that multiple helpers may overlap
> on the m-sequence, but since they are for different tuples, what would be the issue?
> 

An m-sequence has a period: after (2^n - 1) bits the sequence repeats. 
Eventually it is written into the RSS hash key. In some circumstances an 
attack with spoofed packets can then be mounted to overflow a particular 
NIC queue.
So generally this flag should only be used for tests, for example to spread 
traffic from a packet generator evenly among the queues.
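
For example, a test setup might look roughly like the sketch below. The key 
length and reta_sz are placeholder values; the functions are the ones declared 
in this patch and implemented later in the series:

#include <stdio.h>
#include <rte_thash.h>

/*
 * Hypothetical test-bench context: let the LFSR keep producing bits even
 * after the m-sequence period is exhausted. Acceptable when the only goal
 * is to spread generator traffic evenly among the queues.
 */
static struct rte_thash_ctx *
create_test_ctx(void)
{
	struct rte_thash_ctx *ctx;

	ctx = rte_thash_init_ctx("test_gen",
		40,	/* example key length in bytes */
		7,	/* example log2 of the ReTa size */
		NULL,	/* NULL: init the internal key with random */
		RTE_THASH_IGNORE_PERIOD_OVERFLOW);
	if (ctx == NULL)
		printf("failed to create thash context\n");

	return ctx;
}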

> For the second flag: is it always good to keep it minimum for each helper?
> 

Not always. Without this flag an m-sequence will be generated for all 
variable bits, for example for the 16 bits of a port. And if we know that 
all values of this 16-bit port are equally probable, then the distribution 
of the hash LSBs will be even.

On the other hand, if the user has a number of helpers which share a 
single m-sequence, then there could be an overflow. And setting this flag 
could break a single m-sequence into two independent ones.

> The goal is to have the best default values for users who do not understand the algorithm details.
> Fewer flags are usually better.
> 

I think the default value of 0 for flags is the best for the general use case.
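
With flags left at 0, the intended flow (once the stubs are filled in by the 
later patches of this series) would look roughly like the following sketch. 
The key length, reta_sz and the bit offset of the port are illustrative 
values only:

#include <rte_thash.h>

/*
 * Illustrative sketch only: make an IPv4/TCP flow land on a desired ReTa
 * entry by adjusting its 16-bit destination port.
 */
static int
pin_flow(union rte_thash_tuple *tuple, uint32_t desired_lsb)
{
	struct rte_thash_ctx *ctx;
	struct rte_thash_subtuple_helper *h;
	const uint8_t *key;
	uint32_t hash;

	ctx = rte_thash_init_ctx("pin", 40, 7, NULL, 0);
	if (ctx == NULL)
		return -1;

	/* dst port: 16 bits at bit offset 80 of the src/dst/sport/dport tuple */
	if (rte_thash_add_helper(ctx, "dport", 16, 80) < 0) {
		rte_thash_free_ctx(ctx);
		return -1;
	}
	h = rte_thash_get_helper(ctx, "dport");

	/* the generated key is what has to be programmed into the NIC */
	key = rte_thash_get_key(ctx);

	hash = rte_softrss((uint32_t *)tuple, RTE_THASH_V4_L4_LEN, key);

	/* XOR the complement into the subtuple to hit the desired hash LSBs */
	tuple->v4.dport ^= rte_thash_get_compliment(h, hash, desired_lsb);

	rte_thash_free_ctx(ctx);
	return 0;
}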

>> +#define RTE_THASH_IGNORE_PERIOD_OVERFLOW	0x1
>> +/**
>> + * Generate minimal required bit (equal to ReTa LSB) sequence into
>> + * the hash_key
>> + */
>> +#define RTE_THASH_MINIMAL_SEQ			0x2
>> +
>> +/** @internal thash context structure. */
>> +struct rte_thash_ctx;
>> +/** @internal thash helper structure. */
>> +struct rte_thash_subtuple_helper;
>> +
>> +/**
>> + * Create a new thash context.
>> + *
>> + * @param name
>> + *  context name
>> + * @param key_len
>> + *  length of the toeplitz hash key
>> + * @param reta_sz
>> + *  logarithm of the NIC's Redirection Table (ReTa) size,
>> + *  i.e. number of the LSBs if the hash used to determine
>> + *  the reta entry.
>> + * @param key
> [Wang, Yipeng] The key will be modified by the helper anyway. What is the reason for having
> the users specify the key here?
> 

In some cases the user will want to specify a particular key. For example, 
if the user wants to symmetrically load balance IPv4/TCP and do NAT inside 
the tunnel without decapsulation, the user will submit a key with repeated 
2-byte values.
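
A sketch of that case, using the well-known repeated 2-byte pattern (the key 
length, pattern and reta_sz values are again just examples):

#include <rte_thash.h>

/*
 * Example of supplying an initial key: a 40-byte key built from a repeated
 * 2-byte pattern hashes src/dst-swapped IPv4/TCP tuples to the same value,
 * which gives the symmetric load balancing mentioned above.
 */
static struct rte_thash_ctx *
create_symmetric_ctx(void)
{
	uint8_t key[40];
	unsigned int i;

	for (i = 0; i < sizeof(key); i += 2) {
		key[i] = 0x6d;
		key[i + 1] = 0x5a;
	}

	return rte_thash_init_ctx("sym_nat", sizeof(key), 7, key, 0);
}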

>> + *  pointer to the key used to init an internal key state.
>> + *  Could be NULL, in this case internal key will be inited with random.
>> + * @param flags
>> + *  supported flags are:
>> + *   RTE_THASH_IGNORE_PERIOD_OVERFLOW
>> + *   RTE_THASH_MINIMAL_SEQ
>> + * @return
>> + *  A pointer to the created context on success
>> + *  NULL otherwise
>> + */
>> +__rte_experimental
>> +struct rte_thash_ctx *
>> +rte_thash_init_ctx(const char *name, uint32_t key_len, uint32_t reta_sz,
>> +uint8_t *key, uint32_t flags);
>> +

<snip>

>> +/**
>> + * Add a special properties to the toeplitz hash key inside a thash context.
>> + * Creates an internal helper struct which has a complimentary table
>> + * to calculate toeplitz hash collisions.
>> + *
>> + * @param ctx
>> + *  thash context
>> + * @param name
>> + *  name of the helper
>> + * @param len
> [Wang, Yipeng]
> Add the requirement here so users know the expectation,
> e.g. len should be no shorter than log(reta_size).
> 

Agree, I'll add

>> + *  length in bits of the target subtuple
>> + * @param offset
>> + *  offset in bits of the subtuple
>> + * @return
>> + *  0 on success
>> + *  negative on error
>> + */
> [Wang, Yipeng] thread-safety for the APIs?
> Better to add thread-safety info in the comments.
> 

Agree, I'll add

>> +__rte_experimental
>> +int
>> +rte_thash_add_helper(struct rte_thash_ctx *ctx, const char *name, uint32_t len,
>> +	uint32_t offset);
>> +
>> +/**
>> + * Find a helper in the context by the given name
>> + *
>> + * @param ctx
>> + *  thash context
>> + * @param name
>> + *  name of the helper
>> + * @return
>> + *  Pointer to the thash helper or NULL if it was not found.
>> + */
>> +__rte_experimental
>> +struct rte_thash_subtuple_helper *
>> +rte_thash_get_helper(struct rte_thash_ctx *ctx, const char *name);
>> +
>> +/**
>> + * Get a complimentary value for the subtuple to produce a
> [Wang, Yipeng]
> Should it be complimentary->complementary?  compliment -> complement?
> 

Agree, will fix

>> + * partial toeplitz hash collision. It muxt be XOR'ed with the
> [Wang, Yipeng] typo *must be

will fix

>> + * subtuple to produce the hash value with the desired hash LSB's
>> + *
>> + * @param h
>> + *  Pointer to the helper struct
>> + * @param hash
>> + *  toeplitz hash value calculated for the given tuple
>> + * @param desired_hash
>> + *  desired hash value to find a collision for
>> + * @return
>> + *  A complimentary value which must be xored with the corresponding subtuple
>> + */
>> +__rte_experimental
>> +uint32_t
>> +rte_thash_get_compliment(struct rte_thash_subtuple_helper *h,
>> +uint32_t hash, uint32_t desired_hash);
>> +
>> +/**
>> + * Get a pointer to the toeplitz hash contained in the context.
>> + * It changes after each addition of a helper. It should be installed to
>> + * the NIC.
>> + *
>> + * @param ctx
>> + *  thash context
>> + * @return
>> + *  A pointer to the toeplitz hash key
>> + */
>> +__rte_experimental
>> +const uint8_t *
>> +rte_thash_get_key(struct rte_thash_ctx *ctx);
>> +
>>   #ifdef __cplusplus
>>   }
>>   #endif
>> diff --git a/lib/librte_hash/version.map b/lib/librte_hash/version.map
>> index c6d7308..93cb230 100644
>> --- a/lib/librte_hash/version.map
>> +++ b/lib/librte_hash/version.map
>> @@ -37,4 +37,11 @@ EXPERIMENTAL {
>>   rte_hash_lookup_with_hash_bulk_data;
>>   rte_hash_max_key_id;
>>   rte_hash_rcu_qsbr_add;
>> +rte_thash_add_helper;
>> +rte_thash_find_existing;
>> +rte_thash_free_ctx;
>> +rte_thash_get_compliment;
>> +rte_thash_get_helper;
>> +rte_thash_get_key;
>> +rte_thash_init_ctx;
>>   };
>> --
>> 2.7.4
>
  

Patch

diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build
index 242859f..3546014 100644
--- a/lib/librte_hash/meson.build
+++ b/lib/librte_hash/meson.build
@@ -8,6 +8,7 @@  headers = files('rte_fbk_hash.h',
 	'rte_thash.h')
 indirect_headers += files('rte_crc_arm64.h')
 
-sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c')
+sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c', 'rte_thash.c')
+deps += ['net']
 deps += ['ring']
 deps += ['rcu']
diff --git a/lib/librte_hash/rte_thash.c b/lib/librte_hash/rte_thash.c
new file mode 100644
index 0000000..79e8724
--- /dev/null
+++ b/lib/librte_hash/rte_thash.c
@@ -0,0 +1,96 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_thash.h>
+#include <rte_tailq.h>
+#include <rte_random.h>
+#include <rte_memcpy.h>
+#include <rte_errno.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+
+#define THASH_NAME_LEN		64
+
+struct thash_lfsr {
+	uint32_t	ref_cnt;
+	uint32_t	poly;
+	/**< polynomial associated with the lfsr */
+	uint32_t	rev_poly;
+	/**< polynomial to generate the sequence in reverse direction */
+	uint32_t	state;
+	/**< current state of the lfsr */
+	uint32_t	rev_state;
+	/**< current state of the lfsr for reverse direction */
+	uint32_t	deg;	/**< polynomial degree*/
+	uint32_t	bits_cnt;  /**< number of bits generated by lfsr*/
+};
+
+struct rte_thash_subtuple_helper {
+	char	name[THASH_NAME_LEN];	/** < Name of subtuple configuration */
+	LIST_ENTRY(rte_thash_subtuple_helper)	next;
+	struct thash_lfsr	*lfsr;
+	uint32_t	offset;		/** < Offset in bits of the subtuple */
+	uint32_t	len;		/** < Length in bits of the subtuple */
+	uint32_t	lsb_msk;	/** < (1 << reta_sz_log) - 1 */
+	__extension__ uint32_t	compl_table[0] __rte_cache_aligned;
+	/** < Complimentary table */
+};
+
+struct rte_thash_ctx {
+	char		name[THASH_NAME_LEN];
+	LIST_HEAD(, rte_thash_subtuple_helper) head;
+	uint32_t	key_len;	/** < Length of the NIC RSS hash key */
+	uint32_t	reta_sz_log;	/** < size of the RSS ReTa in bits */
+	uint32_t	subtuples_nb;	/** < number of subtuples */
+	uint32_t	flags;
+	uint8_t		hash_key[0];
+};
+
+struct rte_thash_ctx *
+rte_thash_init_ctx(const char *name __rte_unused,
+	uint32_t key_len __rte_unused, uint32_t reta_sz __rte_unused,
+	uint8_t *key __rte_unused, uint32_t flags __rte_unused)
+{
+	return NULL;
+}
+
+struct rte_thash_ctx *
+rte_thash_find_existing(const char *name __rte_unused)
+{
+	return NULL;
+}
+
+void
+rte_thash_free_ctx(struct rte_thash_ctx *ctx __rte_unused)
+{
+}
+
+int
+rte_thash_add_helper(struct rte_thash_ctx *ctx __rte_unused,
+	const char *name __rte_unused, uint32_t len __rte_unused,
+	uint32_t offset __rte_unused)
+{
+	return 0;
+}
+
+struct rte_thash_subtuple_helper *
+rte_thash_get_helper(struct rte_thash_ctx *ctx __rte_unused,
+	const char *name __rte_unused)
+{
+	return NULL;
+}
+
+uint32_t
+rte_thash_get_compliment(struct rte_thash_subtuple_helper *h __rte_unused,
+	uint32_t hash __rte_unused, uint32_t desired_hash __rte_unused)
+{
+	return 0;
+}
+
+const uint8_t *
+rte_thash_get_key(struct rte_thash_ctx *ctx __rte_unused)
+{
+	return NULL;
+}
diff --git a/lib/librte_hash/rte_thash.h b/lib/librte_hash/rte_thash.h
index 061efa2..38a641b 100644
--- a/lib/librte_hash/rte_thash.h
+++ b/lib/librte_hash/rte_thash.h
@@ -1,5 +1,6 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2015-2019 Vladimir Medvedkin <medvedkinv@gmail.com>
+ * Copyright(c) 2021 Intel Corporation
  */
 
 #ifndef _RTE_THASH_H
@@ -222,6 +223,143 @@  rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
 	return ret;
 }
 
+/**
+ * LFSR will ignore if generated m-sequence has more than 2^n -1 bits
+ */
+#define RTE_THASH_IGNORE_PERIOD_OVERFLOW	0x1
+/**
+ * Generate minimal required bit (equal to ReTa LSB) sequence into
+ * the hash_key
+ */
+#define RTE_THASH_MINIMAL_SEQ			0x2
+
+/** @internal thash context structure. */
+struct rte_thash_ctx;
+/** @internal thash helper structure. */
+struct rte_thash_subtuple_helper;
+
+/**
+ * Create a new thash context.
+ *
+ * @param name
+ *  context name
+ * @param key_len
+ *  length of the toeplitz hash key
+ * @param reta_sz
+ *  logarithm of the NIC's Redirection Table (ReTa) size,
+ *  i.e. number of the LSBs if the hash used to determine
+ *  the reta entry.
+ * @param key
+ *  pointer to the key used to init an internal key state.
+ *  Could be NULL, in this case internal key will be inited with random.
+ * @param flags
+ *  supported flags are:
+ *   RTE_THASH_IGNORE_PERIOD_OVERFLOW
+ *   RTE_THASH_MINIMAL_SEQ
+ * @return
+ *  A pointer to the created context on success
+ *  NULL otherwise
+ */
+__rte_experimental
+struct rte_thash_ctx *
+rte_thash_init_ctx(const char *name, uint32_t key_len, uint32_t reta_sz,
+	uint8_t *key, uint32_t flags);
+
+/**
+ * Find an existing thash context and return a pointer to it.
+ *
+ * @param name
+ *  Name of the thash context
+ * @return
+ *  Pointer to the thash context or NULL if it was not found with rte_errno
+ *  set appropriately. Possible rte_errno values include:
+ *   - ENOENT - required entry not available to return.
+ */
+__rte_experimental
+struct rte_thash_ctx *
+rte_thash_find_existing(const char *name);
+
+/**
+ * Free a thash context object
+ *
+ * @param ctx
+ *  thash context
+ * @return
+ *  None
+ */
+__rte_experimental
+void
+rte_thash_free_ctx(struct rte_thash_ctx *ctx);
+
+/**
+ * Add a special properties to the toeplitz hash key inside a thash context.
+ * Creates an internal helper struct which has a complimentary table
+ * to calculate toeplitz hash collisions.
+ *
+ * @param ctx
+ *  thash context
+ * @param name
+ *  name of the helper
+ * @param len
+ *  length in bits of the target subtuple
+ * @param offset
+ *  offset in bits of the subtuple
+ * @return
+ *  0 on success
+ *  negative on error
+ */
+__rte_experimental
+int
+rte_thash_add_helper(struct rte_thash_ctx *ctx, const char *name, uint32_t len,
+	uint32_t offset);
+
+/**
+ * Find a helper in the context by the given name
+ *
+ * @param ctx
+ *  thash context
+ * @param name
+ *  name of the helper
+ * @return
+ *  Pointer to the thash helper or NULL if it was not found.
+ */
+__rte_experimental
+struct rte_thash_subtuple_helper *
+rte_thash_get_helper(struct rte_thash_ctx *ctx, const char *name);
+
+/**
+ * Get a complimentary value for the subtuple to produce a
+ * partial toeplitz hash collision. It muxt be XOR'ed with the
+ * subtuple to produce the hash value with the desired hash LSB's
+ *
+ * @param h
+ *  Pointer to the helper struct
+ * @param hash
+ *  toeplitz hash value calculated for the given tuple
+ * @param desired_hash
+ *  desired hash value to find a collision for
+ * @return
+ *  A complimentary value which must be xored with the corresponding subtuple
+ */
+__rte_experimental
+uint32_t
+rte_thash_get_compliment(struct rte_thash_subtuple_helper *h,
+	uint32_t hash, uint32_t desired_hash);
+
+/**
+ * Get a pointer to the toeplitz hash contained in the context.
+ * It changes after each addition of a helper. It should be installed to
+ * the NIC.
+ *
+ * @param ctx
+ *  thash context
+ * @return
+ *  A pointer to the toeplitz hash key
+ */
+__rte_experimental
+const uint8_t *
+rte_thash_get_key(struct rte_thash_ctx *ctx);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_hash/version.map b/lib/librte_hash/version.map
index c6d7308..93cb230 100644
--- a/lib/librte_hash/version.map
+++ b/lib/librte_hash/version.map
@@ -37,4 +37,11 @@  EXPERIMENTAL {
 	rte_hash_lookup_with_hash_bulk_data;
 	rte_hash_max_key_id;
 	rte_hash_rcu_qsbr_add;
+	rte_thash_add_helper;
+	rte_thash_find_existing;
+	rte_thash_free_ctx;
+	rte_thash_get_compliment;
+	rte_thash_get_helper;
+	rte_thash_get_key;
+	rte_thash_init_ctx;
 };