[dpdk-dev] [PATCH v6 1/2] eal/ticketlock: ticket based to improve fairness

Ananyev, Konstantin konstantin.ananyev at intel.com
Fri Mar 15 13:55:59 CET 2019


Hi,

> diff --git a/lib/librte_eal/common/include/generic/rte_ticketlock.h b/lib/librte_eal/common/include/generic/rte_ticketlock.h
> new file mode 100644
> index 0000000..d63aaaa
> --- /dev/null
> +++ b/lib/librte_eal/common/include/generic/rte_ticketlock.h
> @@ -0,0 +1,308 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Arm Limited
> + */
> +
> +#ifndef _RTE_TICKETLOCK_H_
> +#define _RTE_TICKETLOCK_H_
> +
> +/**
> + * @file
> + *
> + * RTE ticket locks
> + *
> + * This file defines an API for ticket locks, which give each waiting
> + * thread a ticket and take the lock one by one, first come, first
> + * serviced.
> + *
> + * All locks must be initialised before use, and only initialised once.
> + *
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_common.h>
> +#include <rte_lcore.h>
> +#include <rte_pause.h>
> +
> +/**
> + * The rte_ticketlock_t type.
> + */
> +typedef struct {
> +	uint16_t current;
> +	uint16_t next;
> +} rte_ticketlock_t;
> +
> +/**
> + * A static ticketlock initializer.
> + */
> +#define RTE_TICKETLOCK_INITIALIZER { 0 }
> +
> +/**
> + * Initialize the ticketlock to an unlocked state.
> + *
> + * @param tl
> + *   A pointer to the ticketlock.
> + */
> +static inline __rte_experimental void
> +rte_ticketlock_init(rte_ticketlock_t *tl)
> +{
> +	__atomic_store_n(&tl->current, 0, __ATOMIC_RELAXED);
> +	__atomic_store_n(&tl->next, 0, __ATOMIC_RELAXED);
> +}
> +
> +/**
> + * Take the ticketlock.
> + *
> + * @param tl
> + *   A pointer to the ticketlock.
> + */
> +static inline __rte_experimental void
> +rte_ticketlock_lock(rte_ticketlock_t *tl)
> +{
> +	uint16_t me = __atomic_fetch_add(&tl->next, 1, __ATOMIC_RELAXED);
> +	while (__atomic_load_n(&tl->current, __ATOMIC_ACQUIRE) != me)
> +		rte_pause();
> +}
> +
> +/**
> + * Release the ticketlock.
> + *
> + * @param tl
> + *   A pointer to the ticketlock.
> + */
> +static inline __rte_experimental void
> +rte_ticketlock_unlock(rte_ticketlock_t *tl)
> +{
> +	uint16_t i = __atomic_load_n(&tl->current, __ATOMIC_RELAXED);
> +	__atomic_store_n(&tl->current, i+1, __ATOMIC_RELEASE);
> +}
> +
> +/**
> + * Try to take the lock.
> + *
> + * @param tl
> + *   A pointer to the ticketlock.
> + * @return
> + *   1 if the lock is successfully taken; 0 otherwise.
> + */
> +static inline __rte_experimental int
> +rte_ticketlock_trylock(rte_ticketlock_t *tl)
> +{
> +	uint16_t next = __atomic_load_n(&tl->next, __ATOMIC_RELAXED);
> +	uint16_t cur = __atomic_load_n(&tl->current, __ATOMIC_RELAXED);
> +	if (next == cur) {

Probably a naïve one:
Suppose next==cur==1 here; then this thread experiences a really long context switch,
so by the time it resumes, the tl->next value will have wrapped around and be 1 again, while tl->current==0 (lock held).
I suppose this function will then set tl->next=2 and return success?
Wouldn't it be better, here and in _is_locked_, to load/store the next/current values
together in a single operation (using a 32-bit op)?
Konstantin

> +		if (__atomic_compare_exchange_n(&tl->next, &next, next+1,
> +		    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
> +			return 1;
> +	}
> +
> +	return 0;
> +}
> +
> +/**
> + * Test if the lock is taken.
> + *
> + * @param tl
> + *   A pointer to the ticketlock.
> + * @return
> + *   1 if the lock is currently taken; 0 otherwise.
> + */
> +static inline __rte_experimental int
> +rte_ticketlock_is_locked(rte_ticketlock_t *tl)
> +{
> +	return (__atomic_load_n(&tl->current, __ATOMIC_ACQUIRE) !=
> +		__atomic_load_n(&tl->next, __ATOMIC_ACQUIRE));
> +}
> +


More information about the dev mailing list