[dpdk-dev] [RFC v2] lib: add compressdev API

Trahe, Fiona fiona.trahe at intel.com
Fri Nov 24 17:56:06 CET 2017


compressdev API

Signed-off-by: Trahe, Fiona <fiona.trahe at intel.com>
---
 config/common_base                                 |    7 +
 lib/Makefile                                       |    3 +
 lib/librte_compressdev/Makefile                    |   54 +
 lib/librte_compressdev/rte_comp.h                  |  565 ++++++++++
 lib/librte_compressdev/rte_compressdev.c           | 1181 ++++++++++++++++++++
 lib/librte_compressdev/rte_compressdev.h           |  863 ++++++++++++++
 lib/librte_compressdev/rte_compressdev_pmd.c       |  193 ++++
 lib/librte_compressdev/rte_compressdev_pmd.h       |  535 +++++++++
 lib/librte_compressdev/rte_compressdev_version.map |   58 +
 lib/librte_eal/common/include/rte_log.h            |    1 +
 10 files changed, 3460 insertions(+), 0 deletions(-)
 create mode 100644 lib/librte_compressdev/Makefile
 create mode 100644 lib/librte_compressdev/rte_comp.h
 create mode 100644 lib/librte_compressdev/rte_compressdev.c
 create mode 100644 lib/librte_compressdev/rte_compressdev.h
 create mode 100644 lib/librte_compressdev/rte_compressdev_pmd.c
 create mode 100644 lib/librte_compressdev/rte_compressdev_pmd.h
 create mode 100644 lib/librte_compressdev/rte_compressdev_version.map

diff --git a/config/common_base b/config/common_base
index e74febe..b8d14d3 100644
--- a/config/common_base
+++ b/config/common_base
@@ -563,6 +563,13 @@ CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n
 
 #
+# Compile generic compression device library
+#
+CONFIG_RTE_LIBRTE_COMPRESSDEV=y
+CONFIG_RTE_LIBRTE_COMPRESSDEV_DEBUG=n
+CONFIG_RTE_COMPRESS_MAX_DEVS=64
+
+#
 # Compile generic security library
 #
 CONFIG_RTE_LIBRTE_SECURITY=y
diff --git a/lib/Makefile b/lib/Makefile
index dc4e8df..a0308dc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -52,6 +52,9 @@ DEPDIRS-librte_ether += librte_mbuf
 DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
 DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
 DEPDIRS-librte_cryptodev += librte_kvargs
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += librte_compressdev
+DEPDIRS-librte_compressdev := librte_eal librte_mempool librte_ring librte_mbuf
+DEPDIRS-librte_compressdev += librte_kvargs
 DIRS-$(CONFIG_RTE_LIBRTE_SECURITY) += librte_security
 DEPDIRS-librte_security := librte_eal librte_mempool librte_ring librte_mbuf
 DEPDIRS-librte_security += librte_ether
diff --git a/lib/librte_compressdev/Makefile b/lib/librte_compressdev/Makefile
new file mode 100644
index 0000000..d11eb27
--- /dev/null
+++ b/lib/librte_compressdev/Makefile
@@ -0,0 +1,54 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_compressdev.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library source files
+SRCS-y += rte_compressdev.c rte_compressdev_pmd.c
+
+# export include files
+SYMLINK-y-include += rte_comp.h
+SYMLINK-y-include += rte_compressdev.h
+SYMLINK-y-include += rte_compressdev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_compressdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_compressdev/rte_comp.h b/lib/librte_compressdev/rte_comp.h
new file mode 100644
index 0000000..ce2a81b
--- /dev/null
+++ b/lib/librte_compressdev/rte_comp.h
@@ -0,0 +1,565 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMP_H_
+#define _RTE_COMP_H_
+
+/**
+ * @file rte_comp.h
+ *
+ * RTE definitions for Data Compression Service
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+#include <rte_mempool.h>
+
+
+/** Status of comp operation */
+enum rte_comp_op_status {
+	RTE_COMP_OP_STATUS_SUCCESS = 0,
+	/**< Operation completed successfully */
+	RTE_COMP_OP_STATUS_NOT_PROCESSED,
+	/**< Operation has not yet been processed by the device */
+	RTE_COMP_OP_STATUS_INVALID_SESSION,
+	/**< Operation failed due to invalid session arguments */
+	RTE_COMP_OP_STATUS_INVALID_ARGS,
+	/**< Operation failed due to invalid arguments in request */
+	RTE_COMP_OP_STATUS_ERROR,
+	/**< Error handling operation */
+	RTE_COMP_OP_STATUS_INVALID_STATE,
+	/**< Operation is invoked in invalid state */
+	RTE_COMP_OP_STATUS_OUT_OF_SPACE,
+	/**< Output buffer ran out of space before operation completed */
+
+	/* Note:
+	 * The QAT API has 19 error types, ISA-L has 5 inflate and 6 deflate
+	 * errors, and zlib has 6 errors. Propose to include only the common
+	 * subset in status - only values where the application would behave
+	 * differently - and to add a separate error field to the op which a
+	 * PMD could populate with PMD-specific debug info.
+	 */
+};
+
+
+/** Compression Algorithms */
+enum rte_comp_algorithm {
+	RTE_COMP_NULL = 0,
+	/**< No compression.
+	 * Pass-through, data is copied unchanged from source buffer to
+	 * destination buffer.
+	 */
+	RTE_COMP_DEFLATE,
+	/**< DEFLATE compression algorithm
+	 * https://tools.ietf.org/html/rfc1951
+	 */
+	RTE_COMP_LZS,
+	/**< LZS compression algorithm
+	 * https://tools.ietf.org/html/rfc2395
+	 */
+	RTE_COMP_LIST_END
+};
+
+/**
+ * Compression Level.
+ * The number is interpreted by each PMD differently. However, lower numbers
+ * give the fastest compression at the expense of compression ratio, while
+ * higher numbers may give better compression ratios but are likely to be
+ * slower.
+ */
+#define	RTE_COMP_LEVEL_PMD_DEFAULT	(-1)
+/** Use PMD Default */
+#define	RTE_COMP_LEVEL_NONE		(0)
+/** Output uncompressed blocks if supported by the specified algorithm */
+#define RTE_COMP_LEVEL_MIN		(1)
+/** Use minimum compression level supported by the PMD */
+#define RTE_COMP_LEVEL_MAX		(9)
+/** Use maximum compression level supported by the PMD */
+
+/** Compression checksum types */
+enum rte_comp_checksum_type {
+	RTE_COMP_NONE,
+	/**< No checksum generated */
+	RTE_COMP_CRC32,
+	/**< Generates a CRC32 checksum, as used by gzip */
+	RTE_COMP_ADLER32,
+	/**< Generates an Adler-32 checksum, as used by zlib */
+	RTE_COMP_CRC32_ADLER32,
+	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
+	 * CRC32 is in the lower 32 bits, Adler-32 in the upper 32 bits.
+	 */
+};
+
+/*
+enum rte_comp_hash_algo {
+    RTE_COMP_HASH_NONE,
+    RTE_COMP_HASH_SHA1,
+    RTE_COMP_HASH_SHA256,
+};
+Further input is needed from Cavium on this. The xform would need a field
+carrying the above enum value, the op would need to provide a virt/phys
+pointer to a data buffer of appropriate size, and the PMD could indicate
+via its capabilities whether or not hashing is supported.
+*/
+
+/** Compression Huffman Type - used by DEFLATE algorithm */
+enum rte_comp_huffman {
+	RTE_COMP_DEFAULT,
+	/**< PMD may choose which Huffman codes to use */
+	RTE_COMP_FIXED,
+	/**< Use Fixed Huffman codes */
+	RTE_COMP_DYNAMIC,
+	/**< Use Dynamic Huffman codes */
+};
+
+
+enum rte_comp_flush_flag {
+	RTE_COMP_FLUSH_NONE,
+	/**< TODO */
+	RTE_COMP_FLUSH_SYNC,
+	/**< TODO */
+	RTE_COMP_FLUSH_FULL,
+	/**< TODO */
+	RTE_COMP_FLUSH_FINAL
+	/**< TODO */
+};
+
+/** Compression transform types */
+enum rte_comp_xform_type {
+	RTE_COMP_COMPRESS,
+	/**< Compression service - compress */
+	RTE_COMP_DECOMPRESS,
+	/**< Compression service - decompress */
+};
+
+/** Parameters specific to the deflate algorithm */
+struct rte_comp_deflate_params {
+	enum rte_comp_huffman huffman;
+	/**< Compression huffman encoding type */
+};
+
+/**
+ * Session Setup Data common to all compress transforms.
+ * Includes params common to stateless and stateful
+ */
+struct rte_comp_compress_common_params {
+	enum rte_comp_algorithm algo;
+	/**< Algorithm to use for compress operation */
+	union {
+		struct rte_comp_deflate_params deflate;
+		/**< Parameters specific to the deflate algorithm */
+	}; /**< Algorithm specific parameters */
+	int level;
+	/**< Compression level, e.g. one of the RTE_COMP_LEVEL_* values */
+	enum rte_comp_checksum_type chksum;
+	/**< Type of checksum to generate on the uncompressed data */
+};
+
+/**
+ * Session Setup Data for stateful compress transform.
+ * Extra params for stateful transform
+ */
+struct rte_comp_compress_stateful_params {
+	/* TODO: add extra params needed only for stateful, e.g.
+	 * history buffer size, window size, state, state buffers, etc.?
+	 */
+};
+/* Session Setup Data for compress transform. */
+struct rte_comp_compress_xform {
+	struct rte_comp_compress_common_params cmn;
+	struct rte_comp_compress_stateful_params stateful;
+};
+
+/**
+ * Session Setup Data common to all decompress transforms.
+ * Includes params common to stateless and stateful
+ */
+struct rte_comp_decompress_common_params {
+	enum rte_comp_algorithm algo;
+	/**< Algorithm to use for decompression */
+	enum rte_comp_checksum_type chksum;
+	/**< Type of checksum to generate on the decompressed data. */
+};
+/**
+ *  Session Setup Data for decompress transform.
+ * Extra params for stateful transform
+ */
+struct rte_comp_decompress_stateful_params {
+	/* TODO: add extra params needed only for stateful, e.g.
+	 * history buffer size, window size, state, state buffers, etc.?
+	 */
+};
+/* Session Setup Data for decompress transform. */
+struct rte_comp_decompress_xform {
+	struct rte_comp_decompress_common_params cmn;
+	struct rte_comp_decompress_stateful_params stateful;
+};
+
+
+/**
+ * Compression transform structure.
+ *
+ * This is used to specify the compression transforms required.
+ * Each transform structure can hold a single transform, the type field is
+ * used to specify which transform is contained within the union.
+ * There are no chain cases currently supported, just single xforms of
+ *  - compress-only
+ *  - decompress-only
+ *
+ */
+struct rte_comp_xform {
+	struct rte_comp_xform *next;
+	/**< next xform in chain */
+	enum rte_comp_xform_type type;
+	/**< xform type */
+	union {
+		struct rte_comp_compress_xform compress;
+		/**< xform for compress operation */
+		struct rte_comp_decompress_xform decompress;
+		/**< decompress xform */
+	};
+};
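+
+/*
+ * Note: illustrative sketch only, not a verified example. With the above
+ * definitions a compress-only DEFLATE xform could be populated roughly as
+ * below; the level and checksum choices are arbitrary placeholders.
+ *
+ *	struct rte_comp_xform xform = {
+ *		.next = NULL,
+ *		.type = RTE_COMP_COMPRESS,
+ *		.compress = {
+ *			.cmn = {
+ *				.algo = RTE_COMP_DEFLATE,
+ *				.deflate = { .huffman = RTE_COMP_DEFAULT },
+ *				.level = RTE_COMP_LEVEL_MIN,
+ *				.chksum = RTE_COMP_NONE,
+ *			},
+ *		},
+ *	};
+ */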
+
+
+struct rte_comp_session;
+/**
+ * Compression Operation.
+ *
+ * This structure contains data relating to performing a compression
+ * operation on the referenced mbuf data buffers.
+ *
+ * All compression operations are Out-of-place (OOP) operations,
+ * as the size of the output data is different to the size of the input data.
+ *
+ * Comp operations are enqueued and dequeued in comp PMDs using the
+ * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
+ */
+struct rte_comp_op {
+	struct rte_comp_session *session;
+	/**< Handle for the initialised session context */
+	struct rte_mempool *mempool;
+	/**< mempool from which operation is allocated */
+	phys_addr_t phys_addr;
+	/**< physical address of this operation */
+	struct rte_mbuf *m_src;
+	/**< source mbuf
+	 * The total size of the input buffer(s) can be retrieved using
+	 * rte_pktmbuf_data_len(m_src)
+	 */
+	struct rte_mbuf *m_dst;
+	/**< destination mbuf
+	 * The total size of the output buffer(s) can be retrieved using
+	 * rte_pktmbuf_data_len(m_dst)
+	 */
+
+	struct {
+		uint32_t offset;
+		/**< Starting point for compression or decompression,
+		 * specified as number of bytes from start of packet in
+		 * source buffer.
+		 * Starting point for checksum generation in compress direction.
+		 */
+		uint32_t length;
+		/**< The length, in bytes, of the data in source buffer
+		 * to be compressed or decompressed.
+		 * Also the length of the data over which the checksum
+		 * should be generated in compress direction
+		 */
+	} src;
+	struct {
+		uint32_t offset;
+		/**< Starting point for writing output data, specified as
+		 * number of bytes from start of packet in dest
+		 * buffer. Starting point for checksum generation in
+		 * decompress direction.
+		 */
+	} dst;
+	enum rte_comp_flush_flag flush_flag;
+	/**< defines flush characteristics for the output data.
+	 * Only applicable in compress direction
+	 */
+	uint64_t input_chksum;
+	/**< An input checksum can be provided to generate a
+	 * cumulative checksum across sequential blocks.
+	 * Checksum type is as specified in xform chksum_type
+	 */
+	uint64_t output_chksum;
+	/**< If a checksum is generated it will be written in here.
+	 * Checksum type is as specified in xform chksum_type.
+	 */
+	uint32_t consumed;
+	/**< The number of bytes from the source buffer
+	 * which were compressed/decompressed.
+	 */
+	uint32_t produced;
+	/**< The number of bytes written to the destination buffer
+	 * which were compressed/decompressed.
+	 */
+	uint64_t debug_status;
+	/**<
+	 * Status of the operation is returned in the status param.
+	 * This field allows the PMD to pass back extra
+	 * pmd-specific debug information. Value is not defined on the API.
+	 */
+	uint8_t status;
+	/**<
+	 * Operation status - use values from enum rte_comp_op_status.
+	 * This is reset to
+	 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+	 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
+	 * is successfully processed by a PMD
+	 */
+
+	/* TODO: are any extra params needed on a stateful op, or are they all
+	 * in the xform? rte_comp_op_common_params/_stateful_params?
+	 */
+};
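+
+/*
+ * Note: illustrative sketch only, not a verified example. For a stateless
+ * compress of a single-segment mbuf an op could be filled in roughly as
+ * below; 'dev_id', 'qp_id', 'sess', 'op_pool', 'm_src' and 'm_dst' are
+ * placeholders, and the enqueue call assumes a cryptodev-style
+ * (dev_id, qp_id, ops, nb_ops) signature for the burst API referenced above.
+ *
+ *	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
+ *
+ *	op->m_src = m_src;
+ *	op->m_dst = m_dst;
+ *	op->src.offset = 0;
+ *	op->src.length = rte_pktmbuf_data_len(m_src);
+ *	op->dst.offset = 0;
+ *	op->flush_flag = RTE_COMP_FLUSH_FINAL;
+ *	rte_comp_op_attach_session(op, sess);
+ *	nb_enq = rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1);
+ */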
+
+
+/**
+ * Reset the fields of an operation to their default values.
+ *
+ * @param	op	The operation to be reset.
+ */
+static inline void
+__rte_comp_op_reset(struct rte_comp_op *op)
+{
+	struct rte_mempool *tmp_mp = op->mempool;
+	phys_addr_t tmp_phys_addr = op->phys_addr;
+
+	/* Zero the op but preserve fields set when the op was allocated */
+	memset(op, 0, sizeof(*op));
+	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+	op->mempool = tmp_mp;
+	op->phys_addr = tmp_phys_addr;
+}
+
+
+/**
+ * Attach a session to a compression operation
+ *
+ * @param	op	operation
+ * @param	sess	session
+ */
+static inline int
+__rte_comp_op_attach_comp_session(struct rte_comp_op *op,
+		struct rte_comp_session *sess)
+{
+	op->session = sess;
+
+	return 0;
+}
+
+
+/**
+ * Private data structure belonging to an operation pool.
+ */
+struct rte_comp_op_pool_private {
+	uint16_t priv_size;
+	/**< Size of private area in each operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each object by
+ * the mempool
+ *
+ * @param	mempool	mempool for operations
+ *
+ * @return	private data size
+ */
+static inline uint16_t
+__rte_comp_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+	struct rte_comp_op_pool_private *priv =
+	    (struct rte_comp_op_pool_private *)rte_mempool_get_priv(mempool);
+
+	return priv->priv_size;
+}
+
+
+/**
+ * Creates an operation pool
+ *
+ * @param	name		pool name
+ * @param	nb_elts		number of elements in pool
+ * @param	cache_size	Number of elements to cache on lcore, see
+ *				*rte_mempool_create* for further details about
+ *				cache size
+ * @param	priv_size	Size of private data to allocate with each
+ *				operation
+ * @param	socket_id	Socket to allocate memory on
+ *
+ * @return
+ *  - On success pointer to mempool
+ *  - On failure NULL
+ */
+extern struct rte_mempool *
+rte_comp_op_pool_create(const char *name,
+		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+		int socket_id);
+
+/**
+ * Bulk allocate raw elements from mempool and return as comp operations
+ *
+ * @param	mempool		operation mempool.
+ * @param	ops		Array to place allocated operations
+ * @param	nb_ops		Number of operations to allocate
+ *
+ * @returns
+ * - On success returns number of ops allocated
+ */
+static inline int
+__rte_comp_op_raw_bulk_alloc(struct rte_mempool *mempool,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+
+	if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+		return nb_ops;
+
+	return 0;
+}
+
+/**
+ * Allocate an operation from a mempool with default parameters set
+ *
+ * @param	mempool	operation mempool
+ *
+ * @returns
+ * - On success returns a valid rte_comp_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_comp_op *
+rte_comp_op_alloc(struct rte_mempool *mempool)
+{
+	struct rte_comp_op *op = NULL;
+	int retval;
+
+	retval = __rte_comp_op_raw_bulk_alloc(mempool, &op, 1);
+	if (unlikely(retval != 1))
+		return NULL;
+
+	__rte_comp_op_reset(op);
+
+	return op;
+}
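+
+/*
+ * Note: illustrative sketch only, not a verified example; the pool name and
+ * sizing values are arbitrary placeholders.
+ *
+ *	struct rte_mempool *op_pool = rte_comp_op_pool_create("comp_op_pool",
+ *			8192, 128, 0, rte_socket_id());
+ *	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
+ *
+ *	if (op != NULL) {
+ *		... populate and enqueue the op, then on completion ...
+ *		rte_comp_op_free(op);
+ *	}
+ */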
+
+
+/**
+ * Bulk allocate operations from a mempool with default parameters set
+ *
+ * @param	mempool	comp operation mempool
+ * @param	ops	Array to place allocated operations
+ * @param	nb_ops	Number of operations to allocate
+ *
+ * @returns
+ * - nb_ops if the number of operations requested were allocated.
+ * - 0 if the requested number of ops are not available.
+ *   None are allocated in this case.
+ */
+
+static inline unsigned
+rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	int i;
+
+	if (unlikely(__rte_comp_op_raw_bulk_alloc(mempool, ops, nb_ops)
+			!= nb_ops))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++)
+		__rte_comp_op_reset(ops[i]);
+
+	return nb_ops;
+}
+
+
+
+/**
+ * Returns a pointer to the private data of an operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param	op	 operation.
+ * @param	size	size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_comp_op_get_priv_data(struct rte_comp_op *op, uint32_t size)
+{
+	uint32_t priv_size;
+
+	if (likely(op->mempool != NULL)) {
+		priv_size = __rte_comp_op_get_priv_data_size(op->mempool);
+
+		if (likely(priv_size >= size))
+			return (void *)(op + 1);
+	}
+
+	return NULL;
+}
+
+/**
+ * Free an operation structure.
+ * If the operation has been allocated from an rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param	op operation
+ */
+static inline void
+rte_comp_op_free(struct rte_comp_op *op)
+{
+	if (op != NULL && op->mempool != NULL)
+		rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Attach a session to an operation
+ *
+ * @param	op	operation
+ * @param	sess	session
+ */
+static inline int
+rte_comp_op_attach_session(struct rte_comp_op *op,
+		struct rte_comp_session *sess)
+{
+	op->session = sess;
+	return 0;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMP_H_ */
diff --git a/lib/librte_compressdev/rte_compressdev.c b/lib/librte_compressdev/rte_compressdev.c
new file mode 100644
index 0000000..a29b37b
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev.c
@@ -0,0 +1,1181 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_interrupts.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "rte_comp.h"
+#include "rte_compressdev.h"
+#include "rte_compressdev_pmd.h"
+
+static uint8_t nb_drivers;
+
+struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];
+
+struct rte_compressdev *rte_compressdevs = &rte_comp_devices[0];
+
+static struct rte_compressdev_global compressdev_globals = {
+		.devs			= &rte_comp_devices[0],
+		.data			= { NULL },
+		.nb_devs		= 0,
+		.max_devs		= RTE_COMPRESS_MAX_DEVS
+};
+
+struct rte_compressdev_global *rte_compressdev_globals = &compressdev_globals;
+
+/* spinlock for comp device callbacks */
+static rte_spinlock_t rte_compressdev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+
+/**
+ * The user application callback description.
+ *
+ * It contains callback address to be registered by user application,
+ * the pointer to the parameters for callback, and the event type.
+ */
+struct rte_compressdev_callback {
+	TAILQ_ENTRY(rte_compressdev_callback) next; /**< Callbacks list */
+	rte_compressdev_cb_fn cb_fn;		/**< Callback address */
+	void *cb_arg;				/**< Parameter for callback */
+	enum rte_compressdev_event_type event;	/**< Interrupt event type */
+	uint32_t active;			/**< Callback is executing */
+};
+
+/**
+ * String identifiers for the compression algorithms.
+ * These can be used on an application command line.
+ */
+const char *
+rte_comp_algorithm_strings[] = {
+	[RTE_COMP_DEFLATE]		= "deflate",
+	[RTE_COMP_LZS]			= "lzs",
+
+};
+
+
+#define param_range_check(x, y) \
+	(((x < y.min) || (x > y.max)) || \
+	(y.increment != 0 && (x % y.increment) != 0))
+
+
+const char *
+rte_compressdev_get_feature_name(uint64_t flag)
+{
+	switch (flag) {
+	case RTE_COMP_FF_HW_ACCELERATED:
+		return "HW_ACCELERATED";
+	case RTE_COMP_FF_CPU_SSE:
+		return "CPU_SSE";
+	case RTE_COMP_FF_CPU_AVX:
+		return "CPU_AVX";
+	case RTE_COMP_FF_CPU_AVX2:
+		return "CPU_AVX2";
+	case RTE_COMP_FF_CPU_AVX512:
+		return "CPU_AVX512";
+	case RTE_COMP_FF_CPU_NEON:
+		return "CPU_NEON";
+	case RTE_COMP_FF_MBUF_SCATTER_GATHER:
+		return "MBUF_SCATTER_GATHER";
+	case RTE_COMP_FF_MULTI_PKT_CHECKSUM:
+		return "MULTI_PKT_CHKSUM";
+	case RTE_COMP_FF_STATEFUL:
+		return "STATEFUL";
+	default:
+		return NULL;
+	}
+}
+
+struct rte_compressdev *
+rte_compressdev_pmd_get_dev(uint8_t dev_id)
+{
+	return &rte_compressdev_globals->devs[dev_id];
+}
+
+struct rte_compressdev *
+rte_compressdev_pmd_get_named_dev(const char *name)
+{
+	struct rte_compressdev *dev;
+	unsigned int i;
+
+	if (name == NULL)
+		return NULL;
+
+	for (i = 0; i < rte_compressdev_globals->max_devs; i++) {
+		dev = &rte_compressdev_globals->devs[i];
+
+		if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
+				(strcmp(dev->data->name, name) == 0))
+			return dev;
+	}
+
+	return NULL;
+}
+
+unsigned int
+rte_compressdev_pmd_is_valid_dev(uint8_t dev_id)
+{
+	struct rte_compressdev *dev = NULL;
+
+	if (dev_id >= rte_compressdev_globals->nb_devs)
+		return 0;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+	if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
+		return 0;
+	else
+		return 1;
+}
+
+
+int
+rte_compressdev_get_dev_id(const char *name)
+{
+	unsigned i;
+
+	if (name == NULL)
+		return -1;
+
+	for (i = 0; i < rte_compressdev_globals->nb_devs; i++)
+		if ((strcmp(rte_compressdev_globals->devs[i].data->name, name)
+				== 0) &&
+				(rte_compressdev_globals->devs[i].attached ==
+						RTE_COMPRESSDEV_ATTACHED))
+			return i;
+
+	return -1;
+}
+
+uint8_t
+rte_compressdev_count(void)
+{
+	return rte_compressdev_globals->nb_devs;
+}
+
+uint8_t
+rte_compressdev_device_count_by_driver(uint8_t driver_id)
+{
+	uint8_t i, dev_count = 0;
+
+	for (i = 0; i < rte_compressdev_globals->max_devs; i++)
+		if (rte_compressdev_globals->devs[i].driver_id == driver_id &&
+			rte_compressdev_globals->devs[i].attached ==
+					RTE_COMPRESSDEV_ATTACHED)
+			dev_count++;
+
+	return dev_count;
+}
+
+uint8_t
+rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
+	uint8_t nb_devices)
+{
+	uint8_t i, count = 0;
+	struct rte_compressdev *devs = rte_compressdev_globals->devs;
+	uint8_t max_devs = rte_compressdev_globals->max_devs;
+
+	for (i = 0; i < max_devs && count < nb_devices;	i++) {
+
+		if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
+			int cmp;
+
+			cmp = strncmp(devs[i].device->driver->name,
+					driver_name,
+					strlen(driver_name));
+
+			if (cmp == 0)
+				devices[count++] = devs[i].data->dev_id;
+		}
+	}
+
+	return count;
+}
+
+
+int
+rte_compressdev_socket_id(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id))
+		return -1;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	return dev->data->socket_id;
+}
+
+static inline int
+rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
+		int socket_id)
+{
+	char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	const struct rte_memzone *mz;
+	int n;
+
+	/* generate memzone name */
+	n = snprintf(mz_name, sizeof(mz_name),
+			"rte_compressdev_data_%u", dev_id);
+	if (n >= (int)sizeof(mz_name))
+		return -EINVAL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		mz = rte_memzone_reserve(mz_name,
+				sizeof(struct rte_compressdev_data),
+				socket_id, 0);
+	} else
+		mz = rte_memzone_lookup(mz_name);
+
+	if (mz == NULL)
+		return -ENOMEM;
+
+	*data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(*data, 0, sizeof(struct rte_compressdev_data));
+
+	return 0;
+}
+
+static uint8_t
+rte_compressdev_find_free_device_index(void)
+{
+	uint8_t dev_id;
+
+	for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
+		if (rte_comp_devices[dev_id].attached ==
+				RTE_COMPRESSDEV_DETACHED)
+			return dev_id;
+	}
+	return RTE_COMPRESS_MAX_DEVS;
+}
+
+struct rte_compressdev *
+rte_compressdev_pmd_allocate(const char *name, int socket_id)
+{
+	struct rte_compressdev *compressdev;
+	uint8_t dev_id;
+
+	if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
+		CDEV_LOG_ERR("comp device with name %s already "
+				"allocated!", name);
+		return NULL;
+	}
+
+	dev_id = rte_compressdev_find_free_device_index();
+	if (dev_id == RTE_COMPRESS_MAX_DEVS) {
+		CDEV_LOG_ERR("Reached maximum number of comp devices");
+		return NULL;
+	}
+
+	compressdev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (compressdev->data == NULL) {
+		struct rte_compressdev_data *compressdev_data =
+				compressdev_globals.data[dev_id];
+
+		int retval = rte_compressdev_data_alloc(dev_id,
+				&compressdev_data, socket_id);
+
+		if (retval < 0 || compressdev_data == NULL)
+			return NULL;
+
+		compressdev->data = compressdev_data;
+
+		snprintf(compressdev->data->name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+				"%s", name);
+
+		compressdev->data->dev_id = dev_id;
+		compressdev->data->socket_id = socket_id;
+		compressdev->data->dev_started = 0;
+
+		/* init user callbacks */
+		TAILQ_INIT(&(compressdev->link_intr_cbs));
+
+		compressdev->attached = RTE_COMPRESSDEV_ATTACHED;
+
+		compressdev_globals.nb_devs++;
+	}
+
+	return compressdev;
+}
+
+int
+rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
+{
+	int ret;
+
+	if (compressdev == NULL)
+		return -EINVAL;
+
+	/* Close device only if device operations have been set */
+	if (compressdev->dev_ops) {
+		ret = rte_compressdev_close(compressdev->data->dev_id);
+		if (ret < 0)
+			return ret;
+	}
+
+	compressdev->attached = RTE_COMPRESSDEV_DETACHED;
+	compressdev_globals.nb_devs--;
+	return 0;
+}
+
+uint16_t
+rte_compressdev_queue_pair_count(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	dev = &rte_comp_devices[dev_id];
+	return dev->data->nb_queue_pairs;
+}
+
+static int
+rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
+		uint16_t nb_qpairs, int socket_id)
+{
+	struct rte_compressdev_info dev_info;
+	void **qp;
+	unsigned i;
+
+	if ((dev == NULL) || (nb_qpairs < 1)) {
+		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
+							dev, nb_qpairs);
+		return -EINVAL;
+	}
+
+	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
+			nb_qpairs, dev->data->dev_id);
+
+	memset(&dev_info, 0, sizeof(struct rte_compressdev_info));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
+		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
+				nb_qpairs, dev->data->dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->queue_pairs == NULL) { /* first time configuration */
+		dev->data->queue_pairs = rte_zmalloc_socket(
+				"compressdev->queue_pairs",
+				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
+				RTE_CACHE_LINE_SIZE, socket_id);
+
+		if (dev->data->queue_pairs == NULL) {
+			dev->data->nb_queue_pairs = 0;
+			CDEV_LOG_ERR("failed to get memory for qp meta data, "
+							"nb_queues %u",
+							nb_qpairs);
+			return -(ENOMEM);
+		}
+	} else { /* re-configure */
+		int ret;
+		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
+
+		qp = dev->data->queue_pairs;
+
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
+				-ENOTSUP);
+
+		for (i = nb_qpairs; i < old_nb_queues; i++) {
+			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+
+		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
+				RTE_CACHE_LINE_SIZE);
+		if (qp == NULL) {
+			CDEV_LOG_ERR("failed to realloc qp meta data,"
+						" nb_queues %u", nb_qpairs);
+			return -(ENOMEM);
+		}
+
+		if (nb_qpairs > old_nb_queues) {
+			uint16_t new_qs = nb_qpairs - old_nb_queues;
+
+			memset(qp + old_nb_queues, 0,
+				sizeof(qp[0]) * new_qs);
+		}
+
+		dev->data->queue_pairs = qp;
+
+	}
+	dev->data->nb_queue_pairs = nb_qpairs;
+	return 0;
+}
+
+int
+rte_compressdev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);
+
+	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
+
+}
+
+int
+rte_compressdev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);
+
+	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
+
+}
+
+int
+rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
+{
+	struct rte_compressdev *dev;
+	int diag;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	if (dev->data->dev_started) {
+		CDEV_LOG_ERR(
+		    "device %d must be stopped to allow configuration", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+	/* Setup new number of queue pairs and reconfigure device. */
+	diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
+			config->socket_id);
+	if (diag != 0) {
+		CDEV_LOG_ERR("dev%d rte_compressdev_queue_pairs_config = %d",
+				dev_id, diag);
+		return diag;
+	}
+
+	return (*dev->dev_ops->dev_configure)(dev, config);
+}
+
+
+int
+rte_compressdev_start(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+	int diag;
+
+	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+	if (dev->data->dev_started != 0) {
+		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
+			dev_id);
+		return 0;
+	}
+
+	diag = (*dev->dev_ops->dev_start)(dev);
+	if (diag == 0)
+		dev->data->dev_started = 1;
+	else
+		return diag;
+
+	return 0;
+}
+
+void
+rte_compressdev_stop(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+	if (dev->data->dev_started == 0) {
+		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
+			dev_id);
+		return;
+	}
+
+	(*dev->dev_ops->dev_stop)(dev);
+	dev->data->dev_started = 0;
+}
+
+int
+rte_compressdev_close(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+	int retval;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -1;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		CDEV_LOG_ERR("Device %u must be stopped before closing",
+				dev_id);
+		return -EBUSY;
+	}
+
+	/* We can't close the device if there are outstanding sessions in use */
+	if (dev->data->session_pool != NULL) {
+		if (!rte_mempool_full(dev->data->session_pool)) {
+			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
+					"has sessions still in use, free "
+					"all sessions before calling close",
+					(unsigned)dev_id);
+			return -EBUSY;
+		}
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	retval = (*dev->dev_ops->dev_close)(dev);
+
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+int
+rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+		const struct rte_compressdev_qp_conf *qp_conf, int socket_id,
+		struct rte_mempool *session_pool)
+
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started) {
+		CDEV_LOG_ERR(
+		    "device %d must be stopped to allow configuration", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
+
+	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
+			socket_id, session_pool);
+}
+
+
+int
+rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -ENODEV;
+	}
+
+	if (stats == NULL) {
+		CDEV_LOG_ERR("Invalid stats ptr");
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	memset(stats, 0, sizeof(*stats));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	(*dev->dev_ops->stats_get)(dev, stats);
+	return 0;
+}
+
+void
+rte_compressdev_stats_reset(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+	(*dev->dev_ops->stats_reset)(dev);
+}
+
+
+void
+rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
+{
+	struct rte_compressdev *dev;
+
+	if (dev_id >= compressdev_globals.nb_devs) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	memset(dev_info, 0, sizeof(struct rte_compressdev_info));
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+	dev_info->driver_name = dev->device->driver->name;
+}
+
+
+int
+rte_compressdev_callback_register(uint8_t dev_id,
+			enum rte_compressdev_event_type event,
+			rte_compressdev_cb_fn cb_fn, void *cb_arg)
+{
+	struct rte_compressdev *dev;
+	struct rte_compressdev_callback *user_cb;
+
+	if (!cb_fn)
+		return -EINVAL;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	rte_spinlock_lock(&rte_compressdev_cb_lock);
+
+	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
+		if (user_cb->cb_fn == cb_fn &&
+			user_cb->cb_arg == cb_arg &&
+			user_cb->event == event) {
+			break;
+		}
+	}
+
+	/* create a new callback. */
+	if (user_cb == NULL) {
+		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+				sizeof(struct rte_compressdev_callback), 0);
+		if (user_cb != NULL) {
+			user_cb->cb_fn = cb_fn;
+			user_cb->cb_arg = cb_arg;
+			user_cb->event = event;
+			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
+		}
+	}
+
+	rte_spinlock_unlock(&rte_compressdev_cb_lock);
+	return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int
+rte_compressdev_callback_unregister(uint8_t dev_id,
+			enum rte_compressdev_event_type event,
+			rte_compressdev_cb_fn cb_fn, void *cb_arg)
+{
+	int ret;
+	struct rte_compressdev *dev;
+	struct rte_compressdev_callback *cb, *next;
+
+	if (!cb_fn)
+		return -EINVAL;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+	rte_spinlock_lock(&rte_compressdev_cb_lock);
+
+	ret = 0;
+	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
+
+		next = TAILQ_NEXT(cb, next);
+
+		if (cb->cb_fn != cb_fn || cb->event != event ||
+				(cb->cb_arg != (void *)-1 &&
+				cb->cb_arg != cb_arg))
+			continue;
+
+		/*
+		 * if this callback is not executing right now,
+		 * then remove it.
+		 */
+		if (cb->active == 0) {
+			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
+			rte_free(cb);
+		} else {
+			ret = -EAGAIN;
+		}
+	}
+
+	rte_spinlock_unlock(&rte_compressdev_cb_lock);
+	return ret;
+}
+
+void
+rte_compressdev_pmd_callback_process(struct rte_compressdev *dev,
+	enum rte_compressdev_event_type event)
+{
+	struct rte_compressdev_callback *cb_lst;
+	struct rte_compressdev_callback dev_cb;
+
+	rte_spinlock_lock(&rte_compressdev_cb_lock);
+	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
+		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+			continue;
+		dev_cb = *cb_lst;
+		cb_lst->active = 1;
+		rte_spinlock_unlock(&rte_compressdev_cb_lock);
+		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
+						dev_cb.cb_arg);
+		rte_spinlock_lock(&rte_compressdev_cb_lock);
+		cb_lst->active = 0;
+	}
+	rte_spinlock_unlock(&rte_compressdev_cb_lock);
+}
+
+
+int
+rte_compressdev_session_init(uint8_t dev_id,
+		struct rte_comp_session *sess,
+		struct rte_comp_xform *xforms,
+		struct rte_mempool *mp)
+{
+	struct rte_compressdev *dev;
+	uint8_t index;
+	int ret;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (sess == NULL || xforms == NULL || dev == NULL)
+		return -EINVAL;
+
+	index = dev->driver_id;
+
+	if (sess->sess_private_data[index] == NULL) {
+		ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
+		if (ret < 0) {
+			CDEV_LOG_ERR(
+				"dev_id %d failed to configure session details",
+				dev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+struct rte_comp_session *
+rte_compressdev_session_create(struct rte_mempool *mp)
+{
+	struct rte_comp_session *sess;
+
+	/* Allocate a session structure from the session pool */
+	if (rte_mempool_get(mp, (void *)&sess)) {
+		CDEV_LOG_ERR("couldn't get object from session mempool");
+		return NULL;
+	}
+
+	/* Clear device session pointer */
+	memset(sess, 0, (sizeof(void *) * nb_drivers));
+
+	return sess;
+}
+
+int
+rte_compressdev_queue_pair_attach_session(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_session *sess)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* The API is optional, not an error if driver does not support it */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
+
+	void *sess_priv = get_session_private_data(sess, dev->driver_id);
+
+	if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
+		CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
+				dev_id, qp_id);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+int
+rte_compressdev_queue_pair_detach_session(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_session *sess)
+{
+	struct rte_compressdev *dev;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_comp_devices[dev_id];
+
+	/* The API is optional, not an error if driver does not support it */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
+
+	void *sess_priv = get_session_private_data(sess, dev->driver_id);
+
+	if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
+		CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
+				dev_id, qp_id);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+int
+rte_compressdev_session_clear(uint8_t dev_id,
+		struct rte_comp_session *sess)
+{
+	struct rte_compressdev *dev;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (dev == NULL || sess == NULL)
+		return -EINVAL;
+
+	dev->dev_ops->session_clear(dev, sess);
+
+	return 0;
+}
+
+int
+rte_compressdev_session_free(struct rte_comp_session *sess)
+{
+	uint8_t i;
+	void *sess_priv;
+	struct rte_mempool *sess_mp;
+
+	if (sess == NULL)
+		return -EINVAL;
+
+	/* Check that all device private data has been freed */
+	for (i = 0; i < nb_drivers; i++) {
+		sess_priv = get_session_private_data(sess, i);
+		if (sess_priv != NULL)
+			return -EBUSY;
+	}
+
+	/* Return session to mempool */
+	sess_mp = rte_mempool_from_obj(sess);
+	rte_mempool_put(sess_mp, sess);
+
+	return 0;
+}
+
+unsigned int
+rte_compressdev_get_header_session_size(void)
+{
+	/*
+	 * Header contains pointers to the private data
+	 * of all registered drivers
+	 */
+	return (sizeof(void *) * nb_drivers);
+}
+
+unsigned int
+rte_compressdev_get_private_session_size(uint8_t dev_id)
+{
+	struct rte_compressdev *dev;
+	unsigned int header_size = sizeof(void *) * nb_drivers;
+	unsigned int priv_sess_size;
+
+	if (!rte_compressdev_pmd_is_valid_dev(dev_id))
+		return 0;
+
+	dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (*dev->dev_ops->session_get_size == NULL)
+		return 0;
+
+	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+
+	/*
+	 * If size is less than session header size,
+	 * return the latter, as this guarantees that
+	 * sessionless operations will work
+	 */
+	if (priv_sess_size < header_size)
+		return header_size;
+
+	return priv_sess_size;
+
+}
+
+/** Initialise rte_comp_op mempool element */
+static void
+rte_comp_op_init(struct rte_mempool *mempool,
+		__rte_unused void *opaque_arg,
+		void *_op_data,
+		__rte_unused unsigned i)
+{
+	struct rte_comp_op *op = _op_data;
+
+	memset(_op_data, 0, mempool->elt_size);
+
+	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+	op->phys_addr = rte_mem_virt2iova(_op_data);
+	op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_comp_op_pool_create(const char *name,
+		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+		int socket_id)
+{
+	struct rte_comp_op_pool_private *priv;
+
+	unsigned int elt_size = sizeof(struct rte_comp_op) + priv_size;
+
+	/* lookup mempool in case already allocated */
+	struct rte_mempool *mp = rte_mempool_lookup(name);
+
+	if (mp != NULL) {
+		priv = (struct rte_comp_op_pool_private *)
+				rte_mempool_get_priv(mp);
+
+		if (mp->elt_size != elt_size ||
+				mp->cache_size < cache_size ||
+				mp->size < nb_elts ||
+				priv->priv_size < priv_size) {
+			mp = NULL;
+			CDEV_LOG_ERR("Mempool %s already exists but with "
+					"incompatible parameters", name);
+			return NULL;
+		}
+		return mp;
+	}
+
+	mp = rte_mempool_create(
+			name,
+			nb_elts,
+			elt_size,
+			cache_size,
+			sizeof(struct rte_comp_op_pool_private),
+			NULL,
+			NULL,
+			rte_comp_op_init,
+			NULL,
+			socket_id,
+			0);
+
+	if (mp == NULL) {
+		CDEV_LOG_ERR("Failed to create mempool %s", name);
+		return NULL;
+	}
+
+	priv = (struct rte_comp_op_pool_private *)
+			rte_mempool_get_priv(mp);
+
+	priv->priv_size = priv_size;
+
+	return mp;
+}
+
+int
+rte_compressdev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
+{
+	struct rte_compressdev *dev = NULL;
+	uint32_t i = 0;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < RTE_COMPRESS_MAX_DEVS; i++) {
+		int ret = snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+				"%s_%u", dev_name_prefix, i);
+
+		if (ret < 0)
+			return ret;
+
+		dev = rte_compressdev_pmd_get_named_dev(name);
+		if (!dev)
+			return 0;
+	}
+
+	return -1;
+}
+
+TAILQ_HEAD(compressdev_driver_list, compressdev_driver);
+
+static struct compressdev_driver_list compressdev_driver_list =
+	TAILQ_HEAD_INITIALIZER(compressdev_driver_list);
+
+int
+rte_compressdev_driver_id_get(const char *name)
+{
+	struct compressdev_driver *driver;
+	const char *driver_name;
+
+	if (name == NULL) {
+		RTE_LOG(DEBUG, COMPRESSDEV, "name pointer NULL\n");
+		return -1;
+	}
+
+	TAILQ_FOREACH(driver, &compressdev_driver_list, next) {
+		driver_name = driver->driver->name;
+		if (strncmp(driver_name, name, strlen(driver_name)) == 0)
+			return driver->id;
+	}
+	return -1;
+}
+
+const char *
+rte_compressdev_name_get(uint8_t dev_id)
+{
+	struct rte_compressdev *dev = rte_compressdev_pmd_get_dev(dev_id);
+
+	if (dev == NULL)
+		return NULL;
+
+	return dev->data->name;
+}
+
+const char *
+rte_compressdev_driver_name_get(uint8_t driver_id)
+{
+	struct compressdev_driver *driver;
+
+	TAILQ_FOREACH(driver, &compressdev_driver_list, next)
+		if (driver->id == driver_id)
+			return driver->driver->name;
+	return NULL;
+}
+
+uint8_t
+rte_compressdev_allocate_driver(struct compressdev_driver *comp_drv,
+		const struct rte_driver *drv)
+{
+	comp_drv->driver = drv;
+	comp_drv->id = nb_drivers;
+
+	TAILQ_INSERT_TAIL(&compressdev_driver_list, comp_drv, next);
+
+	return nb_drivers++;
+}
diff --git a/lib/librte_compressdev/rte_compressdev.h b/lib/librte_compressdev/rte_compressdev.h
new file mode 100644
index 0000000..674321e
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev.h
@@ -0,0 +1,863 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMPRESSDEV_H_
+#define _RTE_COMPRESSDEV_H_
+
+/**
+ * @file rte_compressdev.h
+ *
+ * RTE Compression Device APIs
+ *
+ * Defines RTE comp Device APIs for the provisioning of compression operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_kvargs.h"
+#include "rte_comp.h"
+#include "rte_dev.h"
+#include <rte_common.h>
+
+extern const char **rte_compressdev_names;
+
+/* Logging Macros */
+
+#define CDEV_LOG_ERR(...) \
+	RTE_LOG(ERR, COMPRESSDEV, \
+		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define CDEV_LOG_INFO(...) \
+	RTE_LOG(INFO, COMPRESSDEV, \
+		RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+			RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#ifdef RTE_LIBRTE_COMPRESSDEV_DEBUG
+#define CDEV_LOG_DEBUG(...) \
+	RTE_LOG(DEBUG, COMPRESSDEV, \
+		RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+			__func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define CDEV_PMD_TRACE(...) \
+	RTE_LOG(DEBUG, COMPRESSDEV, \
+		RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+			dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#else
+#define CDEV_LOG_DEBUG(...) (void)0
+#define CDEV_PMD_TRACE(...) (void)0
+#endif
+
+
+
+/**
+ * A macro that points to an offset from the start
+ * of the comp operation structure (rte_comp_op)
+ *
+ * The returned pointer is cast to type t.
+ *
+ * @param c
+ *   The comp operation.
+ * @param o
+ *   The offset from the start of the comp operation.
+ * @param t
+ *   The type to cast the result into.
+ */
+#define rte_comp_op_ctod_offset(c, t, o)	\
+	((t)((char *)(c) + (o)))
+
+/**
+ * A macro that returns the physical address that points
+ * to an offset from the start of the comp operation
+ * (rte_comp_op)
+ *
+ * @param c
+ *   The comp operation.
+ * @param o
+ *   The offset from the start of the comp operation
+ *   to calculate address from.
+ */
+#define rte_comp_op_ctophys_offset(c, o)	\
+	(rte_iova_t)((c)->phys_addr + (o))
+
+/**
+ * comp parameters range description
+ */
+struct rte_comp_param_range {
+	uint16_t min;	/**< minimum size */
+	uint16_t max;	/**< maximum size */
+	uint16_t increment;
+	/**< if a range of sizes are supported,
+	 * this parameter is used to indicate
+	 * increments in byte size that are supported
+	 * between the minimum and maximum
+	 */
+};
+
+
+/** Structure used to capture a capability of a comp device */
+struct rte_compressdev_capabilities {
+	/* TODO */
+};
+
+
+/** Macro used at end of comp PMD list */
+#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
+	{ RTE_COMP_OP_TYPE_UNDEFINED }
+
+
+/**
+ * compression device supported feature flags
+ *
+ * Note:
+ * New feature flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_compressdev_get_feature_name()
+ */
+
+#define	RTE_COMP_FF_HW_ACCELERATED		(1ULL << 0)
+/**< Operations are off-loaded to an external hardware accelerator */
+#define	RTE_COMP_FF_CPU_SSE			(1ULL << 1)
+/**< Utilises CPU SIMD SSE instructions */
+#define	RTE_COMP_FF_CPU_AVX			(1ULL << 2)
+/**< Utilises CPU SIMD AVX instructions */
+#define	RTE_COMP_FF_CPU_AVX2			(1ULL << 3)
+/**< Utilises CPU SIMD AVX2 instructions */
+#define	RTE_COMP_FF_CPU_AVX512		(1ULL << 4)
+/**< Utilises CPU SIMD AVX512 instructions */
+#define	RTE_COMP_FF_CPU_NEON			(1ULL << 5)
+/**< Utilises CPU NEON instructions */
+#define	RTE_COMP_FF_MBUF_SCATTER_GATHER	(1ULL << 6)
+/**< Scatter-gather mbufs are supported */
+#define RTE_COMP_FF_MULTI_PKT_CHECKSUM	(1ULL << 7)
+/**< Generation of checksum across multiple stateless packets is supported */
+#define RTE_COMP_FF_STATEFUL			(1ULL << 8)
+/**< Stateful compression is supported */
+
+/**
+ * Get the name of a comp device feature flag
+ *
+ * @param	flag	The mask describing the flag.
+ *
+ * @return
+ *   The name of this flag, or NULL if it's not a valid feature flag.
+ */
+
+extern const char *
+rte_compressdev_get_feature_name(uint64_t flag);
+
+/**  comp device information */
+struct rte_compressdev_info {
+	const char *driver_name;		/**< Driver name. */
+	uint8_t driver_id;			/**< Driver identifier */
+	struct rte_pci_device *pci_dev;		/**< PCI information. */
+
+	uint64_t feature_flags;			/**< Feature flags */
+
+	const struct rte_compressdev_capabilities *capabilities;
+	/**< Array of devices supported capabilities */
+
+	unsigned max_nb_queue_pairs;
+	/**< Maximum number of queue pairs supported by device. */
+
+	unsigned int max_nb_sessions_per_qp;
+	/**< Maximum number of sessions per queue pair.
+	 * Default 0 for infinite sessions
+	 */
+	uint8_t intermediate_buffer_size;
+	/**< If an accelerator requires intermediate buffers, it should
+	 * allocate them and provide the size here. The application can
+	 * specify the size to use in rte_compressdev_configure(); if 0 is
+	 * specified then the PMD should use a default value.
+	 */
+};
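+
+/*
+ * Note: illustrative sketch only, not a verified example; 'dev_id' is a
+ * placeholder and rte_compressdev_info_get() is the device info query
+ * implemented in rte_compressdev.c.
+ *
+ *	struct rte_compressdev_info info;
+ *
+ *	rte_compressdev_info_get(dev_id, &info);
+ *	if (info.feature_flags & RTE_COMP_FF_HW_ACCELERATED)
+ *		printf("%s supported\n", rte_compressdev_get_feature_name(
+ *				RTE_COMP_FF_HW_ACCELERATED));
+ */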
+
+#define RTE_COMPRESSDEV_DETACHED  (0)
+#define RTE_COMPRESSDEV_ATTACHED  (1)
+
+/** Definitions of comp device event types */
+enum rte_compressdev_event_type {
+	RTE_COMPRESSDEV_EVENT_UNKNOWN,	/**< unknown event type */
+	RTE_COMPRESSDEV_EVENT_ERROR,	/**< error interrupt event */
+	RTE_COMPRESSDEV_EVENT_MAX		/**< max value of this enum */
+};
+
+/** comp device queue pair configuration structure. */
+struct rte_compressdev_qp_conf {
+	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
+};
+
+/**
+ * Typedef for application callback function to be registered by application
+ * software for notification of device events
+ *
+ * @param	dev_id	comp device identifier
+ * @param	event	comp device event to register for notification of.
+ * @param	cb_arg	User specified parameter to be passed to the
+ *			user's callback function.
+ */
+typedef void (*rte_compressdev_cb_fn)(uint8_t dev_id,
+		enum rte_compressdev_event_type event, void *cb_arg);
+
+
+/** comp device statistics */
+struct rte_compressdev_stats {
+	uint64_t enqueued_count;
+	/**< Count of all operations enqueued */
+	uint64_t dequeued_count;
+	/**< Count of all operations dequeued */
+
+	uint64_t enqueue_err_count;
+	/**< Total error count on operations enqueued */
+	uint64_t dequeue_err_count;
+	/**< Total error count on operations dequeued */
+};
+
+#define RTE_COMPRESSDEV_NAME_MAX_LEN	(64)
+/**< Max length of name of comp PMD */
+
+/**
+ * Get the device identifier for the named comp device.
+ *
+ * @param	name	device name to select the device structure.
+ *
+ * @return
+ *   - Returns comp device identifier on success.
+ *   - Return -1 on failure to find named comp device.
+ */
+extern int
+rte_compressdev_get_dev_id(const char *name);
+
+/**
+ * Get the comp device name given a device identifier.
+ *
+ * @param dev_id
+ *   The identifier of the device
+ *
+ * @return
+ *   - Returns comp device name.
+ *   - Returns NULL if comp device is not present.
+ */
+extern const char *
+rte_compressdev_name_get(uint8_t dev_id);
+
+/**
+ * Get the total number of comp devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   - The total number of usable comp devices.
+ */
+extern uint8_t
+rte_compressdev_count(void);
+
+/**
+ * Get the number of comp devices of a given driver type.
+ *
+ * @param	driver_id	driver identifier.
+ *
+ * @return
+ *   Returns the number of comp devices.
+ */
+extern uint8_t
+rte_compressdev_device_count_by_driver(uint8_t driver_id);
+
+/**
+ * Get number and identifiers of attached comp devices that
+ * use the same comp driver.
+ *
+ * @param	driver_name	driver name.
+ * @param	devices		output devices identifiers.
+ * @param	nb_devices	maximum number of device identifiers that
+ *				can be returned.
+ *
+ * @return
+ *   Returns the number of attached comp devices.
+ */
+uint8_t
+rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
+		uint8_t nb_devices);
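As an illustration (not part of the patch), enumerating the devices bound to a driver and printing their names might look as follows; RTE_COMPRESS_MAX_DEVS comes from the config option added above and <stdio.h> is assumed to be included:

static void
example_list_devices(const char *driver_name)
{
	uint8_t devs[RTE_COMPRESS_MAX_DEVS];
	uint8_t nb_devs, i;

	nb_devs = rte_compressdev_devices_get(driver_name, devs,
			RTE_COMPRESS_MAX_DEVS);

	for (i = 0; i < nb_devs; i++)
		printf("dev %u: %s\n", (unsigned int)devs[i],
			rte_compressdev_name_get(devs[i]));
}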
+/**
+ * Return the NUMA socket to which a device is connected
+ *
+ * @param dev_id
+ *   The identifier of the device
+ * @return
+ *   The NUMA socket id to which the device is connected or
+ *   a default of zero if the socket could not be determined.
+ *   -1 is returned if the dev_id value is out of range.
+ */
+extern int
+rte_compressdev_socket_id(uint8_t dev_id);
+
+/** comp device configuration structure */
+struct rte_compressdev_config {
+	int socket_id;
+	/**< Socket on which to allocate resources */
+	uint16_t nb_queue_pairs;
+	/**< Total number of queue pairs to configure on a device */
+	uint32_t comp_intermediate_buf_size;
+	/**< For the deflate algorithm HW accelerators may need to allocate
+	 * a pool of intermediate buffers for dynamic Huffman encoding.
+	 * This indicates the buffer size and should be
+	 * set a little larger than the expected maximum source buffer size.
+	 * If the compression output doesn't fit in the intermediate buffer
+	 * then compression using dynamic Huffman encoding may not be possible;
+	 * in this case the accelerator may fall back to compression using
+	 * fixed Huffman codes.
+	 * If set to 0 for a device which requires buffers then the PMD
+	 * will use its default value.
+	 */
+};
+
+/**
+ * Configure a device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param	dev_id		The identifier of the device to configure.
+ * @param	config		The comp device configuration structure.
+ *
+ * @return
+ *   - 0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+extern int
+rte_compressdev_configure(uint8_t dev_id,
+			struct rte_compressdev_config *config);
+
+/**
+ * Start a device.
+ *
+ * The device start step is the last one: it sets up the configured features
+ * and starts the enqueue and dequeue units of the device.
+ * On success, all basic functions exported by the API (statistics,
+ * enqueue/dequeue of operations, and so on) can be invoked.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @return
+ *   - 0: Success, device started.
+ *   - <0: Error code of the driver device start function.
+ */
+extern int
+rte_compressdev_start(uint8_t dev_id);
+
+/**
+ * Stop a device. The device can be restarted with a call to
+ * rte_compressdev_start()
+ *
+ * @param	dev_id		The identifier of the device.
+ */
+extern void
+rte_compressdev_stop(uint8_t dev_id);
+
+/**
+ * Close a device. The device cannot be restarted!
+ *
+ * @param	dev_id		The identifier of the device.
+ *
+ * @return
+ *  - 0 on successfully closing device
+ *  - <0 on failure to close device
+ */
+extern int
+rte_compressdev_close(uint8_t dev_id);
+
+/**
+ * Allocate and set up a queue pair for a device.
+ *
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	queue_pair_id	The index of the queue pair to set up. The
+ *				value must be in the range [0, nb_queue_pairs
+ *				- 1] previously supplied to
+ *				rte_compressdev_configure().
+ * @param	qp_conf		The pointer to the configuration data to be
+ *				used for the queue pair. NULL value is
+ *				allowed, in which case default configuration
+ *				will be used.
+ * @param	socket_id	The *socket_id* argument is the socket
+ *				identifier in case of NUMA. The value can be
+ *				*SOCKET_ID_ANY* if there is no NUMA constraint
+ *				for the DMA memory allocated for the receive
+ *				queue pair.
+ * @param	session_pool	Pointer to device session mempool, used
+ *				for session-less operations.
+ *
+ * @return
+ *   - 0: Success, queue pair correctly set up.
+ *   - <0: Queue pair configuration failed
+ */
+extern int
+rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+		const struct rte_compressdev_qp_conf *qp_conf, int socket_id,
+		struct rte_mempool *session_pool);
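To illustrate the intended call sequence (configure, set up queue pairs, then start), a minimal sketch follows; it is not part of the patch and assumes the application has already created a session mempool ("sess_pool"):

static int
example_dev_setup(uint8_t dev_id, struct rte_mempool *sess_pool)
{
	struct rte_compressdev_config cfg = {
		.socket_id = rte_compressdev_socket_id(dev_id),
		.nb_queue_pairs = 1,
		.comp_intermediate_buf_size = 0, /* let the PMD pick a default */
	};
	struct rte_compressdev_qp_conf qp_conf = { .nb_descriptors = 512 };
	int ret;

	ret = rte_compressdev_configure(dev_id, &cfg);
	if (ret < 0)
		return ret;

	/* single queue pair, index 0, on the device's own socket */
	ret = rte_compressdev_queue_pair_setup(dev_id, 0, &qp_conf,
			cfg.socket_id, sess_pool);
	if (ret < 0)
		return ret;

	return rte_compressdev_start(dev_id);
}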
+
+/**
+ * Start a specified queue pair of a device. It is used
+ * when deferred_start flag of the specified queue is true.
+ *
+ * @param	dev_id		The identifier of the device
+ * @param	queue_pair_id	The index of the queue pair to start. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				rte_compressdev_configure().
+ * @return
+ *   - 0: Success, the queue pair is correctly started.
+ *   - -EINVAL: The dev_id or the queue_pair_id is out of range.
+ *   - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int
+rte_compressdev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id);
+
+/**
+ * Stop specified queue pair of a device
+ *
+ * @param	dev_id		The identifier of the device
+ * @param	queue_pair_id	The index of the queue pair to stop. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				rte_compressdev_configure().
+ * @return
+ *   - 0: Success, the queue pair is correctly stopped.
+ *   - -EINVAL: The dev_id or the queue_pair_id is out of range.
+ *   - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int
+rte_compressdev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id);
+
+/**
+ * Get the number of queue pairs on a specific comp device
+ *
+ * @param	dev_id		comp device identifier.
+ * @return
+ *   - The number of configured queue pairs.
+ */
+extern uint16_t
+rte_compressdev_queue_pair_count(uint8_t dev_id);
+
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	stats		A pointer to a structure of type
+ *				*rte_compressdev_stats* to be filled with the
+ *				values of device counters.
+ * @return
+ *   - Zero if successful.
+ *   - Non-zero otherwise.
+ */
+extern int
+rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
+
+/**
+ * Reset the general I/O statistics of a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ */
+extern void
+rte_compressdev_stats_reset(uint8_t dev_id);
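A small illustrative sketch of the statistics API above (not part of the patch; assumes <stdio.h> and <inttypes.h>):

static void
example_dump_and_reset_stats(uint8_t dev_id)
{
	struct rte_compressdev_stats stats;

	if (rte_compressdev_stats_get(dev_id, &stats) == 0)
		printf("enq %" PRIu64 " (err %" PRIu64 "), "
			"deq %" PRIu64 " (err %" PRIu64 ")\n",
			stats.enqueued_count, stats.enqueue_err_count,
			stats.dequeued_count, stats.dequeue_err_count);

	/* start the next measurement interval from zero */
	rte_compressdev_stats_reset(dev_id);
}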
+
+/**
+ * Retrieve the contextual information of a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	dev_info	A pointer to a structure of type
+ *				*rte_compressdev_info* to be filled with the
+ *				contextual information of the device.
+ *
+ * @note The capabilities field of dev_info is set to point to the first
+ * element of an array of struct rte_compressdev_capabilities. The element after
+ * the last valid element has its op field set to
+ * RTE_COMP_OP_TYPE_UNDEFINED.
+ */
+extern void
+rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
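For illustration, querying device info and reporting a feature flag might look like this sketch (not part of the patch; assumes <stdio.h>):

static void
example_print_device_info(uint8_t dev_id)
{
	struct rte_compressdev_info info;

	rte_compressdev_info_get(dev_id, &info);

	printf("dev %u: driver %s, max %u queue pairs\n",
		(unsigned int)dev_id, info.driver_name,
		info.max_nb_queue_pairs);

	if (info.feature_flags & RTE_COMP_FF_HW_ACCELERATED)
		printf("  feature: %s\n",
			rte_compressdev_get_feature_name(
				RTE_COMP_FF_HW_ACCELERATED));
}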
+
+
+/**
+ * Register a callback function for specific device id.
+ *
+ * @param	dev_id		Device id.
+ * @param	event		The event type of interest.
+ * @param	cb_fn		User supplied callback function to be called.
+ * @param	cb_arg		Pointer to the parameters for the registered
+ *				callback.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+extern int
+rte_compressdev_callback_register(uint8_t dev_id,
+		enum rte_compressdev_event_type event,
+		rte_compressdev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Unregister a callback function for specific device id.
+ *
+ * @param	dev_id		The device identifier.
+ * @param	event		The event type of interest.
+ * @param	cb_fn		User supplied callback function to be called.
+ * @param	cb_arg		Pointer to the parameters for the registered
+ *				callback.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+extern int
+rte_compressdev_callback_unregister(uint8_t dev_id,
+		enum rte_compressdev_event_type event,
+		rte_compressdev_cb_fn cb_fn, void *cb_arg);
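An illustrative sketch of registering an error-event handler with the callback API above (the "example_" names are invented for the sketch; assumes <stdio.h>):

static void
example_event_cb(uint8_t dev_id, enum rte_compressdev_event_type event,
		void *cb_arg)
{
	(void)cb_arg; /* registration below passes NULL */

	if (event == RTE_COMPRESSDEV_EVENT_ERROR)
		printf("compressdev %u raised an error event\n",
			(unsigned int)dev_id);
}

static int
example_register_error_cb(uint8_t dev_id)
{
	return rte_compressdev_callback_register(dev_id,
			RTE_COMPRESSDEV_EVENT_ERROR,
			example_event_cb, NULL);
}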
+
+
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+		struct rte_comp_op **ops, uint16_t nb_ops);
+/**< Dequeue processed packets from queue pair of a device. */
+
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+		struct rte_comp_op **ops, uint16_t nb_ops);
+/**< Enqueue packets for processing on queue pair of a device. */
+
+struct rte_compressdev_callback;
+
+/** Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_compressdev_cb_list, rte_compressdev_callback);
+
+/** The data structure associated with each comp device. */
+struct rte_compressdev {
+	dequeue_pkt_burst_t dequeue_burst;
+	/**< Pointer to PMD dequeue function. */
+	enqueue_pkt_burst_t enqueue_burst;
+	/**< Pointer to PMD enqueue function. */
+
+	struct rte_compressdev_data *data;
+	/**< Pointer to device data */
+	struct rte_compressdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	uint64_t feature_flags;
+	/**< Supported features */
+	struct rte_device *device;
+	/**< Backing device */
+
+	uint8_t driver_id;
+	/**< comp driver identifier */
+
+	struct rte_compressdev_cb_list link_intr_cbs;
+	/**< User application callback for interrupts if present */
+
+	__extension__
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+
+/**
+ *
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_compressdev_data {
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t socket_id;
+	/**< Socket ID where memory is allocated */
+	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	__extension__
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	struct rte_mempool *session_pool;
+	/**< Session memory pool */
+	void **queue_pairs;
+	/**< Array of pointers to queue pairs. */
+	uint16_t nb_queue_pairs;
+	/**< Number of device queue pairs. */
+
+	void *dev_private;
+	/**< PMD-specific private data */
+} __rte_cache_aligned;
+
+extern struct rte_compressdev *rte_compressdevs;
+/**
+ *
+ * Dequeue a burst of processed compression operations from a queue on the comp
+ * device. The dequeued operations are stored in *rte_comp_op* structures
+ * whose pointers are supplied in the *ops* array.
+ *
+ * The rte_compressdev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_comp_op* data structures
+ * effectively supplied into the *ops* array.
+ *
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the device's output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_compressdev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
+ *
+ * The rte_compressdev_dequeue_burst() function does not provide any error
+ * notification to avoid the corresponding overhead.
+ *
+ * @param	dev_id		The compression device identifier
+ * @param	qp_id		The index of the queue pair from which to
+ *				retrieve processed operations. The value must be
+ *				in the range [0, nb_queue_pair - 1] previously
+ *				supplied to rte_compressdev_configure().
+ * @param	ops		The address of an array of pointers to
+ *				*rte_comp_op* structures that must be
+ *				large enough to store *nb_ops* pointers in it.
+ * @param	nb_ops		The maximum number of operations to dequeue.
+ *
+ * @return
+ *   - The number of operations actually dequeued, which is the number
+ *   of pointers to *rte_comp_op* structures effectively supplied to the
+ *   *ops* array.
+ */
+static inline uint16_t
+rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	struct rte_compressdev *dev = &rte_compressdevs[dev_id];
+
+	nb_ops = (*dev->dequeue_burst)
+			(dev->data->queue_pairs[qp_id], ops, nb_ops);
+
+	return nb_ops;
+}
+
+/**
+ * Enqueue a burst of operations for processing on a compression device.
+ *
+ * The rte_compressdev_enqueue_burst() function is invoked to place
+ * comp operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
+ *
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_comp_op* structures.
+ *
+ * The rte_compressdev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	qp_id		The index of the queue pair on which operations
+ *				are to be enqueued for processing. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				 *rte_compressdev_configure*.
+ * @param	ops		The address of an array of *nb_ops* pointers
+ *				to *rte_comp_op* structures which contain
+ *				the operations to be processed.
+ * @param	nb_ops		The number of operations to process.
+ *
+ * @return
+ * The number of operations actually enqueued on the device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * comp device's queue is full or if invalid parameters are specified in
+ * a *rte_comp_op*.
+ */
+static inline uint16_t
+rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	struct rte_compressdev *dev = &rte_compressdevs[dev_id];
+
+	return (*dev->enqueue_burst)(
+			dev->data->queue_pairs[qp_id], ops, nb_ops);
+}
+
+
+/** compressdev session */
+struct rte_comp_session {
+	__extension__ void *sess_private_data[0];
+	/**< Private session material */
+};
+
+
+/**
+ * Create comp session header (generic with no private data)
+ *
+ * @param   mempool    Session mempool to allocate session
+ *                     objects from
+ * @return
+ *  - On success returns a pointer to the comp session
+ *  - On failure returns NULL
+ */
+struct rte_comp_session *
+rte_compressdev_session_create(struct rte_mempool *mempool);
+
+/**
+ * Frees comp session header, after checking that all
+ * the device private data has been freed, returning it
+ * to its original mempool.
+ *
+ * @param   sess     Session header to be freed.
+ *
+ * @return
+ *  - 0 if successful.
+ *  - -EINVAL if session is NULL.
+ *  - -EBUSY if not all device private data has been freed.
+ */
+int
+rte_compressdev_session_free(struct rte_comp_session *sess);
+
+/**
+ * Fill out private data for the device id, based on its device type.
+ *
+ * @param   dev_id   ID of device that we want the session to be used on
+ * @param   sess     Session where the private data will be attached to
+ * @param   xforms   comp transform operations to apply on flow
+ *                   processed with this session
+ * @param   mempool  Mempool where the private data is allocated.
+ *
+ * @return
+ *  - On success, zero.
+ *  - -EINVAL if input parameters are invalid.
+ *  - -ENOTSUP if comp device does not support the comp transform.
+ *  - -ENOMEM if the private session could not be allocated.
+ */
+int
+rte_compressdev_session_init(uint8_t dev_id,
+			struct rte_comp_session *sess,
+			struct rte_comp_xform *xforms,
+			struct rte_mempool *mempool);
+
+/**
+ * Frees private data for the device id, based on its device type,
+ * returning it to its mempool.
+ *
+ * @param   dev_id   ID of device that uses the session.
+ * @param   sess     Session containing the reference to the private data
+ *
+ * @return
+ *  - 0 if successful.
+ *  - -EINVAL if device is invalid or session is NULL.
+ */
+int
+rte_compressdev_session_clear(uint8_t dev_id,
+			struct rte_comp_session *sess);
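An illustrative session lifecycle using the calls above (not part of the patch; the xform is assumed to be filled in by the caller and both mempools created beforehand):

static struct rte_comp_session *
example_session_setup(uint8_t dev_id, struct rte_comp_xform *xform,
		struct rte_mempool *sess_mp, struct rte_mempool *priv_mp)
{
	struct rte_comp_session *sess;

	sess = rte_compressdev_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	if (rte_compressdev_session_init(dev_id, sess, xform, priv_mp) < 0) {
		rte_compressdev_session_free(sess);
		return NULL;
	}

	return sess;
}

static void
example_session_teardown(uint8_t dev_id, struct rte_comp_session *sess)
{
	rte_compressdev_session_clear(dev_id, sess);
	rte_compressdev_session_free(sess);
}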
+
+/**
+ * Get the size of the session header, accounting for all registered drivers.
+ *
+ * @return
+ *   Size of the session header.
+ */
+unsigned int
+rte_compressdev_get_header_session_size(void);
+
+/**
+ * Get the size of the private session data for a device.
+ *
+ * @param	dev_id		The device identifier.
+ *
+ * @return
+ *   - Size of the private data, if successful
+ *   - 0 if device is invalid or does not have private session
+ */
+unsigned int
+rte_compressdev_get_private_session_size(uint8_t dev_id);
+
+/**
+ * Attach a comp session to a queue pair.
+ *
+ * @param	dev_id		Device to which the session will be attached.
+ * @param	qp_id		Queue pair to which the session will be attached.
+ * @param	session		Session pointer previously allocated by
+ *				*rte_compressdev_session_create*.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int
+rte_compressdev_queue_pair_attach_session(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_session *session);
+
+/**
+ * Detach a comp session from a queue pair.
+ *
+ * @param	dev_id		Device to which the session is attached.
+ * @param	qp_id		Queue pair to which the session is attached.
+ * @param	session		Session pointer previously allocated by
+ *				*rte_compressdev_session_create*.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int
+rte_compressdev_queue_pair_detach_session(uint8_t dev_id, uint16_t qp_id,
+		struct rte_comp_session *session);
+
+/**
+ * Provide driver identifier.
+ *
+ * @param name
+ *   The pointer to a driver name.
+ * @return
+ *  The driver type identifier or -1 if no driver found
+ */
+int rte_compressdev_driver_id_get(const char *name);
+
+/**
+ * Provide driver name.
+ *
+ * @param driver_id
+ *   The driver identifier.
+ * @return
+ *  The driver name or NULL if no driver found
+ */
+const char *rte_compressdev_driver_name_get(uint8_t driver_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMPRESSDEV_H_ */
diff --git a/lib/librte_compressdev/rte_compressdev_pmd.c b/lib/librte_compressdev/rte_compressdev_pmd.c
new file mode 100644
index 0000000..c569cbd
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_pmd.c
@@ -0,0 +1,193 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of the copyright holder nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "rte_compressdev_pmd.h"
+
+/**
+ * Parse name from argument
+ */
+static int
+rte_compressdev_pmd_parse_name_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	struct rte_compressdev_pmd_init_params *params = extra_args;
+	int n;
+
+	n = snprintf(params->name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s", value);
+	if (n >= RTE_COMPRESSDEV_NAME_MAX_LEN)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Parse unsigned integer from argument
+ */
+static int
+rte_compressdev_pmd_parse_uint_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	int i;
+	char *end;
+	errno = 0;
+
+	i = strtol(value, &end, 10);
+	if (*end != 0 || errno != 0 || i < 0)
+		return -EINVAL;
+
+	*((uint32_t *)extra_args) = i;
+	return 0;
+}
+
+int
+rte_compressdev_pmd_parse_input_args(
+		struct rte_compressdev_pmd_init_params *params,
+		const char *args)
+{
+	struct rte_kvargs *kvlist = NULL;
+	int ret = 0;
+
+	if (params == NULL)
+		return -EINVAL;
+
+	if (args) {
+		kvlist = rte_kvargs_parse(args,	compressdev_pmd_valid_params);
+		if (kvlist == NULL)
+			return -EINVAL;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
+				&rte_compressdev_pmd_parse_uint_arg,
+				&params->max_nb_queue_pairs);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG,
+				&rte_compressdev_pmd_parse_uint_arg,
+				&params->socket_id);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_COMPRESSDEV_PMD_NAME_ARG,
+				&rte_compressdev_pmd_parse_name_arg,
+				params);
+		if (ret < 0)
+			goto free_kvlist;
+	}
+
+free_kvlist:
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
+struct rte_compressdev *
+rte_compressdev_pmd_create(const char *name,
+		struct rte_device *device,
+		struct rte_compressdev_pmd_init_params *params)
+{
+	struct rte_compressdev *compressdev;
+
+	if (params->name[0] != '\0') {
+		CDEV_LOG_INFO("[%s] User specified device name = %s\n",
+				device->driver->name, params->name);
+		name = params->name;
+	}
+
+	CDEV_LOG_INFO("[%s] - Creating compressdev %s\n",
+			device->driver->name, name);
+
+	CDEV_LOG_INFO("[%s] - Initialisation parameters - name: %s,"
+			"socket id: %d, max queue pairs: %u",
+			device->driver->name, name,
+			params->socket_id, params->max_nb_queue_pairs);
+
+	/* allocate device structure */
+	compressdev = rte_compressdev_pmd_allocate(name, params->socket_id);
+	if (compressdev == NULL) {
+		CDEV_LOG_ERR("[%s] Failed to allocate comp device for %s",
+				device->driver->name, name);
+		return NULL;
+	}
+
+	/* allocate private device structure */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		compressdev->data->dev_private =
+				rte_zmalloc_socket("compressdev device private",
+						params->private_data_size,
+						RTE_CACHE_LINE_SIZE,
+						params->socket_id);
+
+		if (compressdev->data->dev_private == NULL) {
+			CDEV_LOG_ERR("[%s] Cannot allocate memory for "
+					"compressdev %s private data",
+					device->driver->name, name);
+
+			rte_compressdev_pmd_release_device(compressdev);
+			return NULL;
+		}
+	}
+
+	compressdev->device = device;
+
+	/* initialise user call-back tail queue */
+	TAILQ_INIT(&(compressdev->link_intr_cbs));
+
+	return compressdev;
+}
+
+int
+rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev)
+{
+	int retval;
+
+	CDEV_LOG_INFO("[%s] Closing comp device %s",
+			compressdev->device->driver->name,
+			compressdev->device->name);
+
+	/* free comp device */
+	retval = rte_compressdev_pmd_release_device(compressdev);
+	if (retval)
+		return retval;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_free(compressdev->data->dev_private);
+
+	compressdev->device = NULL;
+	compressdev->data = NULL;
+
+	return 0;
+}
diff --git a/lib/librte_compressdev/rte_compressdev_pmd.h b/lib/librte_compressdev/rte_compressdev_pmd.h
new file mode 100644
index 0000000..3b8143a
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_pmd.h
@@ -0,0 +1,535 @@
+/*-
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMPRESSDEV_PMD_H_
+#define _RTE_COMPRESSDEV_PMD_H_
+
+/** @file
+ * RTE comp PMD APIs
+ *
+ * @note
+ * These APIs are for comp PMDs only and user applications should not call
+ * them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_comp.h"
+#include "rte_compressdev.h"
+
+
+#define RTE_COMPRESSDEV_PMD_NAME_ARG			("name")
+#define RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG			("max_nb_queue_pairs")
+#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG			("socket_id")
+
+
+static const char * const compressdev_pmd_valid_params[] = {
+	RTE_COMPRESSDEV_PMD_NAME_ARG,
+	RTE_COMPRESSDEV_PMD_MAX_NB_QP_ARG,
+	RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
+};
+
+/**
+ * @internal
+ * Initialisation parameters for comp devices
+ */
+struct rte_compressdev_pmd_init_params {
+	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	size_t private_data_size;
+	int socket_id;
+	unsigned int max_nb_queue_pairs;
+};
+
+/** Global structure used for maintaining state of allocated comp devices */
+struct rte_compressdev_global {
+	struct rte_compressdev *devs;	/**< Device information array */
+	struct rte_compressdev_data *data[RTE_COMPRESS_MAX_DEVS];
+	/**< Device private data */
+	uint8_t nb_devs;		/**< Number of devices found */
+	uint8_t max_devs;		/**< Max number of devices */
+};
+
+/* compressdev driver, containing the driver ID */
+struct compressdev_driver {
+	TAILQ_ENTRY(compressdev_driver) next; /**< Next in list. */
+	const struct rte_driver *driver;
+	uint8_t id;
+};
+
+/** pointer to global comp devices data structure. */
+extern struct rte_compressdev_global *rte_compressdev_globals;
+
+/**
+ * Get the rte_compressdev structure device pointer for the device. Assumes a
+ * valid device index.
+ *
+ * @param	dev_id	Device ID value to select the device structure.
+ *
+ * @return
+ *   - The rte_compressdev structure pointer for the given device ID.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_get_dev(uint8_t dev_id);
+
+/**
+ * Get the rte_compressdev structure device pointer for the named device.
+ *
+ * @param	name	device name to select the device structure.
+ *
+ * @return
+ *   - The rte_compressdev structure pointer for the given device name.
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_get_named_dev(const char *name);
+
+/**
+ * Validate if the comp device index corresponds to a valid attached
+ * comp device.
+ *
+ * @param	dev_id	comp device index.
+ *
+ * @return
+ *   - If the device index is valid (1) or not (0).
+ */
+unsigned int
+rte_compressdev_pmd_is_valid_dev(uint8_t dev_id);
+
+/**
+ * The pool of rte_compressdev structures.
+ */
+extern struct rte_compressdev *rte_compressdevs;
+
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * generic structure of type *rte_compressdev_ops* supplied in the
+ * *rte_compressdev* structure associated with a device.
+ */
+
+/**
+ * Function used to configure a device.
+ *
+ * @param	dev	comp device pointer
+ * @param	config	comp device configurations
+ *
+ * @return	Returns 0 on success
+ */
+typedef int (*compressdev_configure_t)(struct rte_compressdev *dev,
+		struct rte_compressdev_config *config);
+
+/**
+ * Function used to start a configured device.
+ *
+ * @param	dev	comp device pointer
+ *
+ * @return	Returns 0 on success
+ */
+typedef int (*compressdev_start_t)(struct rte_compressdev *dev);
+
+/**
+ * Function used to stop a configured device.
+ *
+ * @param	dev	comp device pointer
+ */
+typedef void (*compressdev_stop_t)(struct rte_compressdev *dev);
+
+/**
+ * Function used to close a configured device.
+ *
+ * @param	dev	comp device pointer
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*compressdev_close_t)(struct rte_compressdev *dev);
+
+
+/**
+ * Function used to get statistics of a device.
+ *
+ * @param	dev	comp device pointer
+ * @param	stats	Pointer to comp device stats structure to populate
+ */
+typedef void (*compressdev_stats_get_t)(struct rte_compressdev *dev,
+				struct rte_compressdev_stats *stats);
+
+
+/**
+ * Function used to reset statistics of a device.
+ *
+ * @param	dev	comp device pointer
+ */
+typedef void (*compressdev_stats_reset_t)(struct rte_compressdev *dev);
+
+
+/**
+ * Function used to get specific information of a device.
+ *
+ * @param	dev		comp device pointer
+ * @param	dev_info	comp device info structure to populate
+ */
+typedef void (*compressdev_info_get_t)(struct rte_compressdev *dev,
+				struct rte_compressdev_info *dev_info);
+
+/**
+ * Start queue pair of a device.
+ *
+ * @param	dev	comp device pointer
+ * @param	qp_id	Queue Pair Index
+ *
+ * @return	Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_start_t)(struct rte_compressdev *dev,
+				uint16_t qp_id);
+
+/**
+ * Stop queue pair of a device.
+ *
+ * @param	dev	comp device pointer
+ * @param	qp_id	Queue Pair Index
+ *
+ * @return	Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_stop_t)(struct rte_compressdev *dev,
+				uint16_t qp_id);
+
+/**
+ * Setup a queue pair for a device.
+ *
+ * @param	dev		comp device pointer
+ * @param	qp_id		Queue Pair Index
+ * @param	qp_conf		Queue configuration structure
+ * @param	socket_id	Socket Index
+ * @param	session_pool	Pointer to device session mempool
+ *
+ * @return	Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_setup_t)(struct rte_compressdev *dev,
+		uint16_t qp_id,	const struct rte_compressdev_qp_conf *qp_conf,
+		int socket_id, struct rte_mempool *session_pool);
+
+/**
+ * Release memory resources allocated by given queue pair.
+ *
+ * @param	dev	comp device pointer
+ * @param	qp_id	Queue Pair Index
+ *
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*compressdev_queue_pair_release_t)(struct rte_compressdev *dev,
+		uint16_t qp_id);
+
+/**
+ * Get number of available queue pairs of a device.
+ *
+ * @param	dev	comp device pointer
+ *
+ * @return	Returns number of queue pairs on success.
+ */
+typedef uint32_t (*compressdev_queue_pair_count_t)(struct rte_compressdev *dev);
+
+/**
+ * Create a session mempool to allocate sessions from
+ *
+ * @param	dev		comp device pointer
+ * @param	nb_objs		number of session objects in mempool
+ * @param	obj_cache_size	lcore object cache size, see *rte_ring_create*
+ * @param	socket_id	Socket Id to allocate mempool on.
+ *
+ * @return
+ * - 0 on success
+ * - negative value on failure
+ */
+typedef int (*compressdev_create_session_pool_t)(
+		struct rte_compressdev *dev, unsigned nb_objs,
+		unsigned obj_cache_size, int socket_id);
+
+
+/**
+ * Get the size of a compressdev session
+ *
+ * @param	dev		comp device pointer
+ *
+ * @return
+ *  - On success returns the size of the session structure for device
+ *  - On failure returns 0
+ */
+typedef unsigned (*compressdev_get_session_private_size_t)(
+		struct rte_compressdev *dev);
+
+/**
+ * Configure a comp session on a device.
+ *
+ * @param	dev		comp device pointer
+ * @param	xform		Single or chain of comp xforms
+ * @param	session		Pointer to the comp session structure
+ * @param	mp		Mempool where the private session is allocated
+ *
+ * @return
+ *  - Returns 0 if private session structure has been created successfully.
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support the comp transform.
+ *  - Returns -ENOMEM if the private session could not be allocated.
+ */
+typedef int (*compressdev_configure_session_t)(struct rte_compressdev *dev,
+		struct rte_comp_xform *xform,
+		struct rte_comp_session *session,
+		struct rte_mempool *mp);
+
+/**
+ * Free driver private session data.
+ *
+ * @param	dev		comp device pointer
+ * @param	sess		compressdev session structure
+ */
+typedef void (*compressdev_free_session_t)(struct rte_compressdev *dev,
+		struct rte_comp_session *sess);
+
+/**
+ * Optional API for drivers to attach a session to a queue pair.
+ * @param	dev		comp device pointer
+ * @param	qp_id		queue pair id for attaching session
+ * @param	session_private	Pointer to the compressdev's private session data
+ * @return
+ *  - Return 0 on success
+ */
+typedef int (*compressdev_queue_pair_attach_session_t)(
+		  struct rte_compressdev *dev,
+		  uint16_t qp_id,
+		  void *session_private);
+
+/**
+ * Optional API for drivers to detach a session from a queue pair.
+ * @param	dev		comp device pointer
+ * @param	qp_id		queue pair id for detaching session
+ * @param	session_private	Pointer to the compressdev's private session data
+ * @return
+ *  - Return 0 on success
+ */
+typedef int (*compressdev_queue_pair_detach_session_t)(
+		  struct rte_compressdev *dev,
+		  uint16_t qp_id,
+		  void *session_private);
+
+/** comp device operations function pointer table */
+struct rte_compressdev_ops {
+	compressdev_configure_t dev_configure;	/**< Configure device. */
+	compressdev_start_t dev_start;		/**< Start device. */
+	compressdev_stop_t dev_stop;		/**< Stop device. */
+	compressdev_close_t dev_close;		/**< Close device. */
+
+	compressdev_info_get_t dev_infos_get;	/**< Get device info. */
+
+	compressdev_stats_get_t stats_get;
+	/**< Get device statistics. */
+	compressdev_stats_reset_t stats_reset;
+	/**< Reset device statistics. */
+
+	compressdev_queue_pair_setup_t queue_pair_setup;
+	/**< Set up a device queue pair. */
+	compressdev_queue_pair_release_t queue_pair_release;
+	/**< Release a queue pair. */
+	compressdev_queue_pair_start_t queue_pair_start;
+	/**< Start a queue pair. */
+	compressdev_queue_pair_stop_t queue_pair_stop;
+	/**< Stop a queue pair. */
+	compressdev_queue_pair_count_t queue_pair_count;
+	/**< Get count of the queue pairs. */
+
+	compressdev_get_session_private_size_t session_get_size;
+	/**< Return size of the private session data. */
+	compressdev_configure_session_t session_configure;
+	/**< Configure a comp session. */
+	compressdev_free_session_t session_clear;
+	/**< Clear a comp sessions private data. */
+	compressdev_queue_pair_attach_session_t qp_attach_session;
+	/**< Attach session to queue pair. */
+	compressdev_queue_pair_detach_session_t qp_detach_session;
+	/**< Detach session from queue pair. */
+};
+
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Allocates a new compressdev slot for a comp device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param	name		Unique identifier name for each device
+ * @param	socket_id	Socket to allocate resources on.
+ * @return
+ *   - Slot in the rte_compressdevs array for a new device;
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Release the specified compressdev device.
+ *
+ * @param compressdev
+ * The *compressdev* pointer is the address of the *rte_compressdev* structure.
+ * @return
+ *   - 0 on success, negative on error
+ */
+extern int
+rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev);
+
+
+/**
+ * @internal
+ *
+ * PMD assist function to parse initialisation arguments for comp driver
+ * when creating a new comp PMD device instance.
+ *
+ * The PMD should set default values for the parameters before calling this
+ * function; these default values will be overwritten with any values
+ * successfully parsed from the args string.
+ *
+ * @param	params	parsed PMD initialisation parameters
+ * @param	args	input argument string to parse
+ *
+ * @return
+ *  - 0 on success
+ *  - errno on failure
+ */
+int
+rte_compressdev_pmd_parse_input_args(
+		struct rte_compressdev_pmd_init_params *params,
+		const char *args);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a comp driver to create
+ * and allocate resources for a new comp PMD device instance.
+ *
+ * @param	name	comp device name.
+ * @param	device	base device instance
+ * @param	params	PMD initialisation parameters
+ *
+ * @return
+ *  - comp device instance on success
+ *  - NULL on creation failure
+ */
+struct rte_compressdev *
+rte_compressdev_pmd_create(const char *name,
+		struct rte_device *device,
+		struct rte_compressdev_pmd_init_params *params);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a comp driver to
+ * destroy and free resources associated with a comp PMD device instance.
+ *
+ * @param	compressdev	comp device handle.
+ *
+ * @return
+ *  - 0 on success
+ *  - errno on failure
+ */
+int
+rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev);
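For illustration, a hypothetical vdev-based PMD probe routine using the assist functions above might look like the sketch below; everything prefixed "example_" is invented for the sketch, and the vdev helpers are assumed to come from rte_bus_vdev.h:

#include <rte_bus_vdev.h>

/* hypothetical per-device private data */
struct example_comp_private {
	unsigned int max_nb_queue_pairs;
};

static int
example_comp_pmd_probe(struct rte_vdev_device *vdev)
{
	struct rte_compressdev_pmd_init_params init_params = {
		"",				/* name taken from devargs if given */
		sizeof(struct example_comp_private),
		rte_socket_id(),
		1				/* default max queue pairs */
	};
	struct rte_compressdev *dev;

	if (rte_compressdev_pmd_parse_input_args(&init_params,
			rte_vdev_device_args(vdev)) < 0)
		return -EINVAL;

	dev = rte_compressdev_pmd_create(rte_vdev_device_name(vdev),
			&vdev->device, &init_params);
	if (dev == NULL)
		return -ENODEV;

	/* a real PMD would now fill in dev->dev_ops and the burst pointers */
	return 0;
}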
+
+/**
+ * Executes all the user application registered callbacks for the specific
+ * device.
+ *
+ * @param	dev	Pointer to compressdev struct
+ * @param	event	comp device interrupt event type.
+ *
+ * @return
+ *  void
+ */
+void rte_compressdev_pmd_callback_process(struct rte_compressdev *dev,
+				enum rte_compressdev_event_type event);
+
+/**
+ * @internal
+ * Create unique device name
+ */
+int
+rte_compressdev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+
+/**
+ * @internal
+ * Allocate compressdev driver.
+ *
+ * @param comp_drv
+ *   Pointer to compressdev_driver.
+ * @param drv
+ *   Pointer to rte_driver.
+ *
+ * @return
+ *  The driver type identifier
+ */
+uint8_t rte_compressdev_allocate_driver(struct compressdev_driver *comp_drv,
+		const struct rte_driver *drv);
+
+
+#define RTE_PMD_REGISTER_COMPRESSDEV_DRIVER(comp_drv, drv, driver_id)\
+RTE_INIT(init_ ##driver_id);\
+static void init_ ##driver_id(void)\
+{\
+	driver_id = rte_compressdev_allocate_driver(&comp_drv, &(drv).driver);\
+}
+
+static inline void *
+get_session_private_data(const struct rte_comp_session *sess,
+		uint8_t driver_id)
+{
+	return sess->sess_private_data[driver_id];
+}
+
+static inline void
+set_session_private_data(struct rte_comp_session *sess,
+		uint8_t driver_id, void *private_data)
+{
+	sess->sess_private_data[driver_id] = private_data;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMPRESSDEV_PMD_H_ */
diff --git a/lib/librte_compressdev/rte_compressdev_version.map b/lib/librte_compressdev/rte_compressdev_version.map
new file mode 100644
index 0000000..0a8354c
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_version.map
@@ -0,0 +1,58 @@
+EXPERIMENTAL {
+	global:
+
+	rte_compressdevs;
+	rte_compressdev_callback_register;
+	rte_compressdev_callback_unregister;
+	rte_compressdev_close;
+	rte_compressdev_count;
+	rte_compressdev_configure;
+	rte_compressdev_get_dev_id;
+	rte_compressdev_allocate_driver;
+	rte_compressdev_device_count_by_driver;
+	rte_compressdev_driver_id_get;
+	rte_compressdev_driver_name_get;
+	rte_compressdev_name_get;
+	rte_compressdev_socket_id;
+	rte_compressdev_devices_get;
+	rte_compressdev_start;
+	rte_compressdev_stop;
+
+	rte_compressdev_queue_pair_count;
+	rte_compressdev_queue_pair_setup;
+	rte_compressdev_queue_pair_start;
+	rte_compressdev_queue_pair_stop;
+	rte_compressdev_queue_pair_attach_session;
+	rte_compressdev_queue_pair_detach_session;
+
+	rte_compressdev_info_get;
+	rte_compressdev_get_feature_name;
+	rte_compressdev_stats_get;
+	rte_compressdev_stats_reset;
+
+	rte_compressdev_get_header_session_size;
+	rte_compressdev_get_private_session_size;
+	rte_compressdev_session_create;
+	rte_compressdev_session_init;
+	rte_compressdev_session_clear;
+	rte_compressdev_session_free;
+
+	rte_comp_op_pool_create;
+
+	rte_compressdev_pmd_create_dev_name;
+	rte_compressdev_pmd_get_dev;
+	rte_compressdev_pmd_get_named_dev;
+	rte_compressdev_pmd_is_valid_dev;
+	rte_compressdev_pmd_create;
+	rte_compressdev_pmd_destroy;
+	rte_compressdev_pmd_parse_input_args;
+	rte_compressdev_pmd_allocate;
+	rte_compressdev_pmd_callback_process;
+	rte_compressdev_pmd_release_device;
+
+	local: *;
+};
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 6c2d356..04d145a 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -88,6 +88,7 @@ struct rte_logs {
 #define RTE_LOGTYPE_EFD       18 /**< Log related to EFD. */
 #define RTE_LOGTYPE_EVENTDEV  19 /**< Log related to eventdev. */
 #define RTE_LOGTYPE_GSO       20 /**< Log related to GSO. */
+#define RTE_LOGTYPE_COMPRESSDEV 21 /**< Log related to compressdev. */
 
 /* these log types can be used in an application */
 #define RTE_LOGTYPE_USER1     24 /**< User-defined log type 1. */
-- 
1.7.0.7



More information about the dev mailing list