* [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions
@ 2019-08-26  7:44 Adam Dybkowski
  2019-08-26  7:44 ` [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression Adam Dybkowski
                   ` (3 more replies)
  0 siblings, 4 replies; 22+ messages in thread
From: Adam Dybkowski @ 2019-08-26  7:44 UTC (permalink / raw)
  To: dev, fiona.trahe, akhil.goyal, arturx.trybula; +Cc: Adam Dybkowski

This patch adds QAT RAM bank definitions and related macros.
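
For reference, the new ICP_QAT_FW_COMP_RAM_FLAGS_BUILD macro fills the
ram_bank_flags field of the compression content descriptor. A minimal
illustration, simplified from its use in patch 2/4 of this series
(banks A-E enabled to hold the decompression history):

	comp_req->comp_cd_ctrl.ram_bank_flags =
		ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
			ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */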

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 73 ++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
index 813817720..c89a2c2fd 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -479,4 +479,77 @@ struct icp_qat_fw_comp_resp {
 	/**< Common response params (checksums and byte counts) */
 };
 
+/* RAM Bank definitions */
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *      Definition of the ram bank enabled values
+ * @description
+ *      Enumeration used to define whether a ram bank is enabled or not
+ *
+ *****************************************************************************/
+enum icp_qat_fw_comp_bank_enabled {
+	ICP_QAT_FW_COMP_BANK_DISABLED = 0, /*!< BANK DISABLED */
+	ICP_QAT_FW_COMP_BANK_ENABLED = 1,  /*!< BANK ENABLED */
+	ICP_QAT_FW_COMP_BANK_DELIMITER = 2 /**< Delimiter type */
+};
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *
+ * @description
+ *      Build the ram bank flags in the compression content descriptor
+ *      which specify which banks are used to save history
+ *
+ * @param bank_i_enable
+ * @param bank_h_enable
+ * @param bank_g_enable
+ * @param bank_f_enable
+ * @param bank_e_enable
+ * @param bank_d_enable
+ * @param bank_c_enable
+ * @param bank_b_enable
+ * @param bank_a_enable
+ *****************************************************************************/
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable,                         \
+					bank_h_enable,                         \
+					bank_g_enable,                         \
+					bank_f_enable,                         \
+					bank_e_enable,                         \
+					bank_d_enable,                         \
+					bank_c_enable,                         \
+					bank_b_enable,                         \
+					bank_a_enable)                         \
+	((((bank_i_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                         \
+		<< QAT_FW_COMP_BANK_I_BITPOS) |                                \
+	(((bank_h_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_H_BITPOS) |                                \
+	(((bank_g_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_G_BITPOS) |                                \
+	(((bank_f_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_F_BITPOS) |                                \
+	(((bank_e_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_E_BITPOS) |                                \
+	(((bank_d_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_D_BITPOS) |                                \
+	(((bank_c_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_C_BITPOS) |                                \
+	(((bank_b_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_B_BITPOS) |                                \
+	(((bank_a_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_A_BITPOS))
+
 #endif
-- 
2.17.1



* [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression
  2019-08-26  7:44 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
@ 2019-08-26  7:44 ` Adam Dybkowski
  2019-09-20 10:09   ` Trahe, Fiona
  2019-08-26  7:45 ` [dpdk-dev] [PATCH 3/4] test/compress: add stateful decompression tests Adam Dybkowski
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-08-26  7:44 UTC (permalink / raw)
  To: dev, fiona.trahe, akhil.goyal, arturx.trybula; +Cc: Adam Dybkowski

This patch adds the stateful decompression feature
to the DPDK QAT PMD.
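
From the application point of view this uses the existing compressdev
stream API. A minimal sketch of one decompression flow follows; device
id 0 is assumed, "decomp_xform" stands for an already prepared
decompress xform, "op" for an allocated rte_comp_op with src/dst mbufs
attached, and the enqueue/dequeue calls and error handling are omitted:

	void *stream = NULL;
	int ret;

	/* one stream object per stateful decompression flow */
	ret = rte_compressdev_stream_create(0, &decomp_xform, &stream);
	if (ret < 0)
		return ret;

	op->op_type = RTE_COMP_OP_STATEFUL;
	op->stream = stream;
	op->flush_flag = RTE_COMP_FLUSH_SYNC;
	/* enqueue the op and dequeue the result as for stateless ops */

	if (op->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
		/* output buffer was too small: drain it, then resubmit
		 * the remaining input starting at op->consumed */
		op->src.offset += op->consumed;
		op->src.length -= op->consumed;
	}

	rte_compressdev_stream_free(0, stream);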

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 drivers/compress/qat/qat_comp.c     | 256 +++++++++++++++++++++++++---
 drivers/compress/qat/qat_comp.h     |  32 ++++
 drivers/compress/qat/qat_comp_pmd.c | 166 ++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.h |   2 +
 4 files changed, 423 insertions(+), 33 deletions(-)

diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index 835aaa838..a80cd6864 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -27,22 +27,51 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 	struct rte_comp_op *op = in_op;
 	struct qat_comp_op_cookie *cookie =
 			(struct qat_comp_op_cookie *)op_cookie;
-	struct qat_comp_xform *qat_xform = op->private_xform;
-	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
+	const uint8_t *tmpl;
 	struct icp_qat_fw_comp_req *comp_req =
 	    (struct icp_qat_fw_comp_req *)out_msg;
 
-	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
-				"operation requests, op (%p) is not a "
-				"stateless operation.", op);
-		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
-		return -EINVAL;
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = op->stream;
+		qat_xform = &stream->qat_xform;
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			return -EINVAL;
+		}
+		if (unlikely(stream->op_in_progress)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
+			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
+			return -EINVAL;
+		}
+		stream->op_in_progress = 1;
+	} else {
+		stream = NULL;
+		qat_xform = op->private_xform;
 	}
+	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
 
 	rte_mov128(out_msg, tmpl);
 	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comp_pars.req_par_flags =
+			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+				(stream->start_of_packet) ?
+					ICP_QAT_FW_COMP_SOP
+				      : ICP_QAT_FW_COMP_NOT_SOP,
+				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
+				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
+					ICP_QAT_FW_COMP_EOP
+				      : ICP_QAT_FW_COMP_NOT_EOP,
+				ICP_QAT_FW_COMP_NOT_BFINAL,
+				ICP_QAT_FW_COMP_NO_CNV,
+				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+	}
+
 	if (likely(qat_xform->qat_comp_request_type ==
 		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
 		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
@@ -94,6 +123,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_src->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -111,6 +143,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -129,6 +164,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_dst->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -146,6 +184,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -202,12 +243,22 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 			(struct qat_comp_op_cookie *)op_cookie;
 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
 			(resp_msg->opaque_data);
-	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
-				(rx_op->private_xform);
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
 	int err = resp_msg->comn_resp.comn_status &
 			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
 			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
 
+	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = rx_op->stream;
+		qat_xform = &stream->qat_xform;
+		/* clear op-in-progress flag */
+		stream->op_in_progress = 0;
+	} else {
+		stream = NULL;
+		qat_xform = rx_op->private_xform;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -254,7 +305,21 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		int8_t xlat_err_code =
 			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
 
-		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
+		/* handle recoverable out-of-buffer condition in stateful */
+		/* decompression scenario */
+		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
+				&& qat_xform->qat_comp_request_type
+					== QAT_COMP_REQUEST_DECOMPRESS
+				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+			struct icp_qat_fw_resp_comp_pars *comp_resp =
+					&resp_msg->comp_resp_pars;
+			rx_op->status =
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
+			rx_op->consumed = comp_resp->input_byte_counter;
+			rx_op->produced = comp_resp->output_byte_counter;
+			stream->start_of_packet = 0;
+		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
+			  && !xlat_err_code)
 				||
 		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
 				||
@@ -275,6 +340,8 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
 		rx_op->consumed = comp_resp->input_byte_counter;
 		rx_op->produced = comp_resp->output_byte_counter;
+		if (stream)
+			stream->start_of_packet = 0;
 
 		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
 			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
@@ -297,6 +364,12 @@ qat_comp_xform_size(void)
 	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
 }
 
+unsigned int
+qat_comp_stream_size(void)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
+}
+
 static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 				    enum qat_comp_request_type request)
 {
@@ -317,7 +390,9 @@ static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 
 static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 			const struct rte_memzone *interm_buff_mz,
-			const struct rte_comp_xform *xform)
+			const struct rte_comp_xform *xform,
+			const struct qat_comp_stream *stream,
+			enum rte_comp_op_type op_type)
 {
 	struct icp_qat_fw_comp_req *comp_req;
 	int comp_level, algo;
@@ -329,6 +404,18 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 		return -EINVAL;
 	}
 
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		if (unlikely(stream == NULL)) {
+			QAT_LOG(ERR, "Stream must be non null for stateful op");
+			return -EINVAL;
+		}
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
+			return -ENOTSUP;
+		}
+	}
+
 	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
 		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
 		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
@@ -376,12 +463,43 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 	qat_comp_create_req_hdr(&comp_req->comn_hdr,
 					qat_xform->qat_comp_request_type);
 
-	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
-	    ICP_QAT_FW_COMP_STATELESS_SESSION,
-	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATEFUL_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+		/* Decompression state registers */
+		comp_req->comp_cd_ctrl.comp_state_addr =
+				stream->state_registers_decomp_phys;
+
+		/* Enable A, B, C, D, and E (CAMs). */
+		comp_req->comp_cd_ctrl.ram_bank_flags =
+			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
+				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
+
+		comp_req->comp_cd_ctrl.ram_banks_addr =
+				stream->inflate_context_phys;
+	} else {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATELESS_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	}
 
 	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
 	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
@@ -497,7 +615,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
 		qat_xform->checksum_type = xform->decompress.chksum;
 	}
 
-	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
+				      NULL, RTE_COMP_OP_STATELESS)) {
 		QAT_LOG(ERR, "QAT: Problem with setting compression");
 		return -EINVAL;
 	}
@@ -532,3 +651,102 @@ qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
 	}
 	return -EINVAL;
 }
+
+/**
+ * Reset stream state for the next use.
+ *
+ * @param stream
+ *   handle of pmd's private stream data
+ */
+static void
+qat_comp_stream_reset(struct qat_comp_stream *stream)
+{
+	if (stream) {
+		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
+		stream->start_of_packet = 1;
+		stream->op_in_progress = 0;
+	}
+}
+
+/**
+ * Create driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param xform
+ *   xform data
+ * @param stream
+ *   ptr where handle of pmd's private stream data should be stored
+ * @return
+ *  - Returns 0 if private stream structure has been created successfully.
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -ENOTSUP if comp device does not support the comp transform.
+ *  - Returns -ENOMEM if the private stream could not be allocated.
+ */
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream)
+{
+	struct qat_comp_dev_private *qat = dev->data->dev_private;
+	struct qat_comp_stream *ptr;
+
+	if (unlikely(stream == NULL)) {
+		QAT_LOG(ERR, "QAT: stream parameter is NULL");
+		return -EINVAL;
+	}
+	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
+		QAT_LOG(ERR, "QAT: stateful compression not supported");
+		return -ENOTSUP;
+	}
+	if (unlikely(qat->streampool == NULL)) {
+		QAT_LOG(ERR, "QAT device has no stream mempool");
+		return -ENOMEM;
+	}
+	if (rte_mempool_get(qat->streampool, stream)) {
+		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
+		return -ENOMEM;
+	}
+
+	ptr = (struct qat_comp_stream *) *stream;
+	qat_comp_stream_reset(ptr);
+	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+	ptr->qat_xform.checksum_type = xform->decompress.chksum;
+
+	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
+				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
+		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
+		rte_mempool_put(qat->streampool, *stream);
+		*stream = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Free driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param stream
+ *   handle of pmd's private stream data
+ * @return
+ *  - 0 if successful
+ *  - <0 in error cases
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -EBUSY if can't free stream as there are inflight operations
+ */
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
+{
+	if (stream) {
+		struct qat_comp_dev_private *qat = dev->data->dev_private;
+		qat_comp_stream_reset((struct qat_comp_stream *) stream);
+		rte_mempool_put(qat->streampool, stream);
+		return 0;
+	}
+	return -EINVAL;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 61d12ecf4..2231451a1 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -26,6 +26,16 @@
 
 #define QAT_MIN_OUT_BUF_SIZE 46
 
+/* maximum size of the state registers */
+#define QAT_STATE_REGISTERS_MAX_SIZE 64
+
+/* decompressor context size */
+#define QAT_INFLATE_CONTEXT_SIZE_GEN1 36864
+#define QAT_INFLATE_CONTEXT_SIZE_GEN2 34032
+#define QAT_INFLATE_CONTEXT_SIZE_GEN3 34032
+#define QAT_INFLATE_CONTEXT_SIZE RTE_MAX(RTE_MAX(QAT_INFLATE_CONTEXT_SIZE_GEN1,\
+		QAT_INFLATE_CONTEXT_SIZE_GEN2), QAT_INFLATE_CONTEXT_SIZE_GEN3)
+
 enum qat_comp_request_type {
 	QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
 	QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
@@ -61,6 +71,17 @@ struct qat_comp_xform {
 	enum rte_comp_checksum_type checksum_type;
 };
 
+struct qat_comp_stream {
+	struct qat_comp_xform qat_xform;
+	void *state_registers_decomp;
+	phys_addr_t state_registers_decomp_phys;
+	void *inflate_context;
+	phys_addr_t inflate_context_phys;
+	const struct rte_memzone *memzone;
+	uint8_t start_of_packet;
+	volatile uint8_t op_in_progress;
+};
+
 int
 qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		       enum qat_device_gen qat_dev_gen __rte_unused);
@@ -80,5 +101,16 @@ qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
 unsigned int
 qat_comp_xform_size(void);
 
+unsigned int
+qat_comp_stream_size(void);
+
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream);
+
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+
 #endif
 #endif
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 072647217..05b7dfe77 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -9,6 +9,12 @@
 
 #define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
 
+struct stream_create_info {
+	struct qat_comp_dev_private *comp_dev;
+	int socket_id;
+	int error;
+};
+
 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 	{/* COMPRESSION - deflate */
 	 .algo = RTE_COMP_ALGO_DEFLATE,
@@ -21,7 +27,8 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 				RTE_COMP_FF_HUFFMAN_DYNAMIC |
 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
 				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
-				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
 	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
 
@@ -315,6 +322,120 @@ qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
 	return mp;
 }
 
+static void
+qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
+		     void *obj, unsigned int obj_idx)
+{
+	struct stream_create_info *info = opaque;
+	struct qat_comp_stream *stream = obj;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *memzone;
+	struct qat_inter_sgl *ram_banks_desc;
+
+	/* find a memzone for RAM banks */
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
+		 info->comp_dev->qat_dev->name, obj_idx);
+	memzone = rte_memzone_lookup(mz_name);
+	if (memzone == NULL) {
+		/* allocate a memzone for compression state and RAM banks */
+		memzone = rte_memzone_reserve_aligned(mz_name,
+			QAT_STATE_REGISTERS_MAX_SIZE
+				+ sizeof(struct qat_inter_sgl)
+				+ QAT_INFLATE_CONTEXT_SIZE,
+			info->socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
+		if (memzone == NULL) {
+			QAT_LOG(ERR,
+			    "Can't allocate RAM banks for device %s, object %u",
+				info->comp_dev->qat_dev->name, obj_idx);
+			info->error = -ENOMEM;
+			return;
+		}
+	}
+
+	/* prepare the buffer list descriptor for RAM banks */
+	ram_banks_desc = (struct qat_inter_sgl *)
+		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
+	ram_banks_desc->num_bufs = 1;
+	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
+	ram_banks_desc->buffers[0].addr = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE
+			+ sizeof(struct qat_inter_sgl);
+
+	memset(stream, 0, qat_comp_stream_size());
+	stream->memzone = memzone;
+	stream->state_registers_decomp = memzone->addr;
+	stream->state_registers_decomp_phys = memzone->iova;
+	stream->inflate_context = ((uint8_t *) memzone->addr)
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+	stream->inflate_context_phys = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+}
+
+static void
+qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
+			void *opaque __rte_unused, void *obj,
+			unsigned obj_idx __rte_unused)
+{
+	struct qat_comp_stream *stream = obj;
+
+	rte_memzone_free(stream->memzone);
+}
+
+static struct rte_mempool *
+qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
+			    int socket_id,
+			    uint32_t num_elements)
+{
+	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mp;
+
+	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
+		 "%s_streams", comp_dev->qat_dev->name);
+
+	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
+	mp = rte_mempool_lookup(stream_pool_name);
+
+	if (mp != NULL) {
+		QAT_LOG(DEBUG, "streampool already created");
+		if (mp->size != num_elements) {
+			QAT_LOG(DEBUG, "streampool wrong size - delete it");
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			rte_mempool_free(mp);
+			mp = NULL;
+			comp_dev->streampool = NULL;
+		}
+	}
+
+	if (mp == NULL) {
+		struct stream_create_info info = {
+			.comp_dev = comp_dev,
+			.socket_id = socket_id,
+			.error = 0
+		};
+		mp = rte_mempool_create(stream_pool_name,
+				num_elements,
+				qat_comp_stream_size(), 0, 0,
+				NULL, NULL, qat_comp_stream_init, &info,
+				socket_id, 0);
+		if (mp == NULL) {
+			QAT_LOG(ERR,
+			     "Err creating mempool %s w %d elements of size %d",
+			     stream_pool_name, num_elements,
+			     qat_comp_stream_size());
+		} else if (info.error) {
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			QAT_LOG(ERR,
+			     "Destroying mempool %s as at least one element failed initialisation",
+			     stream_pool_name);
+			rte_mempool_free(mp);
+			mp = NULL;
+		}
+	}
+
+	return mp;
+}
+
 static void
 _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 {
@@ -330,6 +451,14 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 		rte_mempool_free(comp_dev->xformpool);
 		comp_dev->xformpool = NULL;
 	}
+
+	/* Free stream pool */
+	if (comp_dev->streampool) {
+		rte_mempool_obj_iter(comp_dev->streampool,
+				     qat_comp_stream_destroy, NULL);
+		rte_mempool_free(comp_dev->streampool);
+		comp_dev->streampool = NULL;
+	}
 }
 
 static int
@@ -339,12 +468,6 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
 	int ret = 0;
 
-	if (config->max_nb_streams != 0) {
-		QAT_LOG(ERR,
-	"QAT device does not support STATEFUL so max_nb_streams must be 0");
-		return -EINVAL;
-	}
-
 	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
 		QAT_LOG(WARNING,
 			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
@@ -360,13 +483,26 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 		}
 	}
 
-	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
-					config->max_nb_priv_xforms);
-	if (comp_dev->xformpool == NULL) {
+	if (config->max_nb_priv_xforms) {
+		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+					    config, config->max_nb_priv_xforms);
+		if (comp_dev->xformpool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->xformpool = NULL;
+
+	if (config->max_nb_streams) {
+		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
+				     config->socket_id, config->max_nb_streams);
+		if (comp_dev->streampool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->streampool = NULL;
 
-		ret = -ENOMEM;
-		goto error_out;
-	}
 	return 0;
 
 error_out:
@@ -508,7 +644,9 @@ static struct rte_compressdev_ops compress_qat_ops = {
 
 	/* Compression related operations */
 	.private_xform_create	= qat_comp_private_xform_create,
-	.private_xform_free	= qat_comp_private_xform_free
+	.private_xform_free	= qat_comp_private_xform_free,
+	.stream_create		= qat_comp_stream_create,
+	.stream_free		= qat_comp_stream_free
 };
 
 /* An rte_driver is needed in the registration of the device with compressdev.
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
index b8299d43a..6979de14d 100644
--- a/drivers/compress/qat/qat_comp_pmd.h
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -30,6 +30,8 @@ struct qat_comp_dev_private {
 	/**< The device's memory for intermediate buffers */
 	struct rte_mempool *xformpool;
 	/**< The device's pool for qat_comp_xforms */
+	struct rte_mempool *streampool;
+	/**< The device's pool for qat_comp_streams */
 };
 
 int
-- 
2.17.1



* [dpdk-dev] [PATCH 3/4] test/compress: add stateful decompression tests
  2019-08-26  7:44 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
  2019-08-26  7:44 ` [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression Adam Dybkowski
@ 2019-08-26  7:45 ` Adam Dybkowski
  2019-08-26  7:45 ` [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT Adam Dybkowski
  2019-09-20 12:44 ` [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression Adam Dybkowski
  3 siblings, 0 replies; 22+ messages in thread
From: Adam Dybkowski @ 2019-08-26  7:45 UTC (permalink / raw)
  To: dev, fiona.trahe, akhil.goyal, arturx.trybula; +Cc: Adam Dybkowski

This patch adds two new tests that cover the stateful
decompression feature.
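
The stateful case drives the decompression in steps: each step produces
output into a small block (decompress_output_block_size), the data is
collected into all_decomp_data, and the operation is resubmitted with
the remaining input while the PMD reports a recoverable out-of-space
status. A simplified sketch of that loop, where collect_output() is a
hypothetical shorthand for the rte_pktmbuf_read()/rte_memcpy() sequence
used in the test:

	unsigned int step = 0;

	for (;;) {
		/* enqueue the op and dequeue the processed result */
		collect_output(all_decomp_data, ops_processed[0]);
		if (ops_processed[0]->src.length <=
				ops_processed[0]->consumed)
			break;	/* whole input consumed */
		if (++step >= test_data->decompress_steps_max)
			return -1;	/* too many steps, fail the test */
		ops[0] = ops_processed[0];
		ops[0]->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
		ops[0]->src.offset += ops_processed[0]->consumed;
		ops[0]->src.length -= ops_processed[0]->consumed;
	}
	/* finally compare all_decomp_data with the original buffer */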

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 app/test/test_compressdev.c | 452 +++++++++++++++++++++++++++++++-----
 1 file changed, 395 insertions(+), 57 deletions(-)

diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c
index 992eac8e0..e92540eee 100644
--- a/app/test/test_compressdev.c
+++ b/app/test/test_compressdev.c
@@ -95,11 +95,15 @@ struct interim_data_params {
 };
 
 struct test_data_params {
-	enum rte_comp_op_type state;
+	enum rte_comp_op_type compress_state;
+	enum rte_comp_op_type decompress_state;
 	enum varied_buff buff_type;
 	enum zlib_direction zlib_dir;
 	unsigned int out_of_space;
 	unsigned int big_data;
+	/* stateful decompression specific parameters */
+	unsigned int decompress_output_block_size;
+	unsigned int decompress_steps_max;
 };
 
 static struct comp_testsuite_params testsuite_params = { 0 };
@@ -237,7 +241,7 @@ generic_ut_setup(void)
 		.socket_id = rte_socket_id(),
 		.nb_queue_pairs = 1,
 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
-		.max_nb_streams = 0
+		.max_nb_streams = 1
 	};
 
 	if (rte_compressdev_configure(0, &config) < 0) {
@@ -275,7 +279,7 @@ test_compressdev_invalid_configuration(void)
 		.socket_id = rte_socket_id(),
 		.nb_queue_pairs = 1,
 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
-		.max_nb_streams = 0
+		.max_nb_streams = 1
 	};
 	struct rte_compressdev_info dev_info;
 
@@ -724,7 +728,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
 	unsigned int num_xforms = int_data->num_xforms;
-	enum rte_comp_op_type state = test_data->state;
+	enum rte_comp_op_type compress_state = test_data->compress_state;
+	enum rte_comp_op_type decompress_state = test_data->decompress_state;
 	unsigned int buff_type = test_data->buff_type;
 	unsigned int out_of_space = test_data->out_of_space;
 	unsigned int big_data = test_data->big_data;
@@ -754,6 +759,13 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	char *contig_buf = NULL;
 	uint64_t compress_checksum[num_bufs];
+	void *stream = NULL;
+	char *all_decomp_data = NULL;
+	unsigned int decomp_produced_data_size = 0;
+	unsigned int step = 0;
+
+	TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
+		    "Number of stateful operations in a step should be 1");
 
 	if (capa == NULL) {
 		RTE_LOG(ERR, USER1,
@@ -768,6 +780,12 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
 
+	if (decompress_state == RTE_COMP_OP_STATEFUL) {
+		data_size = strlen(test_bufs[0]) + 1;
+		all_decomp_data = rte_malloc(NULL, data_size,
+					     RTE_CACHE_LINE_SIZE);
+	}
+
 	if (big_data)
 		buf_pool = ts_params->big_mbuf_pool;
 	else if (buff_type == SGL_BOTH)
@@ -859,9 +877,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		ops[i]->src.offset = 0;
 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
 		ops[i]->dst.offset = 0;
-		if (state == RTE_COMP_OP_STATELESS) {
+		if (compress_state == RTE_COMP_OP_STATELESS)
 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
-		} else {
+		else {
 			RTE_LOG(ERR, USER1,
 				"Stateful operations are not supported "
 				"in these tests yet\n");
@@ -1047,6 +1065,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 					(ops_processed[i] + 1);
 			if (out_of_space == 1 && oos_zlib_compress)
 				data_size = OUT_OF_SPACE_BUF;
+			else if (test_data->decompress_output_block_size != 0)
+				data_size =
+					test_data->decompress_output_block_size;
 			else
 				data_size =
 				strlen(test_bufs[priv_data->orig_idx]) + 1;
@@ -1067,6 +1088,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 					(ops_processed[i] + 1);
 			if (out_of_space == 1 && oos_zlib_compress)
 				data_size = OUT_OF_SPACE_BUF;
+			else if (test_data->decompress_output_block_size != 0)
+				data_size =
+					test_data->decompress_output_block_size;
 			else
 				data_size =
 				strlen(test_bufs[priv_data->orig_idx]) + 1;
@@ -1094,9 +1118,14 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		 * number of bytes that were produced in the previous stage
 		 */
 		ops[i]->src.length = ops_processed[i]->produced;
+
 		ops[i]->dst.offset = 0;
-		if (state == RTE_COMP_OP_STATELESS) {
+		if (decompress_state == RTE_COMP_OP_STATELESS) {
 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
+			ops[i]->op_type = RTE_COMP_OP_STATELESS;
+		} else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
+			ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
+			ops[i]->op_type = RTE_COMP_OP_STATEFUL;
 		} else {
 			RTE_LOG(ERR, USER1,
 				"Stateful operations are not supported "
@@ -1133,33 +1162,12 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 			ops_processed[i] = ops[i];
 		}
 	} else {
-		/* Create decompress private xform data */
-		for (i = 0; i < num_xforms; i++) {
-			ret = rte_compressdev_private_xform_create(0,
-				(const struct rte_comp_xform *)decompress_xforms[i],
-				&priv_xforms[i]);
-			if (ret < 0) {
-				RTE_LOG(ERR, USER1,
-					"Decompression private xform "
-					"could not be created\n");
-				goto exit;
-			}
-			num_priv_xforms++;
-		}
-
-		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
-			/* Attach shareable private xform data to ops */
-			for (i = 0; i < num_bufs; i++) {
-				priv_data = (struct priv_op_data *)(ops[i] + 1);
-				uint16_t xform_idx = priv_data->orig_idx %
-								num_xforms;
-				ops[i]->private_xform = priv_xforms[xform_idx];
-			}
-		} else {
-			/* Create rest of the private xforms for the other ops */
-			for (i = num_xforms; i < num_bufs; i++) {
+		if (decompress_state == RTE_COMP_OP_STATELESS) {
+			/* Create decompress private xform data */
+			for (i = 0; i < num_xforms; i++) {
 				ret = rte_compressdev_private_xform_create(0,
-					decompress_xforms[i % num_xforms],
+					(const struct rte_comp_xform *)
+					decompress_xforms[i],
 					&priv_xforms[i]);
 				if (ret < 0) {
 					RTE_LOG(ERR, USER1,
@@ -1170,14 +1178,60 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 				num_priv_xforms++;
 			}
 
-			/* Attach non shareable private xform data to ops */
-			for (i = 0; i < num_bufs; i++) {
-				priv_data = (struct priv_op_data *)(ops[i] + 1);
-				uint16_t xform_idx = priv_data->orig_idx;
-				ops[i]->private_xform = priv_xforms[xform_idx];
+			if (capa->comp_feature_flags &
+					RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+				/* Attach shareable private xform data to ops */
+				for (i = 0; i < num_bufs; i++) {
+					priv_data = (struct priv_op_data *)
+							(ops[i] + 1);
+					uint16_t xform_idx =
+					       priv_data->orig_idx % num_xforms;
+					ops[i]->private_xform =
+							priv_xforms[xform_idx];
+				}
+			} else {
+				/* Create rest of the private xforms */
+				/* for the other ops */
+				for (i = num_xforms; i < num_bufs; i++) {
+					ret =
+					 rte_compressdev_private_xform_create(0,
+					      decompress_xforms[i % num_xforms],
+					      &priv_xforms[i]);
+					if (ret < 0) {
+						RTE_LOG(ERR, USER1,
+							"Decompression private xform could not be created\n");
+						goto exit;
+					}
+					num_priv_xforms++;
+				}
+
+				/* Attach non shareable private xform data */
+				/* to ops */
+				for (i = 0; i < num_bufs; i++) {
+					priv_data = (struct priv_op_data *)
+							(ops[i] + 1);
+					uint16_t xform_idx =
+							priv_data->orig_idx;
+					ops[i]->private_xform =
+							priv_xforms[xform_idx];
+				}
 			}
+		} else {
+			/* Create a stream object for stateful decompression */
+			ret = rte_compressdev_stream_create(0,
+					decompress_xforms[0], &stream);
+			if (ret < 0) {
+				RTE_LOG(ERR, USER1,
+					"Decompression stream could not be created, error %d\n",
+					ret);
+				goto exit;
+			}
+			/* Attach stream to ops */
+			for (i = 0; i < num_bufs; i++)
+				ops[i]->stream = stream;
 		}
 
+next_step:
 		/* Enqueue and dequeue all operations */
 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
 		if (num_enqd < num_bufs) {
@@ -1233,9 +1287,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	for (i = 0; i < num_bufs; i++) {
 		if (out_of_space && oos_zlib_compress) {
 			if (ops_processed[i]->status !=
-					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
+				   RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
 				ret_status = -1;
-
 				RTE_LOG(ERR, USER1,
 					"Operation without expected out of "
 					"space status error\n");
@@ -1244,7 +1297,75 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 				continue;
 		}
 
-		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+		if (decompress_state == RTE_COMP_OP_STATEFUL
+			&& (ops_processed[i]->status ==
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
+			    || ops_processed[i]->status ==
+				RTE_COMP_OP_STATUS_SUCCESS)) {
+			/* collect the output into all_decomp_data */
+			const void *ptr = rte_pktmbuf_read(
+					ops_processed[i]->m_dst,
+					ops_processed[i]->dst.offset,
+					ops_processed[i]->produced,
+					all_decomp_data +
+						decomp_produced_data_size);
+			if (ptr != all_decomp_data + decomp_produced_data_size)
+				rte_memcpy(all_decomp_data +
+					   decomp_produced_data_size,
+					   ptr, ops_processed[i]->produced);
+			decomp_produced_data_size += ops_processed[i]->produced;
+			if (ops_processed[i]->src.length >
+					ops_processed[i]->consumed) {
+				if (ops_processed[i]->status ==
+						RTE_COMP_OP_STATUS_SUCCESS) {
+					ret_status = -1;
+					RTE_LOG(ERR, USER1,
+					      "Operation finished too early\n");
+					goto exit;
+				}
+				step++;
+				if (step >= test_data->decompress_steps_max) {
+					ret_status = -1;
+					RTE_LOG(ERR, USER1,
+					  "Operation exceeded maximum steps\n");
+					goto exit;
+				}
+				ops[i] = ops_processed[i];
+				ops[i]->status =
+					       RTE_COMP_OP_STATUS_NOT_PROCESSED;
+				ops[i]->src.offset +=
+						ops_processed[i]->consumed;
+				ops[i]->src.length -=
+						ops_processed[i]->consumed;
+				goto next_step;
+			} else {
+				/* Compare the original stream with the */
+				/* decompressed stream (in size and the data) */
+				priv_data = (struct priv_op_data *)
+						(ops_processed[i] + 1);
+				const char *buf1 =
+						test_bufs[priv_data->orig_idx];
+				const char *buf2 = all_decomp_data;
+
+				if (compare_buffers(buf1, strlen(buf1) + 1,
+					  buf2, decomp_produced_data_size) < 0)
+					goto exit;
+				/* Test checksums */
+				if (compress_xforms[0]->compress.chksum
+						!= RTE_COMP_CHECKSUM_NONE) {
+					if (ops_processed[i]->output_chksum
+						      != compress_checksum[i]) {
+						RTE_LOG(ERR, USER1,
+							"The checksums differ\n"
+			     "Compression Checksum: %" PRIu64 "\tDecompression "
+				"Checksum: %" PRIu64 "\n", compress_checksum[i],
+					       ops_processed[i]->output_chksum);
+						goto exit;
+					}
+				}
+			}
+		} else if (ops_processed[i]->status !=
+			   RTE_COMP_OP_STATUS_SUCCESS) {
 			RTE_LOG(ERR, USER1,
 				"Some operations were not successful\n");
 			goto exit;
@@ -1254,7 +1375,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		comp_bufs[priv_data->orig_idx] = NULL;
 	}
 
-	if (out_of_space && oos_zlib_compress) {
+	if ((out_of_space && oos_zlib_compress)
+			|| (decompress_state == RTE_COMP_OP_STATEFUL)) {
 		ret_status = 0;
 		goto exit;
 	}
@@ -1307,10 +1429,13 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		rte_comp_op_free(ops[i]);
 		rte_comp_op_free(ops_processed[i]);
 	}
-	for (i = 0; i < num_priv_xforms; i++) {
+	for (i = 0; i < num_priv_xforms; i++)
 		if (priv_xforms[i] != NULL)
 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
-	}
+	if (stream != NULL)
+		rte_compressdev_stream_free(0, stream);
+	if (all_decomp_data != NULL)
+		rte_free(all_decomp_data);
 	rte_free(contig_buf);
 
 	return ret_status;
@@ -1354,10 +1479,13 @@ test_compressdev_deflate_stateless_fixed(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1425,10 +1553,13 @@ test_compressdev_deflate_stateless_dynamic(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1479,10 +1610,13 @@ test_compressdev_deflate_stateless_multi_op(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1529,10 +1663,13 @@ test_compressdev_deflate_stateless_multi_level(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1619,10 +1756,13 @@ test_compressdev_deflate_stateless_multi_xform(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1665,10 +1805,13 @@ test_compressdev_deflate_stateless_sgl(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		SGL_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1773,10 +1916,13 @@ test_compressdev_deflate_stateless_checksum(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1876,7 +2022,7 @@ test_compressdev_out_of_space_buffer(void)
 	uint16_t i;
 	const struct rte_compressdev_capabilities *capab;
 
-	RTE_LOG(ERR, USER1, "This is a negative test errors are expected\n");
+	RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
 
 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
@@ -1884,16 +2030,6 @@ test_compressdev_out_of_space_buffer(void)
 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
 		return -ENOTSUP;
 
-	struct rte_comp_xform *compress_xform =
-			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
-
-	if (compress_xform == NULL) {
-		RTE_LOG(ERR, USER1,
-			"Compress xform could not be created\n");
-		ret = TEST_FAILED;
-		goto exit;
-	}
-
 	struct interim_data_params int_data = {
 		&compress_test_bufs[0],
 		1,
@@ -1904,10 +2040,13 @@ test_compressdev_out_of_space_buffer(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		1,  /* run out-of-space test */
+		0,
+		0,
 		0
 	};
 	/* Compress with compressdev, decompress with Zlib */
@@ -1945,7 +2084,6 @@ test_compressdev_out_of_space_buffer(void)
 	ret  = TEST_SUCCESS;
 
 exit:
-	rte_free(compress_xform);
 	return ret;
 }
 
@@ -1985,11 +2123,14 @@ test_compressdev_deflate_stateless_dynamic_big(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		SGL_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
-		1
+		1,
+		0,
+		0
 	};
 
 	ts_params->def_comp_xform->compress.deflate.huffman =
@@ -2022,6 +2163,199 @@ test_compressdev_deflate_stateless_dynamic_big(void)
 	return ret;
 }
 
+static int
+test_compressdev_deflate_stateful_decomp(void)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret;
+	uint16_t i;
+	const struct rte_compressdev_capabilities *capab;
+
+	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
+		return -ENOTSUP;
+
+	struct interim_data_params int_data = {
+		&compress_test_bufs[0],
+		1,
+		&i,
+		&ts_params->def_comp_xform,
+		&ts_params->def_decomp_xform,
+		1
+	};
+
+	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
+		RTE_COMP_OP_STATEFUL,
+		LB_BOTH,
+		ZLIB_COMPRESS,
+		0,
+		0,
+		2000,
+		4
+	};
+
+	/* Compress with Zlib, decompress with compressdev */
+	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+		ret = TEST_FAILED;
+		goto exit;
+	}
+
+	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+		/* Now test with SGL buffers */
+		test_data.buff_type = SGL_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+	}
+
+	ret  = TEST_SUCCESS;
+
+exit:
+	return ret;
+}
+
+static int
+test_compressdev_deflate_stateful_decomp_checksum(void)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret;
+	uint16_t i;
+	const struct rte_compressdev_capabilities *capab;
+
+	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
+		return -ENOTSUP;
+
+	/* Check if driver supports any checksum */
+	if (!(capab->comp_feature_flags &
+	     (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
+	      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
+		return -ENOTSUP;
+
+	struct rte_comp_xform *compress_xform =
+			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+	if (compress_xform == NULL) {
+		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
+		return TEST_FAILED;
+	}
+
+	memcpy(compress_xform, ts_params->def_comp_xform,
+	       sizeof(struct rte_comp_xform));
+
+	struct rte_comp_xform *decompress_xform =
+			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+	if (decompress_xform == NULL) {
+		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
+		rte_free(compress_xform);
+		return TEST_FAILED;
+	}
+
+	memcpy(decompress_xform, ts_params->def_decomp_xform,
+	       sizeof(struct rte_comp_xform));
+
+	struct interim_data_params int_data = {
+		&compress_test_bufs[0],
+		1,
+		&i,
+		&compress_xform,
+		&decompress_xform,
+		1
+	};
+
+	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
+		RTE_COMP_OP_STATEFUL,
+		LB_BOTH,
+		ZLIB_COMPRESS,
+		0,
+		0,
+		2000,
+		4
+	};
+
+	/* Check if driver supports crc32 checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
+		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
+		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
+		/* Compress with Zlib, decompress with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	/* Check if driver supports adler32 checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
+		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
+		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
+		/* Compress with Zlib, decompress with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	/* Check if driver supports combined crc and adler checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
+		compress_xform->compress.chksum =
+				RTE_COMP_CHECKSUM_CRC32_ADLER32;
+		decompress_xform->decompress.chksum =
+				RTE_COMP_CHECKSUM_CRC32_ADLER32;
+		/* Zlib doesn't support combined checksum */
+		test_data.zlib_dir = ZLIB_NONE;
+		/* Compress stateless, decompress stateful with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	ret  = TEST_SUCCESS;
+
+exit:
+	rte_free(compress_xform);
+	rte_free(decompress_xform);
+	return ret;
+}
 
 static struct unit_test_suite compressdev_testsuite  = {
 	.suite_name = "compressdev unit test suite",
@@ -2048,6 +2382,10 @@ static struct unit_test_suite compressdev_testsuite  = {
 			test_compressdev_deflate_stateless_checksum),
 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
 			test_compressdev_out_of_space_buffer),
+		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+			test_compressdev_deflate_stateful_decomp),
+		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+			test_compressdev_deflate_stateful_decomp_checksum),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.17.1



* [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT
  2019-08-26  7:44 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
  2019-08-26  7:44 ` [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression Adam Dybkowski
  2019-08-26  7:45 ` [dpdk-dev] [PATCH 3/4] test/compress: add stateful decompression tests Adam Dybkowski
@ 2019-08-26  7:45 ` Adam Dybkowski
  2019-09-19 13:34   ` Akhil Goyal
  2019-09-20 12:44 ` [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression Adam Dybkowski
  3 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-08-26  7:45 UTC (permalink / raw)
  To: dev, fiona.trahe, akhil.goyal, arturx.trybula; +Cc: Adam Dybkowski

This patch adds information about the stateful decompression
feature in the QAT PMD.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 doc/guides/compressdevs/features/default.ini | 37 ++++++++++----------
 doc/guides/compressdevs/features/qat.ini     | 21 +++++------
 doc/guides/compressdevs/qat_comp.rst         |  5 +++
 doc/guides/rel_notes/release_19_11.rst       |  4 +++
 4 files changed, 39 insertions(+), 28 deletions(-)

diff --git a/doc/guides/compressdevs/features/default.ini b/doc/guides/compressdevs/features/default.ini
index 829e4df61..5b783b842 100644
--- a/doc/guides/compressdevs/features/default.ini
+++ b/doc/guides/compressdevs/features/default.ini
@@ -6,21 +6,22 @@
 ; the features table in the documentation.
 ;
 [Features]
-HW Accelerated      =
-CPU SSE             =
-CPU AVX             =
-CPU AVX2            =
-CPU AVX512          =
-CPU NEON            =
-Stateful            =
-Pass-through        =
-OOP SGL In SGL Out  =
-OOP SGL In LB  Out  =
-OOP LB  In SGL Out  =
-Deflate             =
-LZS                 =
-Adler32             =
-Crc32               =
-Adler32&Crc32       =
-Fixed               =
-Dynamic             =
+HW Accelerated         =
+Stateful Decompression =
+CPU SSE                =
+CPU AVX                =
+CPU AVX2               =
+CPU AVX512             =
+CPU NEON               =
+Stateful               =
+Pass-through           =
+OOP SGL In SGL Out     =
+OOP SGL In LB  Out     =
+OOP LB  In SGL Out     =
+Deflate                =
+LZS                    =
+Adler32                =
+Crc32                  =
+Adler32&Crc32          =
+Fixed                  =
+Dynamic                =
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
index 6b1e7f935..bced8f9cf 100644
--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -4,13 +4,14 @@
 ; Supported features of 'QAT' compression driver.
 ;
 [Features]
-HW Accelerated      = Y
-OOP SGL In SGL Out  = Y
-OOP SGL In LB  Out  = Y
-OOP LB  In SGL Out  = Y
-Deflate             = Y
-Adler32             = Y
-Crc32               = Y
-Adler32&Crc32       = Y
-Fixed               = Y
-Dynamic             = Y
+HW Accelerated         = Y
+Stateful Decompression = Y
+OOP SGL In SGL Out     = Y
+OOP SGL In LB  Out     = Y
+OOP LB  In SGL Out     = Y
+Deflate                = Y
+Adler32                = Y
+Crc32                  = Y
+Adler32&Crc32          = Y
+Fixed                  = Y
+Dynamic                = Y
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 6f583a460..6421f767c 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -29,6 +29,10 @@ Checksum generation:
 
     * CRC32, Adler and combined checksum
 
+Stateful operation:
+
+    * Decompression only
+
 Limitations
 -----------
 
@@ -38,6 +42,7 @@ Limitations
 * When using Deflate dynamic huffman encoding for compression, the input size (op.src.length)
   must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config file,
   see :ref:`building_qat_config` for more details.
+* Stateful compression is not supported.
 
 
 Installation
diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 8490d897c..16775f9ac 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -56,6 +56,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Updated the Intel QuickAssist Technology (QAT) compression PMD.**
+
+  Added stateful decompression support in the Intel QuickAssist Technology PMD.
+  Please note that stateful compression is not supported.
 
 Removed Items
 -------------
-- 
2.17.1



* Re: [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT
  2019-08-26  7:45 ` [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT Adam Dybkowski
@ 2019-09-19 13:34   ` Akhil Goyal
  2019-09-19 13:38     ` Akhil Goyal
  0 siblings, 1 reply; 22+ messages in thread
From: Akhil Goyal @ 2019-09-19 13:34 UTC (permalink / raw)
  To: Adam Dybkowski, dev, fiona.trahe, arturx.trybula

Hi Adam,
> 
> This patch adds the information about stateful decompression
> feature in QAT PMD.
> 
Could you please squash this documentation patch with the patch where the
functionality is added? If it covers multiple features/patches, split this
patch and merge each part into the relevant feature patch.

> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
> ---
>  doc/guides/compressdevs/features/default.ini | 37 ++++++++++----------
>  doc/guides/compressdevs/features/qat.ini     | 21 +++++------
>  doc/guides/compressdevs/qat_comp.rst         |  5 +++
>  doc/guides/rel_notes/release_19_11.rst       |  4 +++
>  4 files changed, 39 insertions(+), 28 deletions(-)
> 
> diff --git a/doc/guides/compressdevs/features/default.ini
> b/doc/guides/compressdevs/features/default.ini
> index 829e4df61..5b783b842 100644
> --- a/doc/guides/compressdevs/features/default.ini
> +++ b/doc/guides/compressdevs/features/default.ini
> @@ -6,21 +6,22 @@
>  ; the features table in the documentation.
>  ;
>  [Features]
> -HW Accelerated      =
> -CPU SSE             =
> -CPU AVX             =
> -CPU AVX2            =
> -CPU AVX512          =
> -CPU NEON            =
> -Stateful            =
> -Pass-through        =
> -OOP SGL In SGL Out  =
> -OOP SGL In LB  Out  =
> -OOP LB  In SGL Out  =
> -Deflate             =
> -LZS                 =
> -Adler32             =
> -Crc32               =
> -Adler32&Crc32       =
> -Fixed               =
> -Dynamic             =
> +HW Accelerated         =
> +Stateful Decompression =
> +CPU SSE                =
> +CPU AVX                =
> +CPU AVX2               =
> +CPU AVX512             =
> +CPU NEON               =
> +Stateful               =
> +Pass-through           =
> +OOP SGL In SGL Out     =
> +OOP SGL In LB  Out     =
> +OOP LB  In SGL Out     =
> +Deflate                =
> +LZS                    =
> +Adler32                =
> +Crc32                  =
> +Adler32&Crc32          =
> +Fixed                  =
> +Dynamic                =
> diff --git a/doc/guides/compressdevs/features/qat.ini
> b/doc/guides/compressdevs/features/qat.ini
> index 6b1e7f935..bced8f9cf 100644
> --- a/doc/guides/compressdevs/features/qat.ini
> +++ b/doc/guides/compressdevs/features/qat.ini
> @@ -4,13 +4,14 @@
>  ; Supported features of 'QAT' compression driver.
>  ;
>  [Features]
> -HW Accelerated      = Y
> -OOP SGL In SGL Out  = Y
> -OOP SGL In LB  Out  = Y
> -OOP LB  In SGL Out  = Y
> -Deflate             = Y
> -Adler32             = Y
> -Crc32               = Y
> -Adler32&Crc32       = Y
> -Fixed               = Y
> -Dynamic             = Y
> +HW Accelerated         = Y
> +Stateful Decompression = Y
> +OOP SGL In SGL Out     = Y
> +OOP SGL In LB  Out     = Y
> +OOP LB  In SGL Out     = Y
> +Deflate                = Y
> +Adler32                = Y
> +Crc32                  = Y
> +Adler32&Crc32          = Y
> +Fixed                  = Y
> +Dynamic                = Y
> diff --git a/doc/guides/compressdevs/qat_comp.rst
> b/doc/guides/compressdevs/qat_comp.rst
> index 6f583a460..6421f767c 100644
> --- a/doc/guides/compressdevs/qat_comp.rst
> +++ b/doc/guides/compressdevs/qat_comp.rst
> @@ -29,6 +29,10 @@ Checksum generation:
> 
>      * CRC32, Adler and combined checksum
> 
> +Stateful operation:
> +
> +    * Decompression only
> +
>  Limitations
>  -----------
> 
> @@ -38,6 +42,7 @@ Limitations
>  * When using Deflate dynamic huffman encoding for compression, the input
> size (op.src.length)
>    must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config
> file,
>    see :ref:`building_qat_config` for more details.
> +* Stateful compression is not supported.
> 
> 
>  Installation
> diff --git a/doc/guides/rel_notes/release_19_11.rst
> b/doc/guides/rel_notes/release_19_11.rst
> index 8490d897c..16775f9ac 100644
> --- a/doc/guides/rel_notes/release_19_11.rst
> +++ b/doc/guides/rel_notes/release_19_11.rst
> @@ -56,6 +56,10 @@ New Features
>       Also, make sure to start the actual text at the margin.
>       =========================================================
> 
> +* **Updated the Intel QuickAssist Technology (QAT) compression PMD.**
> +
> +  Added stateful decompression support in the Intel QuickAssist Technology
> PMD.
> +  Please note that stateful compression is not supported.
> 
>  Removed Items
>  -------------
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT
  2019-09-19 13:34   ` Akhil Goyal
@ 2019-09-19 13:38     ` Akhil Goyal
  0 siblings, 0 replies; 22+ messages in thread
From: Akhil Goyal @ 2019-09-19 13:38 UTC (permalink / raw)
  To: Adam Dybkowski, dev, fiona.trahe, arturx.trybula

Hi Fiona,

If there are no more changes expected in this patchset, could you please Ack it?
I will squash the documentation patch myself with the 2/4 patch of this set.
> 
> Hi Adam,
> >
> > This patch adds information about the stateful decompression
> > feature in the QAT PMD.
> >
> Could you please squash this documentation patch with the patch where the
> functionality is added? If there are multiple functionality patches, split
> this patch and merge the pieces into the relevant feature patches.
> 
> > Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
> > ---
> >  doc/guides/compressdevs/features/default.ini | 37 ++++++++++----------
> >  doc/guides/compressdevs/features/qat.ini     | 21 +++++------
> >  doc/guides/compressdevs/qat_comp.rst         |  5 +++
> >  doc/guides/rel_notes/release_19_11.rst       |  4 +++
> >  4 files changed, 39 insertions(+), 28 deletions(-)
> >
> > diff --git a/doc/guides/compressdevs/features/default.ini
> > b/doc/guides/compressdevs/features/default.ini
> > index 829e4df61..5b783b842 100644
> > --- a/doc/guides/compressdevs/features/default.ini
> > +++ b/doc/guides/compressdevs/features/default.ini
> > @@ -6,21 +6,22 @@
> >  ; the features table in the documentation.
> >  ;
> >  [Features]
> > -HW Accelerated      =
> > -CPU SSE             =
> > -CPU AVX             =
> > -CPU AVX2            =
> > -CPU AVX512          =
> > -CPU NEON            =
> > -Stateful            =
> > -Pass-through        =
> > -OOP SGL In SGL Out  =
> > -OOP SGL In LB  Out  =
> > -OOP LB  In SGL Out  =
> > -Deflate             =
> > -LZS                 =
> > -Adler32             =
> > -Crc32               =
> > -Adler32&Crc32       =
> > -Fixed               =
> > -Dynamic             =
> > +HW Accelerated         =
> > +Stateful Decompression =
> > +CPU SSE                =
> > +CPU AVX                =
> > +CPU AVX2               =
> > +CPU AVX512             =
> > +CPU NEON               =
> > +Stateful               =
> > +Pass-through           =
> > +OOP SGL In SGL Out     =
> > +OOP SGL In LB  Out     =
> > +OOP LB  In SGL Out     =
> > +Deflate                =
> > +LZS                    =
> > +Adler32                =
> > +Crc32                  =
> > +Adler32&Crc32          =
> > +Fixed                  =
> > +Dynamic                =
> > diff --git a/doc/guides/compressdevs/features/qat.ini
> > b/doc/guides/compressdevs/features/qat.ini
> > index 6b1e7f935..bced8f9cf 100644
> > --- a/doc/guides/compressdevs/features/qat.ini
> > +++ b/doc/guides/compressdevs/features/qat.ini
> > @@ -4,13 +4,14 @@
> >  ; Supported features of 'QAT' compression driver.
> >  ;
> >  [Features]
> > -HW Accelerated      = Y
> > -OOP SGL In SGL Out  = Y
> > -OOP SGL In LB  Out  = Y
> > -OOP LB  In SGL Out  = Y
> > -Deflate             = Y
> > -Adler32             = Y
> > -Crc32               = Y
> > -Adler32&Crc32       = Y
> > -Fixed               = Y
> > -Dynamic             = Y
> > +HW Accelerated         = Y
> > +Stateful Decompression = Y
> > +OOP SGL In SGL Out     = Y
> > +OOP SGL In LB  Out     = Y
> > +OOP LB  In SGL Out     = Y
> > +Deflate                = Y
> > +Adler32                = Y
> > +Crc32                  = Y
> > +Adler32&Crc32          = Y
> > +Fixed                  = Y
> > +Dynamic                = Y
> > diff --git a/doc/guides/compressdevs/qat_comp.rst
> > b/doc/guides/compressdevs/qat_comp.rst
> > index 6f583a460..6421f767c 100644
> > --- a/doc/guides/compressdevs/qat_comp.rst
> > +++ b/doc/guides/compressdevs/qat_comp.rst
> > @@ -29,6 +29,10 @@ Checksum generation:
> >
> >      * CRC32, Adler and combined checksum
> >
> > +Stateful operation:
> > +
> > +    * Decompression only
> > +
> >  Limitations
> >  -----------
> >
> > @@ -38,6 +42,7 @@ Limitations
> >  * When using Deflate dynamic huffman encoding for compression, the input
> > size (op.src.length)
> >    must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config
> > file,
> >    see :ref:`building_qat_config` for more details.
> > +* Stateful compression is not supported.
> >
> >
> >  Installation
> > diff --git a/doc/guides/rel_notes/release_19_11.rst
> > b/doc/guides/rel_notes/release_19_11.rst
> > index 8490d897c..16775f9ac 100644
> > --- a/doc/guides/rel_notes/release_19_11.rst
> > +++ b/doc/guides/rel_notes/release_19_11.rst
> > @@ -56,6 +56,10 @@ New Features
> >       Also, make sure to start the actual text at the margin.
> >       =========================================================
> >
> > +* **Updated the Intel QuickAssist Technology (QAT) compression PMD.**
> > +
> > +  Added stateful decompression support in the Intel QuickAssist Technology
> > PMD.
> > +  Please note that stateful compression is not supported.
> >
> >  Removed Items
> >  -------------
> > --
> > 2.17.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression
  2019-08-26  7:44 ` [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression Adam Dybkowski
@ 2019-09-20 10:09   ` Trahe, Fiona
  0 siblings, 0 replies; 22+ messages in thread
From: Trahe, Fiona @ 2019-09-20 10:09 UTC (permalink / raw)
  To: Dybkowski, AdamX, dev, akhil.goyal, Trybula, ArturX; +Cc: Trahe, Fiona



> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Monday, August 26, 2019 8:45 AM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; akhil.goyal@nxp.com; Trybula, ArturX
> <arturx.trybula@intel.com>
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH 2/4] compress/qat: add stateful decompression
> 
> This patch adds the stateful decompression feature
> to the DPDK QAT PMD.
> 
> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
> ---
>  drivers/compress/qat/qat_comp.c     | 256 +++++++++++++++++++++++++---
>  drivers/compress/qat/qat_comp.h     |  32 ++++
>  drivers/compress/qat/qat_comp_pmd.c | 166 ++++++++++++++++--
>  drivers/compress/qat/qat_comp_pmd.h |   2 +
>  4 files changed, 423 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
> index 835aaa838..a80cd6864 100644
> --- a/drivers/compress/qat/qat_comp.c
> +++ b/drivers/compress/qat/qat_comp.c
> @@ -27,22 +27,51 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
>  	struct rte_comp_op *op = in_op;
>  	struct qat_comp_op_cookie *cookie =
>  			(struct qat_comp_op_cookie *)op_cookie;
> -	struct qat_comp_xform *qat_xform = op->private_xform;
> -	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
> +	struct qat_comp_stream *stream;
> +	struct qat_comp_xform *qat_xform;
> +	const uint8_t *tmpl;
>  	struct icp_qat_fw_comp_req *comp_req =
>  	    (struct icp_qat_fw_comp_req *)out_msg;
> 
> -	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
> -		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
> -				"operation requests, op (%p) is not a "
> -				"stateless operation.", op);
> -		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
> -		return -EINVAL;
> +	if (op->op_type == RTE_COMP_OP_STATEFUL) {
> +		stream = op->stream;
> +		qat_xform = &stream->qat_xform;
> +		if (unlikely(qat_xform->qat_comp_request_type !=
> +			     QAT_COMP_REQUEST_DECOMPRESS)) {
> +			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
> +			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
> +			return -EINVAL;
> +		}
> +		if (unlikely(stream->op_in_progress)) {
> +			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateless
> operations on the same stream at once");
[Fiona] typo - should be stateful

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression
  2019-08-26  7:44 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
                   ` (2 preceding siblings ...)
  2019-08-26  7:45 ` [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT Adam Dybkowski
@ 2019-09-20 12:44 ` Adam Dybkowski
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
                     ` (2 more replies)
  3 siblings, 3 replies; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 12:44 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patchset adds the stateful decompression feature
to the QAT PMD, together with documentation updates
and two new unit tests.
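
For anyone reviewing the API surface end to end, here is a minimal sketch of
the intended application flow (illustration only, not part of the patches;
device id 0, queue pair 0 and the op mempool / mbuf setup are assumed to
exist, and the function name is made up). It mirrors what the new unit test
in patch 3/3 does: create a stream instead of a private xform, mark the
operation RTE_COMP_OP_STATEFUL, and resubmit the unconsumed input whenever
the PMD returns RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE.

#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static int
decompress_stateful(struct rte_mempool *op_pool,
		    struct rte_mbuf *m_src, struct rte_mbuf *m_dst)
{
	struct rte_comp_xform xform = {
		.type = RTE_COMP_DECOMPRESS,
		.decompress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.window_size = 15,
		},
	};
	void *stream = NULL;
	struct rte_comp_op *op;
	int ret = -1;

	if (rte_compressdev_stream_create(0, &xform, &stream) < 0)
		return -1;

	op = rte_comp_op_alloc(op_pool);
	if (op == NULL)
		goto out;

	op->op_type = RTE_COMP_OP_STATEFUL;
	op->stream = stream;
	op->flush_flag = RTE_COMP_FLUSH_SYNC;	/* as in the 3/3 unit test */
	op->m_src = m_src;
	op->m_dst = m_dst;
	op->src.offset = 0;
	op->src.length = rte_pktmbuf_pkt_len(m_src);
	op->dst.offset = 0;

	for (;;) {
		struct rte_comp_op *deq = NULL;

		if (rte_compressdev_enqueue_burst(0, 0, &op, 1) != 1)
			break;
		while (rte_compressdev_dequeue_burst(0, 0, &deq, 1) == 0)
			;	/* poll until the single op completes */
		op = deq;

		/* op->produced bytes of output sit in m_dst; drain them
		 * here before the destination buffer is reused.
		 */
		if (op->status == RTE_COMP_OP_STATUS_SUCCESS) {
			ret = 0;
			break;
		}
		if (op->status != RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE)
			break;	/* unrecoverable error */

		/* recoverable: resubmit whatever input was not consumed */
		op->src.offset += op->consumed;
		op->src.length -= op->consumed;
		op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	}

	rte_comp_op_free(op);
out:
	rte_compressdev_stream_free(0, stream);
	return ret;
}

The same loop is exercised by the new test_compressdev_deflate_stateful_decomp
case in patch 3/3, which deliberately uses a small output block size so that
several recoverable steps are needed.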

Adam Dybkowski (3):
  common/qat: add QAT RAM bank definitions
  compress/qat: add stateful decompression
  test/compress: add stateful decompression tests

 app/test/test_compressdev.c                  | 449 ++++++++++++++++---
 doc/guides/compressdevs/features/default.ini |  37 +-
 doc/guides/compressdevs/features/qat.ini     |  21 +-
 doc/guides/compressdevs/qat_comp.rst         |   5 +
 doc/guides/rel_notes/release_19_11.rst       |   4 +
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h |  73 +++
 drivers/compress/qat/qat_comp.c              | 256 ++++++++++-
 drivers/compress/qat/qat_comp.h              |  32 ++
 drivers/compress/qat/qat_comp_pmd.c          | 166 ++++++-
 drivers/compress/qat/qat_comp_pmd.h          |   2 +
 10 files changed, 929 insertions(+), 116 deletions(-)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 1/3] common/qat: add QAT RAM bank definitions
  2019-09-20 12:44 ` [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression Adam Dybkowski
@ 2019-09-20 12:44   ` Adam Dybkowski
  2019-09-20 20:06     ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Adam Dybkowski
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 2/3] " Adam Dybkowski
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 3/3] test/compress: add stateful decompression tests Adam Dybkowski
  2 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 12:44 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patch adds QAT RAM bank definitions and related macros.
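
For context (illustration only, not part of this patch), the macro is meant
to be used the way patch 2/3 of this series uses it: building a flags word
that enables the banks which hold the decompression history. The helper
below and its name are made up for the example; the bank A-E selection
matches the values programmed by the compression PMD in patch 2/3.

#include <stdint.h>
#include "icp_qat_fw_comp.h"

/* Enable banks A-E (used to save history); leave banks F-I disabled. */
static uint32_t
qat_history_ram_bank_flags(void)
{
	return ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
			ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
			ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
			ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
}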

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 73 ++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
index 813817720..c89a2c2fd 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -479,4 +479,77 @@ struct icp_qat_fw_comp_resp {
 	/**< Common response params (checksums and byte counts) */
 };
 
+/* RAM Bank definitions */
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *      Definition of the ram bank enabled values
+ * @description
+ *      Enumeration used to define whether a ram bank is enabled or not
+ *
+ *****************************************************************************/
+enum icp_qat_fw_comp_bank_enabled {
+	ICP_QAT_FW_COMP_BANK_DISABLED = 0, /*!< BANK DISABLED */
+	ICP_QAT_FW_COMP_BANK_ENABLED = 1,  /*!< BANK ENABLED */
+	ICP_QAT_FW_COMP_BANK_DELIMITER = 2 /**< Delimiter type */
+};
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *
+ * @description
+ *      Build the ram bank flags in the compression content descriptor
+ *      which specify which banks are used to save history
+ *
+ * @param bank_i_enable
+ * @param bank_h_enable
+ * @param bank_g_enable
+ * @param bank_f_enable
+ * @param bank_e_enable
+ * @param bank_d_enable
+ * @param bank_c_enable
+ * @param bank_b_enable
+ * @param bank_a_enable
+ *****************************************************************************/
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable,                         \
+					bank_h_enable,                         \
+					bank_g_enable,                         \
+					bank_f_enable,                         \
+					bank_e_enable,                         \
+					bank_d_enable,                         \
+					bank_c_enable,                         \
+					bank_b_enable,                         \
+					bank_a_enable)                         \
+	((((bank_i_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                         \
+		<< QAT_FW_COMP_BANK_I_BITPOS) |                                \
+	(((bank_h_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_H_BITPOS) |                                \
+	(((bank_g_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_G_BITPOS) |                                \
+	(((bank_f_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_F_BITPOS) |                                \
+	(((bank_e_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_E_BITPOS) |                                \
+	(((bank_d_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_D_BITPOS) |                                \
+	(((bank_c_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_C_BITPOS) |                                \
+	(((bank_b_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_B_BITPOS) |                                \
+	(((bank_a_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_A_BITPOS))
+
 #endif
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 2/3] compress/qat: add stateful decompression
  2019-09-20 12:44 ` [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression Adam Dybkowski
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
@ 2019-09-20 12:44   ` Adam Dybkowski
  2019-09-23  9:46     ` Trahe, Fiona
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 3/3] test/compress: add stateful decompression tests Adam Dybkowski
  2 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 12:44 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patch adds the stateful decompression feature
to the DPDK QAT PMD.
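
A user-visible consequence worth calling out (sketched below for illustration
only; the device id and element counts are example values): the PMD no longer
rejects a non-zero max_nb_streams in rte_compressdev_configure() and instead
creates a stream mempool with that many elements, which stateful
decompression then draws from via rte_compressdev_stream_create().

#include <rte_compressdev.h>
#include <rte_lcore.h>

static int
configure_qat_comp_with_streams(uint8_t dev_id, uint16_t nb_streams)
{
	struct rte_compressdev_config config = {
		.socket_id = rte_socket_id(),
		.nb_queue_pairs = 1,
		.max_nb_priv_xforms = 16,	/* example value */
		.max_nb_streams = nb_streams,	/* had to be 0 before this patch */
	};

	return rte_compressdev_configure(dev_id, &config);
}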

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 doc/guides/compressdevs/features/default.ini |  37 +--
 doc/guides/compressdevs/features/qat.ini     |  21 +-
 doc/guides/compressdevs/qat_comp.rst         |   5 +
 doc/guides/rel_notes/release_19_11.rst       |   4 +
 drivers/compress/qat/qat_comp.c              | 256 +++++++++++++++++--
 drivers/compress/qat/qat_comp.h              |  32 +++
 drivers/compress/qat/qat_comp_pmd.c          | 166 +++++++++++-
 drivers/compress/qat/qat_comp_pmd.h          |   2 +
 8 files changed, 462 insertions(+), 61 deletions(-)

diff --git a/doc/guides/compressdevs/features/default.ini b/doc/guides/compressdevs/features/default.ini
index 829e4df61..5b783b842 100644
--- a/doc/guides/compressdevs/features/default.ini
+++ b/doc/guides/compressdevs/features/default.ini
@@ -6,21 +6,22 @@
 ; the features table in the documentation.
 ;
 [Features]
-HW Accelerated      =
-CPU SSE             =
-CPU AVX             =
-CPU AVX2            =
-CPU AVX512          =
-CPU NEON            =
-Stateful            =
-Pass-through        =
-OOP SGL In SGL Out  =
-OOP SGL In LB  Out  =
-OOP LB  In SGL Out  =
-Deflate             =
-LZS                 =
-Adler32             =
-Crc32               =
-Adler32&Crc32       =
-Fixed               =
-Dynamic             =
+HW Accelerated         =
+Stateful Decompression =
+CPU SSE                =
+CPU AVX                =
+CPU AVX2               =
+CPU AVX512             =
+CPU NEON               =
+Stateful               =
+Pass-through           =
+OOP SGL In SGL Out     =
+OOP SGL In LB  Out     =
+OOP LB  In SGL Out     =
+Deflate                =
+LZS                    =
+Adler32                =
+Crc32                  =
+Adler32&Crc32          =
+Fixed                  =
+Dynamic                =
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
index 6b1e7f935..bced8f9cf 100644
--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -4,13 +4,14 @@
 ; Supported features of 'QAT' compression driver.
 ;
 [Features]
-HW Accelerated      = Y
-OOP SGL In SGL Out  = Y
-OOP SGL In LB  Out  = Y
-OOP LB  In SGL Out  = Y
-Deflate             = Y
-Adler32             = Y
-Crc32               = Y
-Adler32&Crc32       = Y
-Fixed               = Y
-Dynamic             = Y
+HW Accelerated         = Y
+Stateful Decompression = Y
+OOP SGL In SGL Out     = Y
+OOP SGL In LB  Out     = Y
+OOP LB  In SGL Out     = Y
+Deflate                = Y
+Adler32                = Y
+Crc32                  = Y
+Adler32&Crc32          = Y
+Fixed                  = Y
+Dynamic                = Y
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 6f583a460..6421f767c 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -29,6 +29,10 @@ Checksum generation:
 
     * CRC32, Adler and combined checksum
 
+Stateful operation:
+
+    * Decompression only
+
 Limitations
 -----------
 
@@ -38,6 +42,7 @@ Limitations
 * When using Deflate dynamic huffman encoding for compression, the input size (op.src.length)
   must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config file,
   see :ref:`building_qat_config` for more details.
+* Stateful compression is not supported.
 
 
 Installation
diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 27cfbd9e3..573683da4 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -56,6 +56,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Updated the Intel QuickAssist Technology (QAT) compression PMD.**
+
+  Added stateful decompression support in the Intel QuickAssist Technology PMD.
+  Please note that stateful compression is not supported.
 
 Removed Items
 -------------
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index 835aaa838..8717b7432 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -27,22 +27,51 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 	struct rte_comp_op *op = in_op;
 	struct qat_comp_op_cookie *cookie =
 			(struct qat_comp_op_cookie *)op_cookie;
-	struct qat_comp_xform *qat_xform = op->private_xform;
-	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
+	const uint8_t *tmpl;
 	struct icp_qat_fw_comp_req *comp_req =
 	    (struct icp_qat_fw_comp_req *)out_msg;
 
-	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
-				"operation requests, op (%p) is not a "
-				"stateless operation.", op);
-		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
-		return -EINVAL;
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = op->stream;
+		qat_xform = &stream->qat_xform;
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			return -EINVAL;
+		}
+		if (unlikely(stream->op_in_progress)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
+			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
+			return -EINVAL;
+		}
+		stream->op_in_progress = 1;
+	} else {
+		stream = NULL;
+		qat_xform = op->private_xform;
 	}
+	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
 
 	rte_mov128(out_msg, tmpl);
 	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comp_pars.req_par_flags =
+			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+				(stream->start_of_packet) ?
+					ICP_QAT_FW_COMP_SOP
+				      : ICP_QAT_FW_COMP_NOT_SOP,
+				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
+				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
+					ICP_QAT_FW_COMP_EOP
+				      : ICP_QAT_FW_COMP_NOT_EOP,
+				ICP_QAT_FW_COMP_NOT_BFINAL,
+				ICP_QAT_FW_COMP_NO_CNV,
+				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+	}
+
 	if (likely(qat_xform->qat_comp_request_type ==
 		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
 		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
@@ -94,6 +123,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_src->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -111,6 +143,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -129,6 +164,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_dst->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -146,6 +184,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -202,12 +243,22 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 			(struct qat_comp_op_cookie *)op_cookie;
 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
 			(resp_msg->opaque_data);
-	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
-				(rx_op->private_xform);
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
 	int err = resp_msg->comn_resp.comn_status &
 			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
 			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
 
+	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = rx_op->stream;
+		qat_xform = &stream->qat_xform;
+		/* clear op-in-progress flag */
+		stream->op_in_progress = 0;
+	} else {
+		stream = NULL;
+		qat_xform = rx_op->private_xform;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -254,7 +305,21 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		int8_t xlat_err_code =
 			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
 
-		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
+		/* handle recoverable out-of-buffer condition in stateful */
+		/* decompression scenario */
+		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
+				&& qat_xform->qat_comp_request_type
+					== QAT_COMP_REQUEST_DECOMPRESS
+				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+			struct icp_qat_fw_resp_comp_pars *comp_resp =
+					&resp_msg->comp_resp_pars;
+			rx_op->status =
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
+			rx_op->consumed = comp_resp->input_byte_counter;
+			rx_op->produced = comp_resp->output_byte_counter;
+			stream->start_of_packet = 0;
+		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
+			  && !xlat_err_code)
 				||
 		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
 				||
@@ -275,6 +340,8 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
 		rx_op->consumed = comp_resp->input_byte_counter;
 		rx_op->produced = comp_resp->output_byte_counter;
+		if (stream)
+			stream->start_of_packet = 0;
 
 		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
 			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
@@ -297,6 +364,12 @@ qat_comp_xform_size(void)
 	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
 }
 
+unsigned int
+qat_comp_stream_size(void)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
+}
+
 static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 				    enum qat_comp_request_type request)
 {
@@ -317,7 +390,9 @@ static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 
 static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 			const struct rte_memzone *interm_buff_mz,
-			const struct rte_comp_xform *xform)
+			const struct rte_comp_xform *xform,
+			const struct qat_comp_stream *stream,
+			enum rte_comp_op_type op_type)
 {
 	struct icp_qat_fw_comp_req *comp_req;
 	int comp_level, algo;
@@ -329,6 +404,18 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 		return -EINVAL;
 	}
 
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		if (unlikely(stream == NULL)) {
+			QAT_LOG(ERR, "Stream must be non null for stateful op");
+			return -EINVAL;
+		}
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
+			return -ENOTSUP;
+		}
+	}
+
 	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
 		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
 		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
@@ -376,12 +463,43 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 	qat_comp_create_req_hdr(&comp_req->comn_hdr,
 					qat_xform->qat_comp_request_type);
 
-	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
-	    ICP_QAT_FW_COMP_STATELESS_SESSION,
-	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATEFUL_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+		/* Decompression state registers */
+		comp_req->comp_cd_ctrl.comp_state_addr =
+				stream->state_registers_decomp_phys;
+
+		/* Enable A, B, C, D, and E (CAMs). */
+		comp_req->comp_cd_ctrl.ram_bank_flags =
+			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
+				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
+
+		comp_req->comp_cd_ctrl.ram_banks_addr =
+				stream->inflate_context_phys;
+	} else {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATELESS_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	}
 
 	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
 	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
@@ -497,7 +615,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
 		qat_xform->checksum_type = xform->decompress.chksum;
 	}
 
-	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
+				      NULL, RTE_COMP_OP_STATELESS)) {
 		QAT_LOG(ERR, "QAT: Problem with setting compression");
 		return -EINVAL;
 	}
@@ -532,3 +651,102 @@ qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
 	}
 	return -EINVAL;
 }
+
+/**
+ * Reset stream state for the next use.
+ *
+ * @param stream
+ *   handle of pmd's private stream data
+ */
+static void
+qat_comp_stream_reset(struct qat_comp_stream *stream)
+{
+	if (stream) {
+		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
+		stream->start_of_packet = 1;
+		stream->op_in_progress = 0;
+	}
+}
+
+/**
+ * Create driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param xform
+ *   xform data
+ * @param stream
+ *   ptr where handle of pmd's private stream data should be stored
+ * @return
+ *  - Returns 0 if private stream structure has been created successfully.
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -ENOTSUP if comp device does not support the comp transform.
+ *  - Returns -ENOMEM if the private stream could not be allocated.
+ */
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream)
+{
+	struct qat_comp_dev_private *qat = dev->data->dev_private;
+	struct qat_comp_stream *ptr;
+
+	if (unlikely(stream == NULL)) {
+		QAT_LOG(ERR, "QAT: stream parameter is NULL");
+		return -EINVAL;
+	}
+	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
+		QAT_LOG(ERR, "QAT: stateful compression not supported");
+		return -ENOTSUP;
+	}
+	if (unlikely(qat->streampool == NULL)) {
+		QAT_LOG(ERR, "QAT device has no stream mempool");
+		return -ENOMEM;
+	}
+	if (rte_mempool_get(qat->streampool, stream)) {
+		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
+		return -ENOMEM;
+	}
+
+	ptr = (struct qat_comp_stream *) *stream;
+	qat_comp_stream_reset(ptr);
+	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+	ptr->qat_xform.checksum_type = xform->decompress.chksum;
+
+	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
+				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
+		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
+		rte_mempool_put(qat->streampool, *stream);
+		*stream = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Free driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param stream
+ *   handle of pmd's private stream data
+ * @return
+ *  - 0 if successful
+ *  - <0 in error cases
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -EBUSY if can't free stream as there are inflight operations
+ */
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
+{
+	if (stream) {
+		struct qat_comp_dev_private *qat = dev->data->dev_private;
+		qat_comp_stream_reset((struct qat_comp_stream *) stream);
+		rte_mempool_put(qat->streampool, stream);
+		return 0;
+	}
+	return -EINVAL;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 61d12ecf4..2231451a1 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -26,6 +26,16 @@
 
 #define QAT_MIN_OUT_BUF_SIZE 46
 
+/* maximum size of the state registers */
+#define QAT_STATE_REGISTERS_MAX_SIZE 64
+
+/* decompressor context size */
+#define QAT_INFLATE_CONTEXT_SIZE_GEN1 36864
+#define QAT_INFLATE_CONTEXT_SIZE_GEN2 34032
+#define QAT_INFLATE_CONTEXT_SIZE_GEN3 34032
+#define QAT_INFLATE_CONTEXT_SIZE RTE_MAX(RTE_MAX(QAT_INFLATE_CONTEXT_SIZE_GEN1,\
+		QAT_INFLATE_CONTEXT_SIZE_GEN2), QAT_INFLATE_CONTEXT_SIZE_GEN3)
+
 enum qat_comp_request_type {
 	QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
 	QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
@@ -61,6 +71,17 @@ struct qat_comp_xform {
 	enum rte_comp_checksum_type checksum_type;
 };
 
+struct qat_comp_stream {
+	struct qat_comp_xform qat_xform;
+	void *state_registers_decomp;
+	phys_addr_t state_registers_decomp_phys;
+	void *inflate_context;
+	phys_addr_t inflate_context_phys;
+	const struct rte_memzone *memzone;
+	uint8_t start_of_packet;
+	volatile uint8_t op_in_progress;
+};
+
 int
 qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		       enum qat_device_gen qat_dev_gen __rte_unused);
@@ -80,5 +101,16 @@ qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
 unsigned int
 qat_comp_xform_size(void);
 
+unsigned int
+qat_comp_stream_size(void);
+
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream);
+
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+
 #endif
 #endif
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 072647217..05b7dfe77 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -9,6 +9,12 @@
 
 #define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
 
+struct stream_create_info {
+	struct qat_comp_dev_private *comp_dev;
+	int socket_id;
+	int error;
+};
+
 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 	{/* COMPRESSION - deflate */
 	 .algo = RTE_COMP_ALGO_DEFLATE,
@@ -21,7 +27,8 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 				RTE_COMP_FF_HUFFMAN_DYNAMIC |
 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
 				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
-				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
 	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
 
@@ -315,6 +322,120 @@ qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
 	return mp;
 }
 
+static void
+qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
+		     void *obj, unsigned int obj_idx)
+{
+	struct stream_create_info *info = opaque;
+	struct qat_comp_stream *stream = obj;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *memzone;
+	struct qat_inter_sgl *ram_banks_desc;
+
+	/* find a memzone for RAM banks */
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
+		 info->comp_dev->qat_dev->name, obj_idx);
+	memzone = rte_memzone_lookup(mz_name);
+	if (memzone == NULL) {
+		/* allocate a memzone for compression state and RAM banks */
+		memzone = rte_memzone_reserve_aligned(mz_name,
+			QAT_STATE_REGISTERS_MAX_SIZE
+				+ sizeof(struct qat_inter_sgl)
+				+ QAT_INFLATE_CONTEXT_SIZE,
+			info->socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
+		if (memzone == NULL) {
+			QAT_LOG(ERR,
+			    "Can't allocate RAM banks for device %s, object %u",
+				info->comp_dev->qat_dev->name, obj_idx);
+			info->error = -ENOMEM;
+			return;
+		}
+	}
+
+	/* prepare the buffer list descriptor for RAM banks */
+	ram_banks_desc = (struct qat_inter_sgl *)
+		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
+	ram_banks_desc->num_bufs = 1;
+	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
+	ram_banks_desc->buffers[0].addr = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE
+			+ sizeof(struct qat_inter_sgl);
+
+	memset(stream, 0, qat_comp_stream_size());
+	stream->memzone = memzone;
+	stream->state_registers_decomp = memzone->addr;
+	stream->state_registers_decomp_phys = memzone->iova;
+	stream->inflate_context = ((uint8_t *) memzone->addr)
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+	stream->inflate_context_phys = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+}
+
+static void
+qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
+			void *opaque __rte_unused, void *obj,
+			unsigned obj_idx __rte_unused)
+{
+	struct qat_comp_stream *stream = obj;
+
+	rte_memzone_free(stream->memzone);
+}
+
+static struct rte_mempool *
+qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
+			    int socket_id,
+			    uint32_t num_elements)
+{
+	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mp;
+
+	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
+		 "%s_streams", comp_dev->qat_dev->name);
+
+	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
+	mp = rte_mempool_lookup(stream_pool_name);
+
+	if (mp != NULL) {
+		QAT_LOG(DEBUG, "streampool already created");
+		if (mp->size != num_elements) {
+			QAT_LOG(DEBUG, "streampool wrong size - delete it");
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			rte_mempool_free(mp);
+			mp = NULL;
+			comp_dev->streampool = NULL;
+		}
+	}
+
+	if (mp == NULL) {
+		struct stream_create_info info = {
+			.comp_dev = comp_dev,
+			.socket_id = socket_id,
+			.error = 0
+		};
+		mp = rte_mempool_create(stream_pool_name,
+				num_elements,
+				qat_comp_stream_size(), 0, 0,
+				NULL, NULL, qat_comp_stream_init, &info,
+				socket_id, 0);
+		if (mp == NULL) {
+			QAT_LOG(ERR,
+			     "Err creating mempool %s w %d elements of size %d",
+			     stream_pool_name, num_elements,
+			     qat_comp_stream_size());
+		} else if (info.error) {
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			QAT_LOG(ERR,
+			     "Destroying mempool %s as at least one element failed initialisation",
+			     stream_pool_name);
+			rte_mempool_free(mp);
+			mp = NULL;
+		}
+	}
+
+	return mp;
+}
+
 static void
 _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 {
@@ -330,6 +451,14 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 		rte_mempool_free(comp_dev->xformpool);
 		comp_dev->xformpool = NULL;
 	}
+
+	/* Free stream pool */
+	if (comp_dev->streampool) {
+		rte_mempool_obj_iter(comp_dev->streampool,
+				     qat_comp_stream_destroy, NULL);
+		rte_mempool_free(comp_dev->streampool);
+		comp_dev->streampool = NULL;
+	}
 }
 
 static int
@@ -339,12 +468,6 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
 	int ret = 0;
 
-	if (config->max_nb_streams != 0) {
-		QAT_LOG(ERR,
-	"QAT device does not support STATEFUL so max_nb_streams must be 0");
-		return -EINVAL;
-	}
-
 	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
 		QAT_LOG(WARNING,
 			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
@@ -360,13 +483,26 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 		}
 	}
 
-	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
-					config->max_nb_priv_xforms);
-	if (comp_dev->xformpool == NULL) {
+	if (config->max_nb_priv_xforms) {
+		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+					    config, config->max_nb_priv_xforms);
+		if (comp_dev->xformpool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->xformpool = NULL;
+
+	if (config->max_nb_streams) {
+		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
+				     config->socket_id, config->max_nb_streams);
+		if (comp_dev->streampool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->streampool = NULL;
 
-		ret = -ENOMEM;
-		goto error_out;
-	}
 	return 0;
 
 error_out:
@@ -508,7 +644,9 @@ static struct rte_compressdev_ops compress_qat_ops = {
 
 	/* Compression related operations */
 	.private_xform_create	= qat_comp_private_xform_create,
-	.private_xform_free	= qat_comp_private_xform_free
+	.private_xform_free	= qat_comp_private_xform_free,
+	.stream_create		= qat_comp_stream_create,
+	.stream_free		= qat_comp_stream_free
 };
 
 /* An rte_driver is needed in the registration of the device with compressdev.
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
index b8299d43a..6979de14d 100644
--- a/drivers/compress/qat/qat_comp_pmd.h
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -30,6 +30,8 @@ struct qat_comp_dev_private {
 	/**< The device's memory for intermediate buffers */
 	struct rte_mempool *xformpool;
 	/**< The device's pool for qat_comp_xforms */
+	struct rte_mempool *streampool;
+	/**< The device's pool for qat_comp_streams */
 };
 
 int
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 3/3] test/compress: add stateful decompression tests
  2019-09-20 12:44 ` [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression Adam Dybkowski
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 2/3] " Adam Dybkowski
@ 2019-09-20 12:44   ` Adam Dybkowski
  2 siblings, 0 replies; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 12:44 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patch adds two new tests that cover the stateful
decompression feature.
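
The tests only run on devices that advertise the new capability flag; the
guard below is a small, self-contained sketch of that check (the device id
and function name are illustrative, the logic mirrors the guard used inside
the tests).

#include <errno.h>
#include <rte_comp.h>
#include <rte_compressdev.h>

static int
check_stateful_decomp_support(uint8_t dev_id)
{
	const struct rte_compressdev_capabilities *capab =
		rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);

	if (capab == NULL || !(capab->comp_feature_flags &
			       RTE_COMP_FF_STATEFUL_DECOMPRESSION))
		return -ENOTSUP;	/* skip the stateful tests */

	return 0;
}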

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 app/test/test_compressdev.c | 449 +++++++++++++++++++++++++++++++-----
 1 file changed, 394 insertions(+), 55 deletions(-)

diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c
index 167c48f10..9a7989683 100644
--- a/app/test/test_compressdev.c
+++ b/app/test/test_compressdev.c
@@ -95,11 +95,15 @@ struct interim_data_params {
 };
 
 struct test_data_params {
-	enum rte_comp_op_type state;
+	enum rte_comp_op_type compress_state;
+	enum rte_comp_op_type decompress_state;
 	enum varied_buff buff_type;
 	enum zlib_direction zlib_dir;
 	unsigned int out_of_space;
 	unsigned int big_data;
+	/* stateful decompression specific parameters */
+	unsigned int decompress_output_block_size;
+	unsigned int decompress_steps_max;
 };
 
 static struct comp_testsuite_params testsuite_params = { 0 };
@@ -237,7 +241,7 @@ generic_ut_setup(void)
 		.socket_id = rte_socket_id(),
 		.nb_queue_pairs = 1,
 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
-		.max_nb_streams = 0
+		.max_nb_streams = 1
 	};
 
 	if (rte_compressdev_configure(0, &config) < 0) {
@@ -275,7 +279,7 @@ test_compressdev_invalid_configuration(void)
 		.socket_id = rte_socket_id(),
 		.nb_queue_pairs = 1,
 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
-		.max_nb_streams = 0
+		.max_nb_streams = 1
 	};
 	struct rte_compressdev_info dev_info;
 
@@ -724,7 +728,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
 	unsigned int num_xforms = int_data->num_xforms;
-	enum rte_comp_op_type state = test_data->state;
+	enum rte_comp_op_type compress_state = test_data->compress_state;
+	enum rte_comp_op_type decompress_state = test_data->decompress_state;
 	unsigned int buff_type = test_data->buff_type;
 	unsigned int out_of_space = test_data->out_of_space;
 	unsigned int big_data = test_data->big_data;
@@ -754,6 +759,13 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	char *contig_buf = NULL;
 	uint64_t compress_checksum[num_bufs];
+	void *stream = NULL;
+	char *all_decomp_data = NULL;
+	unsigned int decomp_produced_data_size = 0;
+	unsigned int step = 0;
+
+	TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
+		    "Number of stateful operations in a step should be 1");
 
 	if (capa == NULL) {
 		RTE_LOG(ERR, USER1,
@@ -768,6 +780,12 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
 
+	if (decompress_state == RTE_COMP_OP_STATEFUL) {
+		data_size = strlen(test_bufs[0]) + 1;
+		all_decomp_data = rte_malloc(NULL, data_size,
+					     RTE_CACHE_LINE_SIZE);
+	}
+
 	if (big_data)
 		buf_pool = ts_params->big_mbuf_pool;
 	else if (buff_type == SGL_BOTH)
@@ -859,9 +877,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		ops[i]->src.offset = 0;
 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
 		ops[i]->dst.offset = 0;
-		if (state == RTE_COMP_OP_STATELESS) {
+		if (compress_state == RTE_COMP_OP_STATELESS)
 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
-		} else {
+		else {
 			RTE_LOG(ERR, USER1,
 				"Stateful operations are not supported "
 				"in these tests yet\n");
@@ -1046,6 +1064,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 					(ops_processed[i] + 1);
 			if (out_of_space == 1 && oos_zlib_compress)
 				data_size = OUT_OF_SPACE_BUF;
+			else if (test_data->decompress_output_block_size != 0)
+				data_size =
+					test_data->decompress_output_block_size;
 			else
 				data_size =
 				strlen(test_bufs[priv_data->orig_idx]) + 1;
@@ -1066,6 +1087,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 					(ops_processed[i] + 1);
 			if (out_of_space == 1 && oos_zlib_compress)
 				data_size = OUT_OF_SPACE_BUF;
+			else if (test_data->decompress_output_block_size != 0)
+				data_size =
+					test_data->decompress_output_block_size;
 			else
 				data_size =
 				strlen(test_bufs[priv_data->orig_idx]) + 1;
@@ -1093,9 +1117,14 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		 * number of bytes that were produced in the previous stage
 		 */
 		ops[i]->src.length = ops_processed[i]->produced;
+
 		ops[i]->dst.offset = 0;
-		if (state == RTE_COMP_OP_STATELESS) {
+		if (decompress_state == RTE_COMP_OP_STATELESS) {
 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
+			ops[i]->op_type = RTE_COMP_OP_STATELESS;
+		} else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
+			ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
+			ops[i]->op_type = RTE_COMP_OP_STATEFUL;
 		} else {
 			RTE_LOG(ERR, USER1,
 				"Stateful operations are not supported "
@@ -1132,33 +1161,12 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 			ops_processed[i] = ops[i];
 		}
 	} else {
-		/* Create decompress private xform data */
-		for (i = 0; i < num_xforms; i++) {
-			ret = rte_compressdev_private_xform_create(0,
-				(const struct rte_comp_xform *)decompress_xforms[i],
-				&priv_xforms[i]);
-			if (ret < 0) {
-				RTE_LOG(ERR, USER1,
-					"Decompression private xform "
-					"could not be created\n");
-				goto exit;
-			}
-			num_priv_xforms++;
-		}
-
-		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
-			/* Attach shareable private xform data to ops */
-			for (i = 0; i < num_bufs; i++) {
-				priv_data = (struct priv_op_data *)(ops[i] + 1);
-				uint16_t xform_idx = priv_data->orig_idx %
-								num_xforms;
-				ops[i]->private_xform = priv_xforms[xform_idx];
-			}
-		} else {
-			/* Create rest of the private xforms for the other ops */
-			for (i = num_xforms; i < num_bufs; i++) {
+		if (decompress_state == RTE_COMP_OP_STATELESS) {
+			/* Create decompress private xform data */
+			for (i = 0; i < num_xforms; i++) {
 				ret = rte_compressdev_private_xform_create(0,
-					decompress_xforms[i % num_xforms],
+					(const struct rte_comp_xform *)
+					decompress_xforms[i],
 					&priv_xforms[i]);
 				if (ret < 0) {
 					RTE_LOG(ERR, USER1,
@@ -1169,14 +1177,60 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 				num_priv_xforms++;
 			}
 
-			/* Attach non shareable private xform data to ops */
-			for (i = 0; i < num_bufs; i++) {
-				priv_data = (struct priv_op_data *)(ops[i] + 1);
-				uint16_t xform_idx = priv_data->orig_idx;
-				ops[i]->private_xform = priv_xforms[xform_idx];
+			if (capa->comp_feature_flags &
+					RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+				/* Attach shareable private xform data to ops */
+				for (i = 0; i < num_bufs; i++) {
+					priv_data = (struct priv_op_data *)
+							(ops[i] + 1);
+					uint16_t xform_idx =
+					       priv_data->orig_idx % num_xforms;
+					ops[i]->private_xform =
+							priv_xforms[xform_idx];
+				}
+			} else {
+				/* Create rest of the private xforms */
+				/* for the other ops */
+				for (i = num_xforms; i < num_bufs; i++) {
+					ret =
+					 rte_compressdev_private_xform_create(0,
+					      decompress_xforms[i % num_xforms],
+					      &priv_xforms[i]);
+					if (ret < 0) {
+						RTE_LOG(ERR, USER1,
+							"Decompression private xform could not be created\n");
+						goto exit;
+					}
+					num_priv_xforms++;
+				}
+
+				/* Attach non shareable private xform data */
+				/* to ops */
+				for (i = 0; i < num_bufs; i++) {
+					priv_data = (struct priv_op_data *)
+							(ops[i] + 1);
+					uint16_t xform_idx =
+							priv_data->orig_idx;
+					ops[i]->private_xform =
+							priv_xforms[xform_idx];
+				}
+			}
+		} else {
+			/* Create a stream object for stateful decompression */
+			ret = rte_compressdev_stream_create(0,
+					decompress_xforms[0], &stream);
+			if (ret < 0) {
+				RTE_LOG(ERR, USER1,
+					"Decompression stream could not be created, error %d\n",
+					ret);
+				goto exit;
 			}
+			/* Attach stream to ops */
+			for (i = 0; i < num_bufs; i++)
+				ops[i]->stream = stream;
 		}
 
+next_step:
 		/* Enqueue and dequeue all operations */
 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
 		if (num_enqd < num_bufs) {
@@ -1242,7 +1296,75 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 				continue;
 		}
 
-		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+		if (decompress_state == RTE_COMP_OP_STATEFUL
+			&& (ops_processed[i]->status ==
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
+			    || ops_processed[i]->status ==
+				RTE_COMP_OP_STATUS_SUCCESS)) {
+			/* collect the output into all_decomp_data */
+			const void *ptr = rte_pktmbuf_read(
+					ops_processed[i]->m_dst,
+					ops_processed[i]->dst.offset,
+					ops_processed[i]->produced,
+					all_decomp_data +
+						decomp_produced_data_size);
+			if (ptr != all_decomp_data + decomp_produced_data_size)
+				rte_memcpy(all_decomp_data +
+					   decomp_produced_data_size,
+					   ptr, ops_processed[i]->produced);
+			decomp_produced_data_size += ops_processed[i]->produced;
+			if (ops_processed[i]->src.length >
+					ops_processed[i]->consumed) {
+				if (ops_processed[i]->status ==
+						RTE_COMP_OP_STATUS_SUCCESS) {
+					ret_status = -1;
+					RTE_LOG(ERR, USER1,
+					      "Operation finished too early\n");
+					goto exit;
+				}
+				step++;
+				if (step >= test_data->decompress_steps_max) {
+					ret_status = -1;
+					RTE_LOG(ERR, USER1,
+					  "Operation exceeded maximum steps\n");
+					goto exit;
+				}
+				ops[i] = ops_processed[i];
+				ops[i]->status =
+					       RTE_COMP_OP_STATUS_NOT_PROCESSED;
+				ops[i]->src.offset +=
+						ops_processed[i]->consumed;
+				ops[i]->src.length -=
+						ops_processed[i]->consumed;
+				goto next_step;
+			} else {
+				/* Compare the original stream with the */
+				/* decompressed stream (in size and the data) */
+				priv_data = (struct priv_op_data *)
+						(ops_processed[i] + 1);
+				const char *buf1 =
+						test_bufs[priv_data->orig_idx];
+				const char *buf2 = all_decomp_data;
+
+				if (compare_buffers(buf1, strlen(buf1) + 1,
+					  buf2, decomp_produced_data_size) < 0)
+					goto exit;
+				/* Test checksums */
+				if (compress_xforms[0]->compress.chksum
+						!= RTE_COMP_CHECKSUM_NONE) {
+					if (ops_processed[i]->output_chksum
+						      != compress_checksum[i]) {
+						RTE_LOG(ERR, USER1,
+							"The checksums differ\n"
+			     "Compression Checksum: %" PRIu64 "\tDecompression "
+				"Checksum: %" PRIu64 "\n", compress_checksum[i],
+					       ops_processed[i]->output_chksum);
+						goto exit;
+					}
+				}
+			}
+		} else if (ops_processed[i]->status !=
+			   RTE_COMP_OP_STATUS_SUCCESS) {
 			RTE_LOG(ERR, USER1,
 				"Some operations were not successful\n");
 			goto exit;
@@ -1252,7 +1374,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		comp_bufs[priv_data->orig_idx] = NULL;
 	}
 
-	if (out_of_space && oos_zlib_compress) {
+	if ((out_of_space && oos_zlib_compress)
+			|| (decompress_state == RTE_COMP_OP_STATEFUL)) {
 		ret_status = TEST_SUCCESS;
 		goto exit;
 	}
@@ -1305,10 +1428,13 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		rte_comp_op_free(ops[i]);
 		rte_comp_op_free(ops_processed[i]);
 	}
-	for (i = 0; i < num_priv_xforms; i++) {
+	for (i = 0; i < num_priv_xforms; i++)
 		if (priv_xforms[i] != NULL)
 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
-	}
+	if (stream != NULL)
+		rte_compressdev_stream_free(0, stream);
+	if (all_decomp_data != NULL)
+		rte_free(all_decomp_data);
 	rte_free(contig_buf);
 
 	return ret_status;
@@ -1352,10 +1478,13 @@ test_compressdev_deflate_stateless_fixed(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1421,10 +1550,13 @@ test_compressdev_deflate_stateless_dynamic(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1474,10 +1606,13 @@ test_compressdev_deflate_stateless_multi_op(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1526,10 +1661,13 @@ test_compressdev_deflate_stateless_multi_level(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1614,10 +1752,13 @@ test_compressdev_deflate_stateless_multi_xform(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1661,10 +1802,13 @@ test_compressdev_deflate_stateless_sgl(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		SGL_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1770,10 +1914,13 @@ test_compressdev_deflate_stateless_checksum(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1868,7 +2015,7 @@ test_compressdev_out_of_space_buffer(void)
 	uint16_t i;
 	const struct rte_compressdev_capabilities *capab;
 
-	RTE_LOG(ERR, USER1, "This is a negative test errors are expected\n");
+	RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
 
 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
@@ -1876,16 +2023,6 @@ test_compressdev_out_of_space_buffer(void)
 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
 		return -ENOTSUP;
 
-	struct rte_comp_xform *compress_xform =
-			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
-
-	if (compress_xform == NULL) {
-		RTE_LOG(ERR, USER1,
-			"Compress xform could not be created\n");
-		ret = TEST_FAILED;
-		goto exit;
-	}
-
 	struct interim_data_params int_data = {
 		&compress_test_bufs[0],
 		1,
@@ -1896,10 +2033,13 @@ test_compressdev_out_of_space_buffer(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		1,  /* run out-of-space test */
+		0,
+		0,
 		0
 	};
 	/* Compress with compressdev, decompress with Zlib */
@@ -1933,7 +2073,6 @@ test_compressdev_out_of_space_buffer(void)
 	ret  = TEST_SUCCESS;
 
 exit:
-	rte_free(compress_xform);
 	return ret;
 }
 
@@ -1973,11 +2112,14 @@ test_compressdev_deflate_stateless_dynamic_big(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		SGL_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
-		1
+		1,
+		0,
+		0
 	};
 
 	ts_params->def_comp_xform->compress.deflate.huffman =
@@ -2010,6 +2152,199 @@ test_compressdev_deflate_stateless_dynamic_big(void)
 	return ret;
 }
 
+static int
+test_compressdev_deflate_stateful_decomp(void)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret;
+	uint16_t i;
+	const struct rte_compressdev_capabilities *capab;
+
+	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
+		return -ENOTSUP;
+
+	struct interim_data_params int_data = {
+		&compress_test_bufs[0],
+		1,
+		&i,
+		&ts_params->def_comp_xform,
+		&ts_params->def_decomp_xform,
+		1
+	};
+
+	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
+		RTE_COMP_OP_STATEFUL,
+		LB_BOTH,
+		ZLIB_COMPRESS,
+		0,
+		0,
+		2000,
+		4
+	};
+
+	/* Compress with Zlib, decompress with compressdev */
+	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+		ret = TEST_FAILED;
+		goto exit;
+	}
+
+	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+		/* Now test with SGL buffers */
+		test_data.buff_type = SGL_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+	}
+
+	ret  = TEST_SUCCESS;
+
+exit:
+	return ret;
+}
+
+static int
+test_compressdev_deflate_stateful_decomp_checksum(void)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret;
+	uint16_t i;
+	const struct rte_compressdev_capabilities *capab;
+
+	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
+		return -ENOTSUP;
+
+	/* Check if driver supports any checksum */
+	if (!(capab->comp_feature_flags &
+	     (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
+	      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
+		return -ENOTSUP;
+
+	struct rte_comp_xform *compress_xform =
+			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+	if (compress_xform == NULL) {
+		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
+		return TEST_FAILED;
+	}
+
+	memcpy(compress_xform, ts_params->def_comp_xform,
+	       sizeof(struct rte_comp_xform));
+
+	struct rte_comp_xform *decompress_xform =
+			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+	if (decompress_xform == NULL) {
+		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
+		rte_free(compress_xform);
+		return TEST_FAILED;
+	}
+
+	memcpy(decompress_xform, ts_params->def_decomp_xform,
+	       sizeof(struct rte_comp_xform));
+
+	struct interim_data_params int_data = {
+		&compress_test_bufs[0],
+		1,
+		&i,
+		&compress_xform,
+		&decompress_xform,
+		1
+	};
+
+	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
+		RTE_COMP_OP_STATEFUL,
+		LB_BOTH,
+		ZLIB_COMPRESS,
+		0,
+		0,
+		2000,
+		4
+	};
+
+	/* Check if driver supports crc32 checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
+		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
+		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
+		/* Compress with Zlib, decompress with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	/* Check if driver supports adler32 checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
+		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
+		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
+		/* Compress with Zlib, decompress with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	/* Check if driver supports combined crc and adler checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
+		compress_xform->compress.chksum =
+				RTE_COMP_CHECKSUM_CRC32_ADLER32;
+		decompress_xform->decompress.chksum =
+				RTE_COMP_CHECKSUM_CRC32_ADLER32;
+		/* Zlib doesn't support combined checksum */
+		test_data.zlib_dir = ZLIB_NONE;
+		/* Compress stateless, decompress stateful with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	ret  = TEST_SUCCESS;
+
+exit:
+	rte_free(compress_xform);
+	rte_free(decompress_xform);
+	return ret;
+}
 
 static struct unit_test_suite compressdev_testsuite  = {
 	.suite_name = "compressdev unit test suite",
@@ -2036,6 +2371,10 @@ static struct unit_test_suite compressdev_testsuite  = {
 			test_compressdev_deflate_stateless_checksum),
 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
 			test_compressdev_out_of_space_buffer),
+		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+			test_compressdev_deflate_stateful_decomp),
+		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+			test_compressdev_deflate_stateful_decomp_checksum),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
@ 2019-09-20 20:06     ` Adam Dybkowski
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
                         ` (3 more replies)
  0 siblings, 4 replies; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 20:06 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patchset adds the stateful decompression feature
to the QAT PMD, together with documentation updates
and two new unit tests.
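
For reference, a minimal application-side sketch of the stateful
decompression flow introduced here (API and status names are the ones
used in this patchset; device/queue ids, the helper name and the error
handling are illustrative only):

  #include <rte_comp.h>
  #include <rte_compressdev.h>

  /* Submit one stateful decompress step on dev 0, queue 0. 'op' already has
   * m_src/m_dst attached; 'stream' comes from rte_compressdev_stream_create()
   * called with a decompress xform. Returns 1 if the op must be resubmitted
   * after draining the produced output, 0 on success, -1 on error.
   */
  static int
  stateful_decomp_step(struct rte_comp_op *op, void *stream)
  {
          struct rte_comp_op *done = NULL;

          op->op_type = RTE_COMP_OP_STATEFUL;
          op->flush_flag = RTE_COMP_FLUSH_SYNC;
          op->stream = stream;

          if (rte_compressdev_enqueue_burst(0, 0, &op, 1) != 1)
                  return -1;
          while (rte_compressdev_dequeue_burst(0, 0, &done, 1) == 0)
                  ;

          if (done->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
                  /* consume 'produced' output bytes, then feed the rest of
                   * the input to the same stream in the next step */
                  done->src.offset += done->consumed;
                  done->src.length -= done->consumed;
                  done->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
                  return 1;
          }
          return done->status == RTE_COMP_OP_STATUS_SUCCESS ? 0 : -1;
  }

The stream itself is obtained once per flow with
rte_compressdev_stream_create() and released with
rte_compressdev_stream_free() when the flow ends.
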
---
v3:
* Minor corrections to the features list in the documentation.

v2:
* Typo correction in the error message.

Adam Dybkowski (3):
  common/qat: add QAT RAM bank definitions
  compress/qat: add stateful decompression
  test/compress: add stateful decompression tests

 app/test/test_compressdev.c                  | 449 ++++++++++++++++---
 doc/guides/compressdevs/features/default.ini |  37 +-
 doc/guides/compressdevs/features/qat.ini     |  21 +-
 doc/guides/compressdevs/qat_comp.rst         |   5 +
 doc/guides/rel_notes/release_19_11.rst       |   4 +
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h |  73 +++
 drivers/compress/qat/qat_comp.c              | 256 ++++++++++-
 drivers/compress/qat/qat_comp.h              |  32 ++
 drivers/compress/qat/qat_comp_pmd.c          | 166 ++++++-
 drivers/compress/qat/qat_comp_pmd.h          |   2 +
 10 files changed, 929 insertions(+), 116 deletions(-)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions
  2019-09-20 20:06     ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Adam Dybkowski
@ 2019-09-20 20:06       ` Adam Dybkowski
  2019-09-24 11:16         ` Trahe, Fiona
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 2/3] compress/qat: add stateful decompression Adam Dybkowski
                         ` (2 subsequent siblings)
  3 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 20:06 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patch adds QAT RAM bank definitions and related macros.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 73 ++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
index 813817720..c89a2c2fd 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -479,4 +479,77 @@ struct icp_qat_fw_comp_resp {
 	/**< Common response params (checksums and byte counts) */
 };
 
+/* RAM Bank definitions */
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+/**
+ *****************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *      Definition of the ram bank enabled values
+ * @description
+ *      Enumeration used to define whether a ram bank is enabled or not
+ *
+ *****************************************************************************/
+enum icp_qat_fw_comp_bank_enabled {
+	ICP_QAT_FW_COMP_BANK_DISABLED = 0, /*!< BANK DISABLED */
+	ICP_QAT_FW_COMP_BANK_ENABLED = 1,  /*!< BANK ENABLED */
+	ICP_QAT_FW_COMP_BANK_DELIMITER = 2 /**< Delimiter type */
+};
+
+/**
+ ******************************************************************************
+ * @ingroup icp_qat_fw_comp
+ *
+ * @description
+ *      Build the ram bank flags in the compression content descriptor
+ *      which specify which banks are used to save history
+ *
+ * @param bank_i_enable
+ * @param bank_h_enable
+ * @param bank_g_enable
+ * @param bank_f_enable
+ * @param bank_e_enable
+ * @param bank_d_enable
+ * @param bank_c_enable
+ * @param bank_b_enable
+ * @param bank_a_enable
+ *****************************************************************************/
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable,                         \
+					bank_h_enable,                         \
+					bank_g_enable,                         \
+					bank_f_enable,                         \
+					bank_e_enable,                         \
+					bank_d_enable,                         \
+					bank_c_enable,                         \
+					bank_b_enable,                         \
+					bank_a_enable)                         \
+	((((bank_i_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                         \
+		<< QAT_FW_COMP_BANK_I_BITPOS) |                                \
+	(((bank_h_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_H_BITPOS) |                                \
+	(((bank_g_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_G_BITPOS) |                                \
+	(((bank_f_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_F_BITPOS) |                                \
+	(((bank_e_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_E_BITPOS) |                                \
+	(((bank_d_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_D_BITPOS) |                                \
+	(((bank_c_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_C_BITPOS) |                                \
+	(((bank_b_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_B_BITPOS) |                                \
+	(((bank_a_enable)&QAT_FW_COMP_BANK_FLAG_MASK)                          \
+		<< QAT_FW_COMP_BANK_A_BITPOS))
+
 #endif
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v3 2/3] compress/qat: add stateful decompression
  2019-09-20 20:06     ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Adam Dybkowski
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
@ 2019-09-20 20:06       ` Adam Dybkowski
  2019-09-24 11:17         ` Trahe, Fiona
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 3/3] test/compress: add stateful decompression tests Adam Dybkowski
  2019-09-23 15:56       ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Trahe, Fiona
  3 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 20:06 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patch adds the stateful decompression feature
to the DPDK QAT PMD.
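
The stream mempool backing stateful decompression is only created when
the application requests streams at configure time. A minimal
configuration sketch (the helper name and the sizes are illustrative, not
recommendations):

  #include <rte_compressdev.h>
  #include <rte_lcore.h>

  static int
  configure_qat_comp_dev(uint8_t dev_id)
  {
          struct rte_compressdev_config cfg = {
                  .socket_id = rte_socket_id(),
                  .nb_queue_pairs = 1,
                  .max_nb_priv_xforms = 16,
                  /* non-zero so the PMD allocates its stream mempool */
                  .max_nb_streams = 1,
          };

          return rte_compressdev_configure(dev_id, &cfg);
  }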

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 doc/guides/compressdevs/features/default.ini |  37 +--
 doc/guides/compressdevs/features/qat.ini     |  21 +-
 doc/guides/compressdevs/qat_comp.rst         |   5 +
 doc/guides/rel_notes/release_19_11.rst       |   4 +
 drivers/compress/qat/qat_comp.c              | 256 +++++++++++++++++--
 drivers/compress/qat/qat_comp.h              |  32 +++
 drivers/compress/qat/qat_comp_pmd.c          | 166 +++++++++++-
 drivers/compress/qat/qat_comp_pmd.h          |   2 +
 8 files changed, 462 insertions(+), 61 deletions(-)

diff --git a/doc/guides/compressdevs/features/default.ini b/doc/guides/compressdevs/features/default.ini
index 829e4df61..e1419ee8d 100644
--- a/doc/guides/compressdevs/features/default.ini
+++ b/doc/guides/compressdevs/features/default.ini
@@ -6,21 +6,22 @@
 ; the features table in the documentation.
 ;
 [Features]
-HW Accelerated      =
-CPU SSE             =
-CPU AVX             =
-CPU AVX2            =
-CPU AVX512          =
-CPU NEON            =
-Stateful            =
-Pass-through        =
-OOP SGL In SGL Out  =
-OOP SGL In LB  Out  =
-OOP LB  In SGL Out  =
-Deflate             =
-LZS                 =
-Adler32             =
-Crc32               =
-Adler32&Crc32       =
-Fixed               =
-Dynamic             =
+HW Accelerated         =
+CPU SSE                =
+CPU AVX                =
+CPU AVX2               =
+CPU AVX512             =
+CPU NEON               =
+Stateful Compression   =
+Stateful Decompression =
+Pass-through           =
+OOP SGL In SGL Out     =
+OOP SGL In LB  Out     =
+OOP LB  In SGL Out     =
+Deflate                =
+LZS                    =
+Adler32                =
+Crc32                  =
+Adler32&Crc32          =
+Fixed                  =
+Dynamic                =
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
index 6b1e7f935..bced8f9cf 100644
--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -4,13 +4,14 @@
 ; Supported features of 'QAT' compression driver.
 ;
 [Features]
-HW Accelerated      = Y
-OOP SGL In SGL Out  = Y
-OOP SGL In LB  Out  = Y
-OOP LB  In SGL Out  = Y
-Deflate             = Y
-Adler32             = Y
-Crc32               = Y
-Adler32&Crc32       = Y
-Fixed               = Y
-Dynamic             = Y
+HW Accelerated         = Y
+Stateful Decompression = Y
+OOP SGL In SGL Out     = Y
+OOP SGL In LB  Out     = Y
+OOP LB  In SGL Out     = Y
+Deflate                = Y
+Adler32                = Y
+Crc32                  = Y
+Adler32&Crc32          = Y
+Fixed                  = Y
+Dynamic                = Y
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 6f583a460..6421f767c 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -29,6 +29,10 @@ Checksum generation:
 
     * CRC32, Adler and combined checksum
 
+Stateful operation:
+
+    * Decompression only
+
 Limitations
 -----------
 
@@ -38,6 +42,7 @@ Limitations
 * When using Deflate dynamic huffman encoding for compression, the input size (op.src.length)
   must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config file,
   see :ref:`building_qat_config` for more details.
+* Stateful compression is not supported.
 
 
 Installation
diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 27cfbd9e3..573683da4 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -56,6 +56,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Updated the Intel QuickAssist Technology (QAT) compression PMD.**
+
+  Added stateful decompression support in the Intel QuickAssist Technology PMD.
+  Please note that stateful compression is not supported.
 
 Removed Items
 -------------
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index 835aaa838..8717b7432 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -27,22 +27,51 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 	struct rte_comp_op *op = in_op;
 	struct qat_comp_op_cookie *cookie =
 			(struct qat_comp_op_cookie *)op_cookie;
-	struct qat_comp_xform *qat_xform = op->private_xform;
-	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
+	const uint8_t *tmpl;
 	struct icp_qat_fw_comp_req *comp_req =
 	    (struct icp_qat_fw_comp_req *)out_msg;
 
-	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
-				"operation requests, op (%p) is not a "
-				"stateless operation.", op);
-		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
-		return -EINVAL;
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = op->stream;
+		qat_xform = &stream->qat_xform;
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			return -EINVAL;
+		}
+		if (unlikely(stream->op_in_progress)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
+			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
+			return -EINVAL;
+		}
+		stream->op_in_progress = 1;
+	} else {
+		stream = NULL;
+		qat_xform = op->private_xform;
 	}
+	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
 
 	rte_mov128(out_msg, tmpl);
 	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comp_pars.req_par_flags =
+			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+				(stream->start_of_packet) ?
+					ICP_QAT_FW_COMP_SOP
+				      : ICP_QAT_FW_COMP_NOT_SOP,
+				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
+				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
+					ICP_QAT_FW_COMP_EOP
+				      : ICP_QAT_FW_COMP_NOT_EOP,
+				ICP_QAT_FW_COMP_NOT_BFINAL,
+				ICP_QAT_FW_COMP_NO_CNV,
+				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+	}
+
 	if (likely(qat_xform->qat_comp_request_type ==
 		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
 		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
@@ -94,6 +123,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_src->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -111,6 +143,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -129,6 +164,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_dst->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -146,6 +184,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -202,12 +243,22 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 			(struct qat_comp_op_cookie *)op_cookie;
 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
 			(resp_msg->opaque_data);
-	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
-				(rx_op->private_xform);
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
 	int err = resp_msg->comn_resp.comn_status &
 			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
 			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
 
+	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = rx_op->stream;
+		qat_xform = &stream->qat_xform;
+		/* clear op-in-progress flag */
+		stream->op_in_progress = 0;
+	} else {
+		stream = NULL;
+		qat_xform = rx_op->private_xform;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -254,7 +305,21 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		int8_t xlat_err_code =
 			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
 
-		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
+		/* handle recoverable out-of-buffer condition in stateful */
+		/* decompression scenario */
+		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
+				&& qat_xform->qat_comp_request_type
+					== QAT_COMP_REQUEST_DECOMPRESS
+				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+			struct icp_qat_fw_resp_comp_pars *comp_resp =
+					&resp_msg->comp_resp_pars;
+			rx_op->status =
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
+			rx_op->consumed = comp_resp->input_byte_counter;
+			rx_op->produced = comp_resp->output_byte_counter;
+			stream->start_of_packet = 0;
+		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
+			  && !xlat_err_code)
 				||
 		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
 				||
@@ -275,6 +340,8 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
 		rx_op->consumed = comp_resp->input_byte_counter;
 		rx_op->produced = comp_resp->output_byte_counter;
+		if (stream)
+			stream->start_of_packet = 0;
 
 		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
 			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
@@ -297,6 +364,12 @@ qat_comp_xform_size(void)
 	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
 }
 
+unsigned int
+qat_comp_stream_size(void)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
+}
+
 static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 				    enum qat_comp_request_type request)
 {
@@ -317,7 +390,9 @@ static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 
 static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 			const struct rte_memzone *interm_buff_mz,
-			const struct rte_comp_xform *xform)
+			const struct rte_comp_xform *xform,
+			const struct qat_comp_stream *stream,
+			enum rte_comp_op_type op_type)
 {
 	struct icp_qat_fw_comp_req *comp_req;
 	int comp_level, algo;
@@ -329,6 +404,18 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 		return -EINVAL;
 	}
 
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		if (unlikely(stream == NULL)) {
+			QAT_LOG(ERR, "Stream must be non null for stateful op");
+			return -EINVAL;
+		}
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
+			return -ENOTSUP;
+		}
+	}
+
 	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
 		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
 		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
@@ -376,12 +463,43 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 	qat_comp_create_req_hdr(&comp_req->comn_hdr,
 					qat_xform->qat_comp_request_type);
 
-	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
-	    ICP_QAT_FW_COMP_STATELESS_SESSION,
-	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATEFUL_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+		/* Decompression state registers */
+		comp_req->comp_cd_ctrl.comp_state_addr =
+				stream->state_registers_decomp_phys;
+
+		/* Enable A, B, C, D, and E (CAMs). */
+		comp_req->comp_cd_ctrl.ram_bank_flags =
+			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
+				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
+
+		comp_req->comp_cd_ctrl.ram_banks_addr =
+				stream->inflate_context_phys;
+	} else {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATELESS_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	}
 
 	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
 	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
@@ -497,7 +615,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
 		qat_xform->checksum_type = xform->decompress.chksum;
 	}
 
-	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
+				      NULL, RTE_COMP_OP_STATELESS)) {
 		QAT_LOG(ERR, "QAT: Problem with setting compression");
 		return -EINVAL;
 	}
@@ -532,3 +651,102 @@ qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
 	}
 	return -EINVAL;
 }
+
+/**
+ * Reset stream state for the next use.
+ *
+ * @param stream
+ *   handle of pmd's private stream data
+ */
+static void
+qat_comp_stream_reset(struct qat_comp_stream *stream)
+{
+	if (stream) {
+		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
+		stream->start_of_packet = 1;
+		stream->op_in_progress = 0;
+	}
+}
+
+/**
+ * Create driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param xform
+ *   xform data
+ * @param stream
+ *   ptr where handle of pmd's private stream data should be stored
+ * @return
+ *  - Returns 0 if private stream structure has been created successfully.
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -ENOTSUP if comp device does not support the comp transform.
+ *  - Returns -ENOMEM if the private stream could not be allocated.
+ */
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream)
+{
+	struct qat_comp_dev_private *qat = dev->data->dev_private;
+	struct qat_comp_stream *ptr;
+
+	if (unlikely(stream == NULL)) {
+		QAT_LOG(ERR, "QAT: stream parameter is NULL");
+		return -EINVAL;
+	}
+	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
+		QAT_LOG(ERR, "QAT: stateful compression not supported");
+		return -ENOTSUP;
+	}
+	if (unlikely(qat->streampool == NULL)) {
+		QAT_LOG(ERR, "QAT device has no stream mempool");
+		return -ENOMEM;
+	}
+	if (rte_mempool_get(qat->streampool, stream)) {
+		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
+		return -ENOMEM;
+	}
+
+	ptr = (struct qat_comp_stream *) *stream;
+	qat_comp_stream_reset(ptr);
+	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+	ptr->qat_xform.checksum_type = xform->decompress.chksum;
+
+	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
+				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
+		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
+		rte_mempool_put(qat->streampool, *stream);
+		*stream = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Free driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param stream
+ *   handle of pmd's private stream data
+ * @return
+ *  - 0 if successful
+ *  - <0 in error cases
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -EBUSY if can't free stream as there are inflight operations
+ */
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
+{
+	if (stream) {
+		struct qat_comp_dev_private *qat = dev->data->dev_private;
+		qat_comp_stream_reset((struct qat_comp_stream *) stream);
+		rte_mempool_put(qat->streampool, stream);
+		return 0;
+	}
+	return -EINVAL;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 61d12ecf4..2231451a1 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -26,6 +26,16 @@
 
 #define QAT_MIN_OUT_BUF_SIZE 46
 
+/* maximum size of the state registers */
+#define QAT_STATE_REGISTERS_MAX_SIZE 64
+
+/* decompressor context size */
+#define QAT_INFLATE_CONTEXT_SIZE_GEN1 36864
+#define QAT_INFLATE_CONTEXT_SIZE_GEN2 34032
+#define QAT_INFLATE_CONTEXT_SIZE_GEN3 34032
+#define QAT_INFLATE_CONTEXT_SIZE RTE_MAX(RTE_MAX(QAT_INFLATE_CONTEXT_SIZE_GEN1,\
+		QAT_INFLATE_CONTEXT_SIZE_GEN2), QAT_INFLATE_CONTEXT_SIZE_GEN3)
+
 enum qat_comp_request_type {
 	QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
 	QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
@@ -61,6 +71,17 @@ struct qat_comp_xform {
 	enum rte_comp_checksum_type checksum_type;
 };
 
+struct qat_comp_stream {
+	struct qat_comp_xform qat_xform;
+	void *state_registers_decomp;
+	phys_addr_t state_registers_decomp_phys;
+	void *inflate_context;
+	phys_addr_t inflate_context_phys;
+	const struct rte_memzone *memzone;
+	uint8_t start_of_packet;
+	volatile uint8_t op_in_progress;
+};
+
 int
 qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		       enum qat_device_gen qat_dev_gen __rte_unused);
@@ -80,5 +101,16 @@ qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
 unsigned int
 qat_comp_xform_size(void);
 
+unsigned int
+qat_comp_stream_size(void);
+
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream);
+
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+
 #endif
 #endif
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 072647217..05b7dfe77 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -9,6 +9,12 @@
 
 #define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
 
+struct stream_create_info {
+	struct qat_comp_dev_private *comp_dev;
+	int socket_id;
+	int error;
+};
+
 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 	{/* COMPRESSION - deflate */
 	 .algo = RTE_COMP_ALGO_DEFLATE,
@@ -21,7 +27,8 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 				RTE_COMP_FF_HUFFMAN_DYNAMIC |
 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
 				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
-				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
 	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
 
@@ -315,6 +322,120 @@ qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
 	return mp;
 }
 
+static void
+qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
+		     void *obj, unsigned int obj_idx)
+{
+	struct stream_create_info *info = opaque;
+	struct qat_comp_stream *stream = obj;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *memzone;
+	struct qat_inter_sgl *ram_banks_desc;
+
+	/* find a memzone for RAM banks */
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
+		 info->comp_dev->qat_dev->name, obj_idx);
+	memzone = rte_memzone_lookup(mz_name);
+	if (memzone == NULL) {
+		/* allocate a memzone for compression state and RAM banks */
+		memzone = rte_memzone_reserve_aligned(mz_name,
+			QAT_STATE_REGISTERS_MAX_SIZE
+				+ sizeof(struct qat_inter_sgl)
+				+ QAT_INFLATE_CONTEXT_SIZE,
+			info->socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
+		if (memzone == NULL) {
+			QAT_LOG(ERR,
+			    "Can't allocate RAM banks for device %s, object %u",
+				info->comp_dev->qat_dev->name, obj_idx);
+			info->error = -ENOMEM;
+			return;
+		}
+	}
+
+	/* prepare the buffer list descriptor for RAM banks */
+	ram_banks_desc = (struct qat_inter_sgl *)
+		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
+	ram_banks_desc->num_bufs = 1;
+	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
+	ram_banks_desc->buffers[0].addr = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE
+			+ sizeof(struct qat_inter_sgl);
+
+	memset(stream, 0, qat_comp_stream_size());
+	stream->memzone = memzone;
+	stream->state_registers_decomp = memzone->addr;
+	stream->state_registers_decomp_phys = memzone->iova;
+	stream->inflate_context = ((uint8_t *) memzone->addr)
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+	stream->inflate_context_phys = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+}
+
+static void
+qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
+			void *opaque __rte_unused, void *obj,
+			unsigned obj_idx __rte_unused)
+{
+	struct qat_comp_stream *stream = obj;
+
+	rte_memzone_free(stream->memzone);
+}
+
+static struct rte_mempool *
+qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
+			    int socket_id,
+			    uint32_t num_elements)
+{
+	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mp;
+
+	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
+		 "%s_streams", comp_dev->qat_dev->name);
+
+	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
+	mp = rte_mempool_lookup(stream_pool_name);
+
+	if (mp != NULL) {
+		QAT_LOG(DEBUG, "streampool already created");
+		if (mp->size != num_elements) {
+			QAT_LOG(DEBUG, "streampool wrong size - delete it");
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			rte_mempool_free(mp);
+			mp = NULL;
+			comp_dev->streampool = NULL;
+		}
+	}
+
+	if (mp == NULL) {
+		struct stream_create_info info = {
+			.comp_dev = comp_dev,
+			.socket_id = socket_id,
+			.error = 0
+		};
+		mp = rte_mempool_create(stream_pool_name,
+				num_elements,
+				qat_comp_stream_size(), 0, 0,
+				NULL, NULL, qat_comp_stream_init, &info,
+				socket_id, 0);
+		if (mp == NULL) {
+			QAT_LOG(ERR,
+			     "Err creating mempool %s w %d elements of size %d",
+			     stream_pool_name, num_elements,
+			     qat_comp_stream_size());
+		} else if (info.error) {
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			QAT_LOG(ERR,
+			     "Destroying mempool %s as at least one element failed initialisation",
+			     stream_pool_name);
+			rte_mempool_free(mp);
+			mp = NULL;
+		}
+	}
+
+	return mp;
+}
+
 static void
 _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 {
@@ -330,6 +451,14 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 		rte_mempool_free(comp_dev->xformpool);
 		comp_dev->xformpool = NULL;
 	}
+
+	/* Free stream pool */
+	if (comp_dev->streampool) {
+		rte_mempool_obj_iter(comp_dev->streampool,
+				     qat_comp_stream_destroy, NULL);
+		rte_mempool_free(comp_dev->streampool);
+		comp_dev->streampool = NULL;
+	}
 }
 
 static int
@@ -339,12 +468,6 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
 	int ret = 0;
 
-	if (config->max_nb_streams != 0) {
-		QAT_LOG(ERR,
-	"QAT device does not support STATEFUL so max_nb_streams must be 0");
-		return -EINVAL;
-	}
-
 	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
 		QAT_LOG(WARNING,
 			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
@@ -360,13 +483,26 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 		}
 	}
 
-	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
-					config->max_nb_priv_xforms);
-	if (comp_dev->xformpool == NULL) {
+	if (config->max_nb_priv_xforms) {
+		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+					    config, config->max_nb_priv_xforms);
+		if (comp_dev->xformpool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->xformpool = NULL;
+
+	if (config->max_nb_streams) {
+		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
+				     config->socket_id, config->max_nb_streams);
+		if (comp_dev->streampool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->streampool = NULL;
 
-		ret = -ENOMEM;
-		goto error_out;
-	}
 	return 0;
 
 error_out:
@@ -508,7 +644,9 @@ static struct rte_compressdev_ops compress_qat_ops = {
 
 	/* Compression related operations */
 	.private_xform_create	= qat_comp_private_xform_create,
-	.private_xform_free	= qat_comp_private_xform_free
+	.private_xform_free	= qat_comp_private_xform_free,
+	.stream_create		= qat_comp_stream_create,
+	.stream_free		= qat_comp_stream_free
 };
 
 /* An rte_driver is needed in the registration of the device with compressdev.
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
index b8299d43a..6979de14d 100644
--- a/drivers/compress/qat/qat_comp_pmd.h
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -30,6 +30,8 @@ struct qat_comp_dev_private {
 	/**< The device's memory for intermediate buffers */
 	struct rte_mempool *xformpool;
 	/**< The device's pool for qat_comp_xforms */
+	struct rte_mempool *streampool;
+	/**< The device's pool for qat_comp_streams */
 };
 
 int
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v3 3/3] test/compress: add stateful decompression tests
  2019-09-20 20:06     ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Adam Dybkowski
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 2/3] compress/qat: add stateful decompression Adam Dybkowski
@ 2019-09-20 20:06       ` Adam Dybkowski
  2019-09-24 11:18         ` Trahe, Fiona
  2019-09-23 15:56       ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Trahe, Fiona
  3 siblings, 1 reply; 22+ messages in thread
From: Adam Dybkowski @ 2019-09-20 20:06 UTC (permalink / raw)
  To: dev, fiona.trahe, arturx.trybula, akhil.goyal; +Cc: Adam Dybkowski

This patch adds two new tests that cover the stateful
decompression feature.
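
The stateful cases drive test_deflate_comp_decomp() through the new
test_data_params fields. For readability, the parameter block used by the
first new test is shown here with designated initializers (the patch
itself uses positional initializers with the same values):

  struct test_data_params test_data = {
          .compress_state = RTE_COMP_OP_STATELESS,   /* compress with Zlib */
          .decompress_state = RTE_COMP_OP_STATEFUL,  /* decompress with QAT */
          .buff_type = LB_BOTH,
          .zlib_dir = ZLIB_COMPRESS,
          .out_of_space = 0,
          .big_data = 0,
          .decompress_output_block_size = 2000,      /* forces several steps */
          .decompress_steps_max = 4,
  };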

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 app/test/test_compressdev.c | 449 +++++++++++++++++++++++++++++++-----
 1 file changed, 394 insertions(+), 55 deletions(-)

diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c
index 167c48f10..9a7989683 100644
--- a/app/test/test_compressdev.c
+++ b/app/test/test_compressdev.c
@@ -95,11 +95,15 @@ struct interim_data_params {
 };
 
 struct test_data_params {
-	enum rte_comp_op_type state;
+	enum rte_comp_op_type compress_state;
+	enum rte_comp_op_type decompress_state;
 	enum varied_buff buff_type;
 	enum zlib_direction zlib_dir;
 	unsigned int out_of_space;
 	unsigned int big_data;
+	/* stateful decompression specific parameters */
+	unsigned int decompress_output_block_size;
+	unsigned int decompress_steps_max;
 };
 
 static struct comp_testsuite_params testsuite_params = { 0 };
@@ -237,7 +241,7 @@ generic_ut_setup(void)
 		.socket_id = rte_socket_id(),
 		.nb_queue_pairs = 1,
 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
-		.max_nb_streams = 0
+		.max_nb_streams = 1
 	};
 
 	if (rte_compressdev_configure(0, &config) < 0) {
@@ -275,7 +279,7 @@ test_compressdev_invalid_configuration(void)
 		.socket_id = rte_socket_id(),
 		.nb_queue_pairs = 1,
 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
-		.max_nb_streams = 0
+		.max_nb_streams = 1
 	};
 	struct rte_compressdev_info dev_info;
 
@@ -724,7 +728,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
 	unsigned int num_xforms = int_data->num_xforms;
-	enum rte_comp_op_type state = test_data->state;
+	enum rte_comp_op_type compress_state = test_data->compress_state;
+	enum rte_comp_op_type decompress_state = test_data->decompress_state;
 	unsigned int buff_type = test_data->buff_type;
 	unsigned int out_of_space = test_data->out_of_space;
 	unsigned int big_data = test_data->big_data;
@@ -754,6 +759,13 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	char *contig_buf = NULL;
 	uint64_t compress_checksum[num_bufs];
+	void *stream = NULL;
+	char *all_decomp_data = NULL;
+	unsigned int decomp_produced_data_size = 0;
+	unsigned int step = 0;
+
+	TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
+		    "Number of stateful operations in a step should be 1");
 
 	if (capa == NULL) {
 		RTE_LOG(ERR, USER1,
@@ -768,6 +780,12 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
 
+	if (decompress_state == RTE_COMP_OP_STATEFUL) {
+		data_size = strlen(test_bufs[0]) + 1;
+		all_decomp_data = rte_malloc(NULL, data_size,
+					     RTE_CACHE_LINE_SIZE);
+	}
+
 	if (big_data)
 		buf_pool = ts_params->big_mbuf_pool;
 	else if (buff_type == SGL_BOTH)
@@ -859,9 +877,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		ops[i]->src.offset = 0;
 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
 		ops[i]->dst.offset = 0;
-		if (state == RTE_COMP_OP_STATELESS) {
+		if (compress_state == RTE_COMP_OP_STATELESS)
 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
-		} else {
+		else {
 			RTE_LOG(ERR, USER1,
 				"Stateful operations are not supported "
 				"in these tests yet\n");
@@ -1046,6 +1064,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 					(ops_processed[i] + 1);
 			if (out_of_space == 1 && oos_zlib_compress)
 				data_size = OUT_OF_SPACE_BUF;
+			else if (test_data->decompress_output_block_size != 0)
+				data_size =
+					test_data->decompress_output_block_size;
 			else
 				data_size =
 				strlen(test_bufs[priv_data->orig_idx]) + 1;
@@ -1066,6 +1087,9 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 					(ops_processed[i] + 1);
 			if (out_of_space == 1 && oos_zlib_compress)
 				data_size = OUT_OF_SPACE_BUF;
+			else if (test_data->decompress_output_block_size != 0)
+				data_size =
+					test_data->decompress_output_block_size;
 			else
 				data_size =
 				strlen(test_bufs[priv_data->orig_idx]) + 1;
@@ -1093,9 +1117,14 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		 * number of bytes that were produced in the previous stage
 		 */
 		ops[i]->src.length = ops_processed[i]->produced;
+
 		ops[i]->dst.offset = 0;
-		if (state == RTE_COMP_OP_STATELESS) {
+		if (decompress_state == RTE_COMP_OP_STATELESS) {
 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
+			ops[i]->op_type = RTE_COMP_OP_STATELESS;
+		} else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
+			ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
+			ops[i]->op_type = RTE_COMP_OP_STATEFUL;
 		} else {
 			RTE_LOG(ERR, USER1,
 				"Stateful operations are not supported "
@@ -1132,33 +1161,12 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 			ops_processed[i] = ops[i];
 		}
 	} else {
-		/* Create decompress private xform data */
-		for (i = 0; i < num_xforms; i++) {
-			ret = rte_compressdev_private_xform_create(0,
-				(const struct rte_comp_xform *)decompress_xforms[i],
-				&priv_xforms[i]);
-			if (ret < 0) {
-				RTE_LOG(ERR, USER1,
-					"Decompression private xform "
-					"could not be created\n");
-				goto exit;
-			}
-			num_priv_xforms++;
-		}
-
-		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
-			/* Attach shareable private xform data to ops */
-			for (i = 0; i < num_bufs; i++) {
-				priv_data = (struct priv_op_data *)(ops[i] + 1);
-				uint16_t xform_idx = priv_data->orig_idx %
-								num_xforms;
-				ops[i]->private_xform = priv_xforms[xform_idx];
-			}
-		} else {
-			/* Create rest of the private xforms for the other ops */
-			for (i = num_xforms; i < num_bufs; i++) {
+		if (decompress_state == RTE_COMP_OP_STATELESS) {
+			/* Create decompress private xform data */
+			for (i = 0; i < num_xforms; i++) {
 				ret = rte_compressdev_private_xform_create(0,
-					decompress_xforms[i % num_xforms],
+					(const struct rte_comp_xform *)
+					decompress_xforms[i],
 					&priv_xforms[i]);
 				if (ret < 0) {
 					RTE_LOG(ERR, USER1,
@@ -1169,14 +1177,60 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 				num_priv_xforms++;
 			}
 
-			/* Attach non shareable private xform data to ops */
-			for (i = 0; i < num_bufs; i++) {
-				priv_data = (struct priv_op_data *)(ops[i] + 1);
-				uint16_t xform_idx = priv_data->orig_idx;
-				ops[i]->private_xform = priv_xforms[xform_idx];
+			if (capa->comp_feature_flags &
+					RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+				/* Attach shareable private xform data to ops */
+				for (i = 0; i < num_bufs; i++) {
+					priv_data = (struct priv_op_data *)
+							(ops[i] + 1);
+					uint16_t xform_idx =
+					       priv_data->orig_idx % num_xforms;
+					ops[i]->private_xform =
+							priv_xforms[xform_idx];
+				}
+			} else {
+				/* Create rest of the private xforms */
+				/* for the other ops */
+				for (i = num_xforms; i < num_bufs; i++) {
+					ret =
+					 rte_compressdev_private_xform_create(0,
+					      decompress_xforms[i % num_xforms],
+					      &priv_xforms[i]);
+					if (ret < 0) {
+						RTE_LOG(ERR, USER1,
+							"Decompression private xform could not be created\n");
+						goto exit;
+					}
+					num_priv_xforms++;
+				}
+
+				/* Attach non shareable private xform data */
+				/* to ops */
+				for (i = 0; i < num_bufs; i++) {
+					priv_data = (struct priv_op_data *)
+							(ops[i] + 1);
+					uint16_t xform_idx =
+							priv_data->orig_idx;
+					ops[i]->private_xform =
+							priv_xforms[xform_idx];
+				}
+			}
+		} else {
+			/* Create a stream object for stateful decompression */
+			ret = rte_compressdev_stream_create(0,
+					decompress_xforms[0], &stream);
+			if (ret < 0) {
+				RTE_LOG(ERR, USER1,
+					"Decompression stream could not be created, error %d\n",
+					ret);
+				goto exit;
 			}
+			/* Attach stream to ops */
+			for (i = 0; i < num_bufs; i++)
+				ops[i]->stream = stream;
 		}
 
+next_step:
 		/* Enqueue and dequeue all operations */
 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
 		if (num_enqd < num_bufs) {
@@ -1242,7 +1296,75 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 				continue;
 		}
 
-		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+		if (decompress_state == RTE_COMP_OP_STATEFUL
+			&& (ops_processed[i]->status ==
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
+			    || ops_processed[i]->status ==
+				RTE_COMP_OP_STATUS_SUCCESS)) {
+			/* collect the output into all_decomp_data */
+			const void *ptr = rte_pktmbuf_read(
+					ops_processed[i]->m_dst,
+					ops_processed[i]->dst.offset,
+					ops_processed[i]->produced,
+					all_decomp_data +
+						decomp_produced_data_size);
+			if (ptr != all_decomp_data + decomp_produced_data_size)
+				rte_memcpy(all_decomp_data +
+					   decomp_produced_data_size,
+					   ptr, ops_processed[i]->produced);
+			decomp_produced_data_size += ops_processed[i]->produced;
+			if (ops_processed[i]->src.length >
+					ops_processed[i]->consumed) {
+				if (ops_processed[i]->status ==
+						RTE_COMP_OP_STATUS_SUCCESS) {
+					ret_status = -1;
+					RTE_LOG(ERR, USER1,
+					      "Operation finished too early\n");
+					goto exit;
+				}
+				step++;
+				if (step >= test_data->decompress_steps_max) {
+					ret_status = -1;
+					RTE_LOG(ERR, USER1,
+					  "Operation exceeded maximum steps\n");
+					goto exit;
+				}
+				ops[i] = ops_processed[i];
+				ops[i]->status =
+					       RTE_COMP_OP_STATUS_NOT_PROCESSED;
+				ops[i]->src.offset +=
+						ops_processed[i]->consumed;
+				ops[i]->src.length -=
+						ops_processed[i]->consumed;
+				goto next_step;
+			} else {
+				/* Compare the original stream with the */
+				/* decompressed stream (in size and the data) */
+				priv_data = (struct priv_op_data *)
+						(ops_processed[i] + 1);
+				const char *buf1 =
+						test_bufs[priv_data->orig_idx];
+				const char *buf2 = all_decomp_data;
+
+				if (compare_buffers(buf1, strlen(buf1) + 1,
+					  buf2, decomp_produced_data_size) < 0)
+					goto exit;
+				/* Test checksums */
+				if (compress_xforms[0]->compress.chksum
+						!= RTE_COMP_CHECKSUM_NONE) {
+					if (ops_processed[i]->output_chksum
+						      != compress_checksum[i]) {
+						RTE_LOG(ERR, USER1,
+							"The checksums differ\n"
+			     "Compression Checksum: %" PRIu64 "\tDecompression "
+				"Checksum: %" PRIu64 "\n", compress_checksum[i],
+					       ops_processed[i]->output_chksum);
+						goto exit;
+					}
+				}
+			}
+		} else if (ops_processed[i]->status !=
+			   RTE_COMP_OP_STATUS_SUCCESS) {
 			RTE_LOG(ERR, USER1,
 				"Some operations were not successful\n");
 			goto exit;
@@ -1252,7 +1374,8 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		comp_bufs[priv_data->orig_idx] = NULL;
 	}
 
-	if (out_of_space && oos_zlib_compress) {
+	if ((out_of_space && oos_zlib_compress)
+			|| (decompress_state == RTE_COMP_OP_STATEFUL)) {
 		ret_status = TEST_SUCCESS;
 		goto exit;
 	}
@@ -1305,10 +1428,13 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 		rte_comp_op_free(ops[i]);
 		rte_comp_op_free(ops_processed[i]);
 	}
-	for (i = 0; i < num_priv_xforms; i++) {
+	for (i = 0; i < num_priv_xforms; i++)
 		if (priv_xforms[i] != NULL)
 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
-	}
+	if (stream != NULL)
+		rte_compressdev_stream_free(0, stream);
+	if (all_decomp_data != NULL)
+		rte_free(all_decomp_data);
 	rte_free(contig_buf);
 
 	return ret_status;
@@ -1352,10 +1478,13 @@ test_compressdev_deflate_stateless_fixed(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1421,10 +1550,13 @@ test_compressdev_deflate_stateless_dynamic(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1474,10 +1606,13 @@ test_compressdev_deflate_stateless_multi_op(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1526,10 +1661,13 @@ test_compressdev_deflate_stateless_multi_level(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1614,10 +1752,13 @@ test_compressdev_deflate_stateless_multi_xform(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1661,10 +1802,13 @@ test_compressdev_deflate_stateless_sgl(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		SGL_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1770,10 +1914,13 @@ test_compressdev_deflate_stateless_checksum(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
+		0,
+		0,
 		0
 	};
 
@@ -1868,7 +2015,7 @@ test_compressdev_out_of_space_buffer(void)
 	uint16_t i;
 	const struct rte_compressdev_capabilities *capab;
 
-	RTE_LOG(ERR, USER1, "This is a negative test errors are expected\n");
+	RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
 
 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
@@ -1876,16 +2023,6 @@ test_compressdev_out_of_space_buffer(void)
 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
 		return -ENOTSUP;
 
-	struct rte_comp_xform *compress_xform =
-			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
-
-	if (compress_xform == NULL) {
-		RTE_LOG(ERR, USER1,
-			"Compress xform could not be created\n");
-		ret = TEST_FAILED;
-		goto exit;
-	}
-
 	struct interim_data_params int_data = {
 		&compress_test_bufs[0],
 		1,
@@ -1896,10 +2033,13 @@ test_compressdev_out_of_space_buffer(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		LB_BOTH,
 		ZLIB_DECOMPRESS,
 		1,  /* run out-of-space test */
+		0,
+		0,
 		0
 	};
 	/* Compress with compressdev, decompress with Zlib */
@@ -1933,7 +2073,6 @@ test_compressdev_out_of_space_buffer(void)
 	ret  = TEST_SUCCESS;
 
 exit:
-	rte_free(compress_xform);
 	return ret;
 }
 
@@ -1973,11 +2112,14 @@ test_compressdev_deflate_stateless_dynamic_big(void)
 	};
 
 	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
 		RTE_COMP_OP_STATELESS,
 		SGL_BOTH,
 		ZLIB_DECOMPRESS,
 		0,
-		1
+		1,
+		0,
+		0
 	};
 
 	ts_params->def_comp_xform->compress.deflate.huffman =
@@ -2010,6 +2152,199 @@ test_compressdev_deflate_stateless_dynamic_big(void)
 	return ret;
 }
 
+static int
+test_compressdev_deflate_stateful_decomp(void)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret;
+	uint16_t i;
+	const struct rte_compressdev_capabilities *capab;
+
+	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
+		return -ENOTSUP;
+
+	struct interim_data_params int_data = {
+		&compress_test_bufs[0],
+		1,
+		&i,
+		&ts_params->def_comp_xform,
+		&ts_params->def_decomp_xform,
+		1
+	};
+
+	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
+		RTE_COMP_OP_STATEFUL,
+		LB_BOTH,
+		ZLIB_COMPRESS,
+		0,
+		0,
+		2000,
+		4
+	};
+
+	/* Compress with Zlib, decompress with compressdev */
+	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+		ret = TEST_FAILED;
+		goto exit;
+	}
+
+	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+		/* Now test with SGL buffers */
+		test_data.buff_type = SGL_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+	}
+
+	ret  = TEST_SUCCESS;
+
+exit:
+	return ret;
+}
+
+static int
+test_compressdev_deflate_stateful_decomp_checksum(void)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret;
+	uint16_t i;
+	const struct rte_compressdev_capabilities *capab;
+
+	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
+		return -ENOTSUP;
+
+	/* Check if driver supports any checksum */
+	if (!(capab->comp_feature_flags &
+	     (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
+	      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
+		return -ENOTSUP;
+
+	struct rte_comp_xform *compress_xform =
+			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+	if (compress_xform == NULL) {
+		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
+		return TEST_FAILED;
+	}
+
+	memcpy(compress_xform, ts_params->def_comp_xform,
+	       sizeof(struct rte_comp_xform));
+
+	struct rte_comp_xform *decompress_xform =
+			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+	if (decompress_xform == NULL) {
+		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
+		rte_free(compress_xform);
+		return TEST_FAILED;
+	}
+
+	memcpy(decompress_xform, ts_params->def_decomp_xform,
+	       sizeof(struct rte_comp_xform));
+
+	struct interim_data_params int_data = {
+		&compress_test_bufs[0],
+		1,
+		&i,
+		&compress_xform,
+		&decompress_xform,
+		1
+	};
+
+	struct test_data_params test_data = {
+		RTE_COMP_OP_STATELESS,
+		RTE_COMP_OP_STATEFUL,
+		LB_BOTH,
+		ZLIB_COMPRESS,
+		0,
+		0,
+		2000,
+		4
+	};
+
+	/* Check if driver supports crc32 checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
+		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
+		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
+		/* Compress with Zlib, decompress with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	/* Check if driver supports adler32 checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
+		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
+		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
+		/* Compress with Zlib, decompress with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	/* Check if driver supports combined crc and adler checksum and test */
+	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
+		compress_xform->compress.chksum =
+				RTE_COMP_CHECKSUM_CRC32_ADLER32;
+		decompress_xform->decompress.chksum =
+				RTE_COMP_CHECKSUM_CRC32_ADLER32;
+		/* Zlib doesn't support combined checksum */
+		test_data.zlib_dir = ZLIB_NONE;
+		/* Compress stateless, decompress stateful with compressdev */
+		test_data.buff_type = LB_BOTH;
+		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
+			ret = TEST_FAILED;
+			goto exit;
+		}
+		if (capab->comp_feature_flags &
+				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
+			/* Now test with SGL buffers */
+			test_data.buff_type = SGL_BOTH;
+			if (test_deflate_comp_decomp(&int_data,
+						     &test_data) < 0) {
+				ret = TEST_FAILED;
+				goto exit;
+			}
+		}
+	}
+
+	ret  = TEST_SUCCESS;
+
+exit:
+	rte_free(compress_xform);
+	rte_free(decompress_xform);
+	return ret;
+}
 
 static struct unit_test_suite compressdev_testsuite  = {
 	.suite_name = "compressdev unit test suite",
@@ -2036,6 +2371,10 @@ static struct unit_test_suite compressdev_testsuite  = {
 			test_compressdev_deflate_stateless_checksum),
 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
 			test_compressdev_out_of_space_buffer),
+		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+			test_compressdev_deflate_stateful_decomp),
+		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+			test_compressdev_deflate_stateful_decomp_checksum),
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
 };
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/3] compress/qat: add stateful decompression
  2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 2/3] " Adam Dybkowski
@ 2019-09-23  9:46     ` Trahe, Fiona
  0 siblings, 0 replies; 22+ messages in thread
From: Trahe, Fiona @ 2019-09-23  9:46 UTC (permalink / raw)
  To: Dybkowski, AdamX, dev, Trybula, ArturX, akhil.goyal; +Cc: Trahe, Fiona

Hi Adam,


> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Friday, September 20, 2019 1:45 PM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; Trybula, ArturX
> <arturx.trybula@intel.com>; akhil.goyal@nxp.com
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH v2 2/3] compress/qat: add stateful decompression
> 
> This patch adds the stateful decompression feature
> to the DPDK QAT PMD.
> 
> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
> ---
>  doc/guides/compressdevs/features/default.ini |  37 +--
>  doc/guides/compressdevs/features/qat.ini     |  21 +-
>  doc/guides/compressdevs/qat_comp.rst         |   5 +
>  doc/guides/rel_notes/release_19_11.rst       |   4 +
>  drivers/compress/qat/qat_comp.c              | 256 +++++++++++++++++--
>  drivers/compress/qat/qat_comp.h              |  32 +++
>  drivers/compress/qat/qat_comp_pmd.c          | 166 +++++++++++-
>  drivers/compress/qat/qat_comp_pmd.h          |   2 +
>  8 files changed, 462 insertions(+), 61 deletions(-)
> 
> diff --git a/doc/guides/compressdevs/features/default.ini
> b/doc/guides/compressdevs/features/default.ini
> index 829e4df61..5b783b842 100644
> --- a/doc/guides/compressdevs/features/default.ini
> +++ b/doc/guides/compressdevs/features/default.ini
> @@ -6,21 +6,22 @@
>  ; the features table in the documentation.
>  ;
>  [Features]
> -HW Accelerated      =
> -CPU SSE             =
> -CPU AVX             =
> -CPU AVX2            =
> -CPU AVX512          =
> -CPU NEON            =
> -Stateful            =
> -Pass-through        =
> -OOP SGL In SGL Out  =
> -OOP SGL In LB  Out  =
> -OOP LB  In SGL Out  =
> -Deflate             =
> -LZS                 =
> -Adler32             =
> -Crc32               =
> -Adler32&Crc32       =
> -Fixed               =
> -Dynamic             =
> +HW Accelerated         =
> +Stateful Decompression =
> +CPU SSE                =
> +CPU AVX                =
> +CPU AVX2               =
> +CPU AVX512             =
> +CPU NEON               =
> +Stateful               =
[Fiona] Please move Stateful Decompression down after this and rename this to Stateful Compression.
And check if any other driver ini files need a corresponding update - though I don't expect so, as I don't think there is any stateful support there yet.

And as you're sending a v3, please add sections for v2 changes and v3 changes in the cover letter.
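
To illustrate the request above (this fragment is not part of the original mail): with the rename and the move applied, and the remaining entries left as in the patch, the relevant part of the [Features] table in default.ini would read roughly

  HW Accelerated         =
  CPU SSE                =
  CPU AVX                =
  CPU AVX2               =
  CPU AVX512             =
  CPU NEON               =
  Stateful Compression   =
  Stateful Decompression =
  Pass-through           =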


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression
  2019-09-20 20:06     ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Adam Dybkowski
                         ` (2 preceding siblings ...)
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 3/3] test/compress: add stateful decompression tests Adam Dybkowski
@ 2019-09-23 15:56       ` Trahe, Fiona
  2019-09-27 14:42         ` Akhil Goyal
  3 siblings, 1 reply; 22+ messages in thread
From: Trahe, Fiona @ 2019-09-23 15:56 UTC (permalink / raw)
  To: Dybkowski, AdamX, dev, Trybula, ArturX, akhil.goyal; +Cc: Trahe, Fiona



> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Friday, September 20, 2019 9:06 PM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; Trybula, ArturX
> <arturx.trybula@intel.com>; akhil.goyal@nxp.com
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH v3 0/3] compress/qat: add stateful decompression
> 
> This patchset adds the stateful decompression feature
> to the QAT PMD, together with the documentation updates
> and two new unit tests.
> ---
> v3:
> * Minor corrections in features list in the documentation.
> 
> v2:
> * Typo correction in the error message.
> 
> Adam Dybkowski (3):
>   common/qat: add QAT RAM bank definitions
>   compress/qat: add stateful decompression
>   test/compress: add stateful decompression tests
> 
>  app/test/test_compressdev.c                  | 449 ++++++++++++++++---
>  doc/guides/compressdevs/features/default.ini |  37 +-
>  doc/guides/compressdevs/features/qat.ini     |  21 +-
>  doc/guides/compressdevs/qat_comp.rst         |   5 +
>  doc/guides/rel_notes/release_19_11.rst       |   4 +
>  drivers/common/qat/qat_adf/icp_qat_fw_comp.h |  73 +++
>  drivers/compress/qat/qat_comp.c              | 256 ++++++++++-
>  drivers/compress/qat/qat_comp.h              |  32 ++
>  drivers/compress/qat/qat_comp_pmd.c          | 166 ++++++-
>  drivers/compress/qat/qat_comp_pmd.h          |   2 +
>  10 files changed, 929 insertions(+), 116 deletions(-)
> 
> --
> 2.17.1
Series-acked-by: Fiona Trahe <fiona.trahe@intel.com>


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
@ 2019-09-24 11:16         ` Trahe, Fiona
  0 siblings, 0 replies; 22+ messages in thread
From: Trahe, Fiona @ 2019-09-24 11:16 UTC (permalink / raw)
  To: Dybkowski, AdamX, dev, Trybula, ArturX, akhil.goyal



> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Friday, September 20, 2019 9:06 PM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; Trybula, ArturX
> <arturx.trybula@intel.com>; akhil.goyal@nxp.com
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH v3 1/3] common/qat: add QAT RAM bank definitions
> 
> This patch adds QAT RAM bank definitions and related macros.
> 
> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v3 2/3] compress/qat: add stateful decompression
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 2/3] compress/qat: add stateful decompression Adam Dybkowski
@ 2019-09-24 11:17         ` Trahe, Fiona
  0 siblings, 0 replies; 22+ messages in thread
From: Trahe, Fiona @ 2019-09-24 11:17 UTC (permalink / raw)
  To: Dybkowski, AdamX, dev, Trybula, ArturX, akhil.goyal



> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Friday, September 20, 2019 9:06 PM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; Trybula, ArturX
> <arturx.trybula@intel.com>; akhil.goyal@nxp.com
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH v3 2/3] compress/qat: add stateful decompression
> 
> This patch adds the stateful decompression feature
> to the DPDK QAT PMD.
> 
> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v3 3/3] test/compress: add stateful decompression tests
  2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 3/3] test/compress: add stateful decompression tests Adam Dybkowski
@ 2019-09-24 11:18         ` Trahe, Fiona
  0 siblings, 0 replies; 22+ messages in thread
From: Trahe, Fiona @ 2019-09-24 11:18 UTC (permalink / raw)
  To: Dybkowski, AdamX, dev, Trybula, ArturX, akhil.goyal



> -----Original Message-----
> From: Dybkowski, AdamX
> Sent: Friday, September 20, 2019 9:06 PM
> To: dev@dpdk.org; Trahe, Fiona <fiona.trahe@intel.com>; Trybula, ArturX
> <arturx.trybula@intel.com>; akhil.goyal@nxp.com
> Cc: Dybkowski, AdamX <adamx.dybkowski@intel.com>
> Subject: [PATCH v3 3/3] test/compress: add stateful decompression tests
> 
> This patch adds two new tests that cover the stateful
> decompression feature.
> 
> Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Acked-by: Fiona Trahe <fiona.trahe@intel.com>


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression
  2019-09-23 15:56       ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Trahe, Fiona
@ 2019-09-27 14:42         ` Akhil Goyal
  0 siblings, 0 replies; 22+ messages in thread
From: Akhil Goyal @ 2019-09-27 14:42 UTC (permalink / raw)
  To: Trahe, Fiona, Dybkowski, AdamX, dev, Trybula, ArturX

> > This patchset adds the stateful decompression feature
> > to the QAT PMD, together with the documentation updates
> > and two new unit tests.
> > ---
> > v3:
> > * Minor corrections in features list in the documentation.
> >
> > v2:
> > * Typo correction in the error message.
> >
> > Adam Dybkowski (3):
> >   common/qat: add QAT RAM bank definitions
> >   compress/qat: add stateful decompression
> >   test/compress: add stateful decompression tests
> >
> >  app/test/test_compressdev.c                  | 449 ++++++++++++++++---
> >  doc/guides/compressdevs/features/default.ini |  37 +-
> >  doc/guides/compressdevs/features/qat.ini     |  21 +-
> >  doc/guides/compressdevs/qat_comp.rst         |   5 +
> >  doc/guides/rel_notes/release_19_11.rst       |   4 +
> >  drivers/common/qat/qat_adf/icp_qat_fw_comp.h |  73 +++
> >  drivers/compress/qat/qat_comp.c              | 256 ++++++++++-
> >  drivers/compress/qat/qat_comp.h              |  32 ++
> >  drivers/compress/qat/qat_comp_pmd.c          | 166 ++++++-
> >  drivers/compress/qat/qat_comp_pmd.h          |   2 +
> >  10 files changed, 929 insertions(+), 116 deletions(-)
> >
> > --
> > 2.17.1
> Series-acked-by: Fiona Trahe <fiona.trahe@intel.com>
Applied to dpdk-next-crypto

Thanks.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression
  2019-08-26  7:13 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
@ 2019-08-26  7:13 ` Adam Dybkowski
  0 siblings, 0 replies; 22+ messages in thread
From: Adam Dybkowski @ 2019-08-26  7:13 UTC (permalink / raw)
  To: dev, fiona.trahe, akhil.goyal, arturx.trybula; +Cc: Adam Dybkowski

This patch adds the stateful decompression feature
to the DPDK QAT PMD.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 drivers/compress/qat/qat_comp.c     | 256 +++++++++++++++++++++++++---
 drivers/compress/qat/qat_comp.h     |  32 ++++
 drivers/compress/qat/qat_comp_pmd.c | 166 ++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.h |   2 +
 4 files changed, 423 insertions(+), 33 deletions(-)
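
Below, for orientation only, is a hypothetical sketch of how an application could drive the new feature through the public compressdev API; it is not part of the patch, and dev_id, qp_id, op_pool, stream and the two mbufs are placeholders assumed to be set up elsewhere:

#include <errno.h>
#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical helper: submit one chunk of a DEFLATE stream for stateful
 * decompression and wait for its completion. All handles are placeholders. */
static int
stateful_decomp_chunk(uint8_t dev_id, uint16_t qp_id,
		      struct rte_mempool *op_pool, void *stream,
		      struct rte_mbuf *in_mbuf, struct rte_mbuf *out_mbuf,
		      int last_chunk)
{
	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
	struct rte_comp_op *done = NULL;
	int ret;

	if (op == NULL)
		return -ENOMEM;

	op->op_type = RTE_COMP_OP_STATEFUL;
	op->stream = stream;	/* from rte_compressdev_stream_create() */
	op->m_src = in_mbuf;
	op->m_dst = out_mbuf;
	op->src.offset = 0;
	op->src.length = rte_pktmbuf_pkt_len(in_mbuf);
	op->dst.offset = 0;
	/* intermediate chunks flush with SYNC, the last one with FINAL */
	op->flush_flag = last_chunk ? RTE_COMP_FLUSH_FINAL : RTE_COMP_FLUSH_SYNC;

	if (rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1) != 1) {
		rte_comp_op_free(op);
		return -EIO;
	}
	while (rte_compressdev_dequeue_burst(dev_id, qp_id, &done, 1) == 0)
		; /* busy-wait; a real application would bound this */

	/* OUT_OF_SPACE_RECOVERABLE: keep the 'produced' bytes and resubmit
	 * the remaining input with a fresh destination buffer. */
	ret = (done->status == RTE_COMP_OP_STATUS_SUCCESS ||
	       done->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) ?
			0 : -EIO;
	rte_comp_op_free(done);
	return ret;
}

The stream handle itself would come from rte_compressdev_stream_create(dev_id, &decomp_xform, &stream) and be released with rte_compressdev_stream_free(dev_id, stream); the patch below adds the PMD side of those calls.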

diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index 835aaa838..a80cd6864 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -27,22 +27,51 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 	struct rte_comp_op *op = in_op;
 	struct qat_comp_op_cookie *cookie =
 			(struct qat_comp_op_cookie *)op_cookie;
-	struct qat_comp_xform *qat_xform = op->private_xform;
-	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
+	const uint8_t *tmpl;
 	struct icp_qat_fw_comp_req *comp_req =
 	    (struct icp_qat_fw_comp_req *)out_msg;
 
-	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
-				"operation requests, op (%p) is not a "
-				"stateless operation.", op);
-		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
-		return -EINVAL;
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = op->stream;
+		qat_xform = &stream->qat_xform;
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			return -EINVAL;
+		}
+		if (unlikely(stream->op_in_progress)) {
+			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateless operations on the same stream at once");
+			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
+			return -EINVAL;
+		}
+		stream->op_in_progress = 1;
+	} else {
+		stream = NULL;
+		qat_xform = op->private_xform;
 	}
+	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
 
 	rte_mov128(out_msg, tmpl);
 	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 
+	if (op->op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comp_pars.req_par_flags =
+			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+				(stream->start_of_packet) ?
+					ICP_QAT_FW_COMP_SOP
+				      : ICP_QAT_FW_COMP_NOT_SOP,
+				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
+				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
+					ICP_QAT_FW_COMP_EOP
+				      : ICP_QAT_FW_COMP_NOT_EOP,
+				ICP_QAT_FW_COMP_NOT_BFINAL,
+				ICP_QAT_FW_COMP_NO_CNV,
+				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+	}
+
 	if (likely(qat_xform->qat_comp_request_type ==
 		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
 		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
@@ -94,6 +123,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_src->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -111,6 +143,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -129,6 +164,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 					   " for %d elements of SGL",
 					   op->m_dst->nb_segs);
 				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* clear op-in-progress flag */
+				if (stream)
+					stream->op_in_progress = 0;
 				return -ENOMEM;
 			}
 			/* new SGL is valid now */
@@ -146,6 +184,9 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			/* clear op-in-progress flag */
+			if (stream)
+				stream->op_in_progress = 0;
 			return ret;
 		}
 
@@ -202,12 +243,22 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 			(struct qat_comp_op_cookie *)op_cookie;
 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
 			(resp_msg->opaque_data);
-	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
-				(rx_op->private_xform);
+	struct qat_comp_stream *stream;
+	struct qat_comp_xform *qat_xform;
 	int err = resp_msg->comn_resp.comn_status &
 			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
 			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
 
+	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+		stream = rx_op->stream;
+		qat_xform = &stream->qat_xform;
+		/* clear op-in-progress flag */
+		stream->op_in_progress = 0;
+	} else {
+		stream = NULL;
+		qat_xform = rx_op->private_xform;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
 	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -254,7 +305,21 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		int8_t xlat_err_code =
 			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
 
-		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
+		/* handle recoverable out-of-buffer condition in stateful */
+		/* decompression scenario */
+		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
+				&& qat_xform->qat_comp_request_type
+					== QAT_COMP_REQUEST_DECOMPRESS
+				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+			struct icp_qat_fw_resp_comp_pars *comp_resp =
+					&resp_msg->comp_resp_pars;
+			rx_op->status =
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
+			rx_op->consumed = comp_resp->input_byte_counter;
+			rx_op->produced = comp_resp->output_byte_counter;
+			stream->start_of_packet = 0;
+		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
+			  && !xlat_err_code)
 				||
 		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
 				||
@@ -275,6 +340,8 @@ qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
 		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
 		rx_op->consumed = comp_resp->input_byte_counter;
 		rx_op->produced = comp_resp->output_byte_counter;
+		if (stream)
+			stream->start_of_packet = 0;
 
 		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
 			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
@@ -297,6 +364,12 @@ qat_comp_xform_size(void)
 	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
 }
 
+unsigned int
+qat_comp_stream_size(void)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
+}
+
 static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 				    enum qat_comp_request_type request)
 {
@@ -317,7 +390,9 @@ static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
 
 static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 			const struct rte_memzone *interm_buff_mz,
-			const struct rte_comp_xform *xform)
+			const struct rte_comp_xform *xform,
+			const struct qat_comp_stream *stream,
+			enum rte_comp_op_type op_type)
 {
 	struct icp_qat_fw_comp_req *comp_req;
 	int comp_level, algo;
@@ -329,6 +404,18 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 		return -EINVAL;
 	}
 
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		if (unlikely(stream == NULL)) {
+			QAT_LOG(ERR, "Stream must be non null for stateful op");
+			return -EINVAL;
+		}
+		if (unlikely(qat_xform->qat_comp_request_type !=
+			     QAT_COMP_REQUEST_DECOMPRESS)) {
+			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
+			return -ENOTSUP;
+		}
+	}
+
 	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
 		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
 		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
@@ -376,12 +463,43 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 	qat_comp_create_req_hdr(&comp_req->comn_hdr,
 					qat_xform->qat_comp_request_type);
 
-	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
-	    ICP_QAT_FW_COMP_STATELESS_SESSION,
-	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-	    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	if (op_type == RTE_COMP_OP_STATEFUL) {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATEFUL_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+		/* Decompression state registers */
+		comp_req->comp_cd_ctrl.comp_state_addr =
+				stream->state_registers_decomp_phys;
+
+		/* Enable A, B, C, D, and E (CAMs). */
+		comp_req->comp_cd_ctrl.ram_bank_flags =
+			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
+				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
+				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
+				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
+
+		comp_req->comp_cd_ctrl.ram_banks_addr =
+				stream->inflate_context_phys;
+	} else {
+		comp_req->comn_hdr.serv_specif_flags =
+				ICP_QAT_FW_COMP_FLAGS_BUILD(
+			ICP_QAT_FW_COMP_STATELESS_SESSION,
+			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+	}
 
 	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
 	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
@@ -497,7 +615,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
 		qat_xform->checksum_type = xform->decompress.chksum;
 	}
 
-	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
+				      NULL, RTE_COMP_OP_STATELESS)) {
 		QAT_LOG(ERR, "QAT: Problem with setting compression");
 		return -EINVAL;
 	}
@@ -532,3 +651,102 @@ qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
 	}
 	return -EINVAL;
 }
+
+/**
+ * Reset stream state for the next use.
+ *
+ * @param stream
+ *   handle of pmd's private stream data
+ */
+static void
+qat_comp_stream_reset(struct qat_comp_stream *stream)
+{
+	if (stream) {
+		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
+		stream->start_of_packet = 1;
+		stream->op_in_progress = 0;
+	}
+}
+
+/**
+ * Create driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param xform
+ *   xform data
+ * @param stream
+ *   ptr where handle of pmd's private stream data should be stored
+ * @return
+ *  - Returns 0 if private stream structure has been created successfully.
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -ENOTSUP if comp device does not support the comp transform.
+ *  - Returns -ENOMEM if the private stream could not be allocated.
+ */
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream)
+{
+	struct qat_comp_dev_private *qat = dev->data->dev_private;
+	struct qat_comp_stream *ptr;
+
+	if (unlikely(stream == NULL)) {
+		QAT_LOG(ERR, "QAT: stream parameter is NULL");
+		return -EINVAL;
+	}
+	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
+		QAT_LOG(ERR, "QAT: stateful compression not supported");
+		return -ENOTSUP;
+	}
+	if (unlikely(qat->streampool == NULL)) {
+		QAT_LOG(ERR, "QAT device has no stream mempool");
+		return -ENOMEM;
+	}
+	if (rte_mempool_get(qat->streampool, stream)) {
+		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
+		return -ENOMEM;
+	}
+
+	ptr = (struct qat_comp_stream *) *stream;
+	qat_comp_stream_reset(ptr);
+	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+	ptr->qat_xform.checksum_type = xform->decompress.chksum;
+
+	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
+				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
+		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
+		rte_mempool_put(qat->streampool, *stream);
+		*stream = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * Free driver private stream data.
+ *
+ * @param dev
+ *   Compressdev device
+ * @param stream
+ *   handle of pmd's private stream data
+ * @return
+ *  - 0 if successful
+ *  - <0 in error cases
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ *  - Returns -EBUSY if can't free stream as there are inflight operations
+ */
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
+{
+	if (stream) {
+		struct qat_comp_dev_private *qat = dev->data->dev_private;
+		qat_comp_stream_reset((struct qat_comp_stream *) stream);
+		rte_mempool_put(qat->streampool, stream);
+		return 0;
+	}
+	return -EINVAL;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 61d12ecf4..2231451a1 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -26,6 +26,16 @@
 
 #define QAT_MIN_OUT_BUF_SIZE 46
 
+/* maximum size of the state registers */
+#define QAT_STATE_REGISTERS_MAX_SIZE 64
+
+/* decompressor context size */
+#define QAT_INFLATE_CONTEXT_SIZE_GEN1 36864
+#define QAT_INFLATE_CONTEXT_SIZE_GEN2 34032
+#define QAT_INFLATE_CONTEXT_SIZE_GEN3 34032
+#define QAT_INFLATE_CONTEXT_SIZE RTE_MAX(RTE_MAX(QAT_INFLATE_CONTEXT_SIZE_GEN1,\
+		QAT_INFLATE_CONTEXT_SIZE_GEN2), QAT_INFLATE_CONTEXT_SIZE_GEN3)
+
 enum qat_comp_request_type {
 	QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
 	QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
@@ -61,6 +71,17 @@ struct qat_comp_xform {
 	enum rte_comp_checksum_type checksum_type;
 };
 
+struct qat_comp_stream {
+	struct qat_comp_xform qat_xform;
+	void *state_registers_decomp;
+	phys_addr_t state_registers_decomp_phys;
+	void *inflate_context;
+	phys_addr_t inflate_context_phys;
+	const struct rte_memzone *memzone;
+	uint8_t start_of_packet;
+	volatile uint8_t op_in_progress;
+};
+
 int
 qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		       enum qat_device_gen qat_dev_gen __rte_unused);
@@ -80,5 +101,16 @@ qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
 unsigned int
 qat_comp_xform_size(void);
 
+unsigned int
+qat_comp_stream_size(void);
+
+int
+qat_comp_stream_create(struct rte_compressdev *dev,
+		       const struct rte_comp_xform *xform,
+		       void **stream);
+
+int
+qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+
 #endif
 #endif
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 072647217..05b7dfe77 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -9,6 +9,12 @@
 
 #define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
 
+struct stream_create_info {
+	struct qat_comp_dev_private *comp_dev;
+	int socket_id;
+	int error;
+};
+
 static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 	{/* COMPRESSION - deflate */
 	 .algo = RTE_COMP_ALGO_DEFLATE,
@@ -21,7 +27,8 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
 				RTE_COMP_FF_HUFFMAN_DYNAMIC |
 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
 				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
-				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+				RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+				RTE_COMP_FF_STATEFUL_DECOMPRESSION,
 	 .window_size = {.min = 15, .max = 15, .increment = 0} },
 	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
 
@@ -315,6 +322,120 @@ qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
 	return mp;
 }
 
+static void
+qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
+		     void *obj, unsigned int obj_idx)
+{
+	struct stream_create_info *info = opaque;
+	struct qat_comp_stream *stream = obj;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *memzone;
+	struct qat_inter_sgl *ram_banks_desc;
+
+	/* find a memzone for RAM banks */
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
+		 info->comp_dev->qat_dev->name, obj_idx);
+	memzone = rte_memzone_lookup(mz_name);
+	if (memzone == NULL) {
+		/* allocate a memzone for compression state and RAM banks */
+		memzone = rte_memzone_reserve_aligned(mz_name,
+			QAT_STATE_REGISTERS_MAX_SIZE
+				+ sizeof(struct qat_inter_sgl)
+				+ QAT_INFLATE_CONTEXT_SIZE,
+			info->socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
+		if (memzone == NULL) {
+			QAT_LOG(ERR,
+			    "Can't allocate RAM banks for device %s, object %u",
+				info->comp_dev->qat_dev->name, obj_idx);
+			info->error = -ENOMEM;
+			return;
+		}
+	}
+
+	/* prepare the buffer list descriptor for RAM banks */
+	ram_banks_desc = (struct qat_inter_sgl *)
+		(((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
+	ram_banks_desc->num_bufs = 1;
+	ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
+	ram_banks_desc->buffers[0].addr = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE
+			+ sizeof(struct qat_inter_sgl);
+
+	memset(stream, 0, qat_comp_stream_size());
+	stream->memzone = memzone;
+	stream->state_registers_decomp = memzone->addr;
+	stream->state_registers_decomp_phys = memzone->iova;
+	stream->inflate_context = ((uint8_t *) memzone->addr)
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+	stream->inflate_context_phys = memzone->iova
+			+ QAT_STATE_REGISTERS_MAX_SIZE;
+}
+
+static void
+qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
+			void *opaque __rte_unused, void *obj,
+			unsigned obj_idx __rte_unused)
+{
+	struct qat_comp_stream *stream = obj;
+
+	rte_memzone_free(stream->memzone);
+}
+
+static struct rte_mempool *
+qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
+			    int socket_id,
+			    uint32_t num_elements)
+{
+	char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mp;
+
+	snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
+		 "%s_streams", comp_dev->qat_dev->name);
+
+	QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
+	mp = rte_mempool_lookup(stream_pool_name);
+
+	if (mp != NULL) {
+		QAT_LOG(DEBUG, "streampool already created");
+		if (mp->size != num_elements) {
+			QAT_LOG(DEBUG, "streampool wrong size - delete it");
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			rte_mempool_free(mp);
+			mp = NULL;
+			comp_dev->streampool = NULL;
+		}
+	}
+
+	if (mp == NULL) {
+		struct stream_create_info info = {
+			.comp_dev = comp_dev,
+			.socket_id = socket_id,
+			.error = 0
+		};
+		mp = rte_mempool_create(stream_pool_name,
+				num_elements,
+				qat_comp_stream_size(), 0, 0,
+				NULL, NULL, qat_comp_stream_init, &info,
+				socket_id, 0);
+		if (mp == NULL) {
+			QAT_LOG(ERR,
+			     "Err creating mempool %s w %d elements of size %d",
+			     stream_pool_name, num_elements,
+			     qat_comp_stream_size());
+		} else if (info.error) {
+			rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+			QAT_LOG(ERR,
+			     "Destroying mempool %s as at least one element failed initialisation",
+			     stream_pool_name);
+			rte_mempool_free(mp);
+			mp = NULL;
+		}
+	}
+
+	return mp;
+}
+
 static void
 _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 {
@@ -330,6 +451,14 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
 		rte_mempool_free(comp_dev->xformpool);
 		comp_dev->xformpool = NULL;
 	}
+
+	/* Free stream pool */
+	if (comp_dev->streampool) {
+		rte_mempool_obj_iter(comp_dev->streampool,
+				     qat_comp_stream_destroy, NULL);
+		rte_mempool_free(comp_dev->streampool);
+		comp_dev->streampool = NULL;
+	}
 }
 
 static int
@@ -339,12 +468,6 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
 	int ret = 0;
 
-	if (config->max_nb_streams != 0) {
-		QAT_LOG(ERR,
-	"QAT device does not support STATEFUL so max_nb_streams must be 0");
-		return -EINVAL;
-	}
-
 	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
 		QAT_LOG(WARNING,
 			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
@@ -360,13 +483,26 @@ qat_comp_dev_config(struct rte_compressdev *dev,
 		}
 	}
 
-	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
-					config->max_nb_priv_xforms);
-	if (comp_dev->xformpool == NULL) {
+	if (config->max_nb_priv_xforms) {
+		comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+					    config, config->max_nb_priv_xforms);
+		if (comp_dev->xformpool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->xformpool = NULL;
+
+	if (config->max_nb_streams) {
+		comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
+				     config->socket_id, config->max_nb_streams);
+		if (comp_dev->streampool == NULL) {
+			ret = -ENOMEM;
+			goto error_out;
+		}
+	} else
+		comp_dev->streampool = NULL;
 
-		ret = -ENOMEM;
-		goto error_out;
-	}
 	return 0;
 
 error_out:
@@ -508,7 +644,9 @@ static struct rte_compressdev_ops compress_qat_ops = {
 
 	/* Compression related operations */
 	.private_xform_create	= qat_comp_private_xform_create,
-	.private_xform_free	= qat_comp_private_xform_free
+	.private_xform_free	= qat_comp_private_xform_free,
+	.stream_create		= qat_comp_stream_create,
+	.stream_free		= qat_comp_stream_free
 };
 
 /* An rte_driver is needed in the registration of the device with compressdev.
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
index b8299d43a..6979de14d 100644
--- a/drivers/compress/qat/qat_comp_pmd.h
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -30,6 +30,8 @@ struct qat_comp_dev_private {
 	/**< The device's memory for intermediate buffers */
 	struct rte_mempool *xformpool;
 	/**< The device's pool for qat_comp_xforms */
+	struct rte_mempool *streampool;
+	/**< The device's pool for qat_comp_streams */
 };
 
 int
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2019-09-27 14:42 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-08-26  7:44 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
2019-08-26  7:44 ` [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression Adam Dybkowski
2019-09-20 10:09   ` Trahe, Fiona
2019-08-26  7:45 ` [dpdk-dev] [PATCH 3/4] test/compress: add stateful decompression tests Adam Dybkowski
2019-08-26  7:45 ` [dpdk-dev] [PATCH 4/4] doc/guides: add stateful feature in QAT Adam Dybkowski
2019-09-19 13:34   ` Akhil Goyal
2019-09-19 13:38     ` Akhil Goyal
2019-09-20 12:44 ` [dpdk-dev] [PATCH v2 0/3] compress/qat: add stateful decompression Adam Dybkowski
2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
2019-09-20 20:06     ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Adam Dybkowski
2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 1/3] common/qat: add QAT RAM bank definitions Adam Dybkowski
2019-09-24 11:16         ` Trahe, Fiona
2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 2/3] compress/qat: add stateful decompression Adam Dybkowski
2019-09-24 11:17         ` Trahe, Fiona
2019-09-20 20:06       ` [dpdk-dev] [PATCH v3 3/3] test/compress: add stateful decompression tests Adam Dybkowski
2019-09-24 11:18         ` Trahe, Fiona
2019-09-23 15:56       ` [dpdk-dev] [PATCH v3 0/3] compress/qat: add stateful decompression Trahe, Fiona
2019-09-27 14:42         ` Akhil Goyal
2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 2/3] " Adam Dybkowski
2019-09-23  9:46     ` Trahe, Fiona
2019-09-20 12:44   ` [dpdk-dev] [PATCH v2 3/3] test/compress: add stateful decompression tests Adam Dybkowski
  -- strict thread matches above, loose matches on Subject: below --
2019-08-26  7:13 [dpdk-dev] [PATCH 1/4] common/qat: add QAT RAM bank definitions Adam Dybkowski
2019-08-26  7:13 ` [dpdk-dev] [PATCH 2/4] compress/qat: add stateful decompression Adam Dybkowski

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.