All of lore.kernel.org
 help / color / mirror / Atom feed
From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: declan.doherty@intel.com
Cc: dev@dpdk.org, Pablo de Lara <pablo.de.lara.guarch@intel.com>
Subject: [PATCH 3/3] app/crypto-perf: add range/list of sizes
Date: Fri,  3 Mar 2017 16:13:12 +0000	[thread overview]
Message-ID: <1488557592-46193-4-git-send-email-pablo.de.lara.guarch@intel.com> (raw)
In-Reply-To: <1488557592-46193-1-git-send-email-pablo.de.lara.guarch@intel.com>

So far, the crypto performance application was only able to
test one buffer size and one burst size.

With this commit, multiple sizes can be passed, either as a range
of values or as a list of values.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
 app/test-crypto-perf/cperf_ops.c                 |  20 +-
 app/test-crypto-perf/cperf_options.h             |  29 +-
 app/test-crypto-perf/cperf_options_parsing.c     | 234 +++++++++++--
 app/test-crypto-perf/cperf_test_latency.c        | 400 +++++++++++------------
 app/test-crypto-perf/cperf_test_throughput.c     | 340 ++++++++++---------
 app/test-crypto-perf/cperf_test_vector_parsing.c |  12 +-
 app/test-crypto-perf/cperf_test_vectors.c        |   8 +-
 app/test-crypto-perf/cperf_test_verify.c         | 115 +++----
 app/test-crypto-perf/cperf_verify_parser.c       |   4 +-
 app/test-crypto-perf/main.c                      |  50 ++-
 doc/guides/tools/cryptoperf.rst                  |  15 +
 11 files changed, 707 insertions(+), 520 deletions(-)

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index b8c0398..e25e011 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -53,7 +53,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -78,7 +78,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* auth parameters */
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -107,7 +107,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -139,7 +139,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.length = options->auth_digest_sz;
 		} else {
 
-			uint32_t offset = options->buffer_sz;
+			uint32_t offset = options->test_buffer_size;
 			struct rte_mbuf *buf, *tbuf;
 
 			if (options->out_of_place) {
@@ -166,7 +166,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 
 		}
 
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -195,7 +195,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 
 		/* authentication parameters */
@@ -206,7 +206,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.length = options->auth_digest_sz;
 		} else {
 
-			uint32_t offset = options->buffer_sz;
+			uint32_t offset = options->test_buffer_size;
 			struct rte_mbuf *buf, *tbuf;
 
 			if (options->out_of_place) {
@@ -232,7 +232,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.aad.length = options->auth_aad_sz;
 		}
 
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -261,7 +261,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset =
 				RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
 
@@ -302,7 +302,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 			sym_op->auth.digest.length = options->auth_digest_sz;
 		}
 
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = options->auth_aad_sz;
 	}
 
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 823059d..fc34c48 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -32,6 +32,7 @@
 #define CPERF_AUTH_AAD_SZ	("auth-aad-sz")
 #define CPERF_CSV		("csv-friendly")
 
+#define MAX_LIST 32
 
 enum cperf_perf_test_type {
 	CPERF_TEST_TYPE_THROUGHPUT,
@@ -58,21 +59,14 @@ struct cperf_options {
 
 	uint32_t pool_sz;
 	uint32_t total_ops;
-	uint32_t burst_sz;
-	uint32_t buffer_sz;
 	uint32_t segments_nb;
-
-	char device_type[RTE_CRYPTODEV_NAME_LEN];
-	enum cperf_op_type op_type;
+	uint32_t test_buffer_size;
 
 	uint32_t sessionless:1;
 	uint32_t out_of_place:1;
 	uint32_t silent:1;
 	uint32_t csv:1;
 
-	char *test_file;
-	char *test_name;
-
 	enum rte_crypto_cipher_algorithm cipher_algo;
 	enum rte_crypto_cipher_operation cipher_op;
 
@@ -85,6 +79,25 @@ struct cperf_options {
 	uint16_t auth_key_sz;
 	uint16_t auth_digest_sz;
 	uint16_t auth_aad_sz;
+
+	char device_type[RTE_CRYPTODEV_NAME_LEN];
+	enum cperf_op_type op_type;
+
+	char *test_file;
+	char *test_name;
+
+	uint32_t buffer_size_list[MAX_LIST];
+	uint8_t buffer_size_count;
+	uint32_t max_buffer_size;
+	uint32_t min_buffer_size;
+	uint32_t inc_buffer_size;
+
+	uint32_t burst_size_list[MAX_LIST];
+	uint8_t burst_size_count;
+	uint32_t max_burst_size;
+	uint32_t min_burst_size;
+	uint32_t inc_burst_size;
+
 };
 
 void
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index d89c239..3a3259f 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -38,6 +38,9 @@
 
 #include "cperf_options.h"
 
+#define AES_BLOCK_SIZE 16
+#define DES_BLOCK_SIZE 8
+
 struct name_id_map {
 	const char *name;
 	uint32_t id;
@@ -124,6 +127,132 @@ parse_uint16_t(uint16_t *value, const char *arg)
 }
 
 static int
+parse_range(const char *arg, uint32_t *min, uint32_t *max, uint32_t *inc)
+{
+	char *token;
+	uint32_t number;
+
+	char *copy_arg = strdup(arg);
+
+	if (copy_arg == NULL)
+		return -1;
+
+	token = strtok(copy_arg, ":");
+
+	/* Parse minimum value */
+	if (token != NULL) {
+		number = strtoul(token, NULL, 10);
+
+		if (errno == EINVAL || errno == ERANGE ||
+				number == 0)
+			goto err_range;
+
+		*min = number;
+	} else
+		goto err_range;
+
+	token = strtok(NULL, ":");
+
+	/* Parse increment value */
+	if (token != NULL) {
+		number = strtoul(token, NULL, 10);
+
+		if (errno == EINVAL || errno == ERANGE ||
+				number == 0)
+			goto err_range;
+
+		*inc = number;
+	} else
+		goto err_range;
+
+	token = strtok(NULL, ":");
+
+	/* Parse maximum value */
+	if (token != NULL) {
+		number = strtoul(token, NULL, 10);
+
+		if (errno == EINVAL || errno == ERANGE ||
+				number == 0 ||
+				number < *min)
+			goto err_range;
+
+		*max = number;
+	} else
+		goto err_range;
+
+	if (strtok(NULL, ":") != NULL)
+		goto err_range;
+
+	free(copy_arg);
+	return 0;
+
+err_range:
+	free(copy_arg);
+	return -1;
+}
+
+static int
+parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
+{
+	char *token;
+	uint32_t number;
+	uint8_t count = 0;
+
+	char *copy_arg = strdup(arg);
+
+	if (copy_arg == NULL)
+		return -1;
+
+	token = strtok(copy_arg, ",");
+
+	/* Parse first value */
+	if (token != NULL) {
+		number = strtoul(token, NULL, 10);
+
+		if (errno == EINVAL || errno == ERANGE ||
+				number == 0)
+			goto err_list;
+
+		list[count++] = number;
+		*min = number;
+		*max = number;
+	} else
+		goto err_list;
+
+	token = strtok(NULL, ",");
+
+	while (token != NULL) {
+		if (count == MAX_LIST) {
+			RTE_LOG(WARNING, USER1, "Using only the first %u sizes\n",
+					MAX_LIST);
+			break;
+		}
+
+		number = strtoul(token, NULL, 10);
+
+		if (errno == EINVAL || errno == ERANGE ||
+				number == 0)
+			goto err_list;
+
+		list[count++] = number;
+
+		if (number < *min)
+			*min = number;
+		if (number > *max)
+			*max = number;
+
+		token = strtok(NULL, ",");
+	}
+
+	free(copy_arg);
+	return count;
+
+err_list:
+	free(copy_arg);
+	return -1;
+}
+
+static int
 parse_total_ops(struct cperf_options *opts, const char *arg)
 {
 	int ret = parse_uint32_t(&opts->total_ops, arg);
@@ -153,32 +282,43 @@ parse_pool_sz(struct cperf_options *opts, const char *arg)
 static int
 parse_burst_sz(struct cperf_options *opts, const char *arg)
 {
-	int ret = parse_uint32_t(&opts->burst_sz, arg);
+	int ret;
+
+	/* Try parsing the argument as a range, if it fails, parse it as a list */
+	if (parse_range(arg, &opts->min_burst_size, &opts->max_burst_size,
+			&opts->inc_burst_size) < 0) {
+		ret = parse_list(arg, opts->burst_size_list,
+					&opts->min_burst_size,
+					&opts->max_burst_size);
+		if (ret < 0) {
+			RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+			return -1;
+		}
+		opts->burst_size_count = ret;
+	}
 
-	if (ret)
-		RTE_LOG(ERR, USER1, "failed to parse burst size");
-	return ret;
+	return 0;
 }
 
 static int
 parse_buffer_sz(struct cperf_options *opts, const char *arg)
 {
-	uint32_t i, valid_buf_sz[] = {
-			32, 64, 128, 256, 384, 512, 768, 1024, 1280, 1536, 1792,
-			2048
-	};
-
-	if (parse_uint32_t(&opts->buffer_sz, arg)) {
-		RTE_LOG(ERR, USER1, "failed to parse buffer size");
-		return -1;
+	int ret;
+
+	/* Try parsing the argument as a range, if it fails, parse it as a list */
+	if (parse_range(arg, &opts->min_buffer_size, &opts->max_buffer_size,
+			&opts->inc_buffer_size) < 0) {
+		ret = parse_list(arg, opts->buffer_size_list,
+					&opts->min_buffer_size,
+					&opts->max_buffer_size);
+		if (ret < 0) {
+			RTE_LOG(ERR, USER1, "failed to parse buffer size/s\n");
+			return -1;
+		}
+		opts->buffer_size_count = ret;
 	}
 
-	for (i = 0; i < RTE_DIM(valid_buf_sz); i++)
-		if (valid_buf_sz[i] == opts->buffer_sz)
-			return 0;
-
-	RTE_LOG(ERR, USER1, "invalid buffer size specified");
-	return -1;
+	return 0;
 }
 
 static int
@@ -474,8 +614,19 @@ cperf_options_default(struct cperf_options *opts)
 
 	opts->pool_sz = 8192;
 	opts->total_ops = 10000000;
-	opts->burst_sz = 32;
-	opts->buffer_sz = 64;
+
+	opts->buffer_size_list[0] = 64;
+	opts->buffer_size_count = 1;
+	opts->max_buffer_size = 64;
+	opts->min_buffer_size = 64;
+	opts->inc_buffer_size = 0;
+
+	opts->burst_size_list[0] = 32;
+	opts->burst_size_count = 1;
+	opts->max_burst_size = 32;
+	opts->min_burst_size = 32;
+	opts->inc_burst_size = 0;
+
 	opts->segments_nb = 1;
 
 	strncpy(opts->device_type, "crypto_aesni_mb",
@@ -569,7 +720,9 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
 int
 cperf_options_check(struct cperf_options *options)
 {
-	if (options->segments_nb > options->buffer_sz) {
+	uint32_t buffer_size;
+
+	if (options->segments_nb > options->min_buffer_size) {
 		RTE_LOG(ERR, USER1,
 				"Segments number greater than buffer size.\n");
 		return -EINVAL;
@@ -632,6 +785,33 @@ cperf_options_check(struct cperf_options *options)
 		}
 	}
 
+	if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
+			options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
+		for (buffer_size = options->min_buffer_size;
+				buffer_size < options->max_buffer_size;
+				buffer_size += options->inc_buffer_size) {
+			if ((buffer_size % AES_BLOCK_SIZE) != 0) {
+				RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+					"not suitable for the algorithm selected\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
+			options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
+			options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
+		for (buffer_size = options->min_buffer_size;
+				buffer_size < options->max_buffer_size;
+				buffer_size += options->inc_buffer_size) {
+			if ((buffer_size % DES_BLOCK_SIZE) != 0) {
+				RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+					"not suitable for the algorithm selected\n");
+				return -EINVAL;
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -644,9 +824,15 @@ cperf_options_dump(struct cperf_options *opts)
 	printf("#\n");
 	printf("# size of crypto op / mbuf pool: %u\n", opts->pool_sz);
 	printf("# total number of ops: %u\n", opts->total_ops);
-	printf("# burst size: %u\n", opts->burst_sz);
-	printf("# buffer size: %u\n", opts->buffer_sz);
-	printf("# segments per buffer: %u\n", opts->segments_nb);
+	printf("# buffer size:\n");
+	printf("#\t min: %u\n", opts->min_buffer_size);
+	printf("#\t max: %u\n", opts->max_buffer_size);
+	printf("#\t inc: %u\n", opts->inc_buffer_size);
+	printf("# burst sizes:\n");
+	printf("#\t min: %u\n", opts->min_burst_size);
+	printf("#\t max: %u\n", opts->max_burst_size);
+	printf("#\t inc: %u\n", opts->inc_burst_size);
+	printf("\n# segments per buffer: %u\n", opts->segments_nb);
 	printf("#\n");
 	printf("# cryptodev type: %s\n", opts->device_type);
 	printf("#\n");
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 25f7749..84d03cd 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -39,26 +39,6 @@
 #include "cperf_ops.h"
 
 
-struct cperf_latency_results {
-
-	uint64_t ops_failed;
-
-	uint64_t enqd_tot;
-	uint64_t enqd_max;
-	uint64_t enqd_min;
-
-	uint64_t deqd_tot;
-	uint64_t deqd_max;
-	uint64_t deqd_min;
-
-	uint64_t cycles_tot;
-	uint64_t cycles_max;
-	uint64_t cycles_min;
-
-	uint64_t burst_num;
-	uint64_t num;
-};
-
 struct cperf_op_result {
 	uint64_t tsc_start;
 	uint64_t tsc_end;
@@ -84,7 +64,6 @@ struct cperf_latency_ctx {
 	const struct cperf_options *options;
 	const struct cperf_test_vector *test_vector;
 	struct cperf_op_result *res;
-	struct cperf_latency_results results;
 };
 
 #define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
@@ -136,8 +115,8 @@ cperf_mbuf_create(struct rte_mempool *mempool,
 		const struct cperf_test_vector *test_vector)
 {
 	struct rte_mbuf *mbuf;
-	uint32_t segment_sz = options->buffer_sz / segments_nb;
-	uint32_t last_sz = options->buffer_sz % segments_nb;
+	uint32_t segment_sz = options->max_buffer_size / segments_nb;
+	uint32_t last_sz = options->max_buffer_size % segments_nb;
 	uint8_t *mbuf_data;
 	uint8_t *test_data =
 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -239,8 +218,8 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 			options->pool_sz * options->segments_nb, 0, 0,
 			RTE_PKTMBUF_HEADROOM +
 			RTE_CACHE_LINE_ROUNDUP(
-				(options->buffer_sz / options->segments_nb) +
-				(options->buffer_sz % options->segments_nb) +
+				(options->max_buffer_size / options->segments_nb) +
+				(options->max_buffer_size % options->segments_nb) +
 					options->auth_digest_sz),
 			rte_socket_id());
 
@@ -248,9 +227,6 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 		goto err;
 
 	/* Generate mbufs_in with plaintext populated for test */
-	if (ctx->options->pool_sz % ctx->options->burst_sz)
-		goto err;
-
 	ctx->mbufs_in = rte_malloc(NULL,
 			(sizeof(struct rte_mbuf *) *
 			ctx->options->pool_sz), 0);
@@ -273,7 +249,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 				pool_name, options->pool_sz, 0, 0,
 				RTE_PKTMBUF_HEADROOM +
 				RTE_CACHE_LINE_ROUNDUP(
-					options->buffer_sz +
+					options->max_buffer_size +
 					options->auth_digest_sz),
 				rte_socket_id());
 
@@ -324,20 +300,17 @@ cperf_latency_test_runner(void *arg)
 {
 	struct cperf_latency_ctx *ctx = arg;
 	struct cperf_op_result *pres;
+	uint32_t i;
+	uint16_t test_burst_size;
+	uint8_t burst_size_idx = 0;
+
+	static int only_once;
 
 	if (ctx == NULL)
 		return 0;
 
-	struct rte_crypto_op *ops[ctx->options->burst_sz];
-	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
-	uint64_t ops_enqd = 0, ops_deqd = 0;
-	uint16_t ops_unused = 0;
-	uint64_t m_idx = 0, b_idx = 0, i;
-
-	uint64_t tsc_val, tsc_end, tsc_start;
-	uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
-	uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
-	uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
+	struct rte_crypto_op *ops[ctx->options->max_burst_size];
+	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
 
 	uint32_t lcore = rte_lcore_id();
 
@@ -360,143 +333,210 @@ cperf_latency_test_runner(void *arg)
 	for (i = 0; i < ctx->options->total_ops; i++)
 		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-	while (enqd_tot < ctx->options->total_ops) {
+	/* Get first size from range or list */
+	if (ctx->options->inc_burst_size != 0)
+		test_burst_size = ctx->options->min_burst_size;
+	else
+		test_burst_size = ctx->options->burst_size_list[0];
+
+	while (test_burst_size <= ctx->options->max_burst_size) {
+		uint64_t ops_enqd = 0, ops_deqd = 0;
+		uint16_t ops_unused = 0;
+		uint64_t m_idx = 0, b_idx = 0, i;
+
+		uint64_t tsc_val, tsc_end, tsc_start;
+		uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
+		uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
+		uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
+
+		while (enqd_tot < ctx->options->total_ops) {
+
+			uint16_t burst_size = ((enqd_tot + test_burst_size)
+					<= ctx->options->total_ops) ?
+							test_burst_size :
+							ctx->options->total_ops -
+							enqd_tot;
+			uint16_t ops_needed = burst_size - ops_unused;
+
+			/* Allocate crypto ops from pool */
+			if (ops_needed != rte_crypto_op_bulk_alloc(
+					ctx->crypto_op_pool,
+					RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+					ops, ops_needed))
+				return -1;
+
+			/* Setup crypto op, attach mbuf etc */
+			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+					&ctx->mbufs_out[m_idx],
+					ops_needed, ctx->sess, ctx->options,
+					ctx->test_vector);
+
+			tsc_start = rte_rdtsc_precise();
 
-		uint16_t burst_size = ((enqd_tot + ctx->options->burst_sz)
-				<= ctx->options->total_ops) ?
-						ctx->options->burst_sz :
-						ctx->options->total_ops -
-						enqd_tot;
-		uint16_t ops_needed = burst_size - ops_unused;
+#ifdef CPERF_LINEARIZATION_ENABLE
+			if (linearize) {
+				/* PMD doesn't support scatter-gather and source buffer
+				 * is segmented.
+				 * We need to linearize it before enqueuing.
+				 */
+				for (i = 0; i < burst_size; i++)
+					rte_pktmbuf_linearize(ops[i]->sym->m_src);
+			}
+#endif /* CPERF_LINEARIZATION_ENABLE */
 
-		/* Allocate crypto ops from pool */
-		if (ops_needed != rte_crypto_op_bulk_alloc(
-				ctx->crypto_op_pool,
-				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-				ops, ops_needed))
-			return -1;
+			/* Enqueue burst of ops on crypto device */
+			ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+					ops, burst_size);
 
-		/* Setup crypto op, attach mbuf etc */
-		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
-				&ctx->mbufs_out[m_idx],
-				ops_needed, ctx->sess, ctx->options,
-				ctx->test_vector);
+			/* Dequeue processed burst of ops from crypto device */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
 
-		tsc_start = rte_rdtsc_precise();
+			tsc_end = rte_rdtsc_precise();
 
-#ifdef CPERF_LINEARIZATION_ENABLE
-		if (linearize) {
-			/* PMD doesn't support scatter-gather and source buffer
-			 * is segmented.
-			 * We need to linearize it before enqueuing.
+			for (i = 0; i < ops_needed; i++) {
+				ctx->res[tsc_idx].tsc_start = tsc_start;
+				ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+				tsc_idx++;
+			}
+
+			/*
+			 * Calculate number of ops not enqueued (mainly for hw
+			 * accelerators whose ingress queue can fill up).
 			 */
-			for (i = 0; i < burst_size; i++)
-				rte_pktmbuf_linearize(ops[i]->sym->m_src);
-		}
-#endif /* CPERF_LINEARIZATION_ENABLE */
+			ops_unused = burst_size - ops_enqd;
+
+			if (likely(ops_deqd))  {
+				/*
+				 * free crypto ops so they can be reused. We don't free
+				 * the mbufs here as we don't want to reuse them as
+				 * the crypto operation will change the data and cause
+				 * failures.
+				 */
+				for (i = 0; i < ops_deqd; i++) {
+					pres = (struct cperf_op_result *)
+							(ops_processed[i]->opaque_data);
+					pres->status = ops_processed[i]->status;
+					pres->tsc_end = tsc_end;
+
+					rte_crypto_op_free(ops_processed[i]);
+				}
+
+				deqd_tot += ops_deqd;
+				deqd_max = max(ops_deqd, deqd_max);
+				deqd_min = min(ops_deqd, deqd_min);
+			}
 
-		/* Enqueue burst of ops on crypto device */
-		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
-				ops, burst_size);
+			enqd_tot += ops_enqd;
+			enqd_max = max(ops_enqd, enqd_max);
+			enqd_min = min(ops_enqd, enqd_min);
 
-		/* Dequeue processed burst of ops from crypto device */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+			m_idx += ops_needed;
+			m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+					0 : m_idx;
+			b_idx++;
+		}
 
-		tsc_end = rte_rdtsc_precise();
+		/* Dequeue any operations still in the crypto device */
+		while (deqd_tot < ctx->options->total_ops) {
+			/* Sending 0 length burst to flush sw crypto device */
+			rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-		for (i = 0; i < ops_needed; i++) {
-			ctx->res[tsc_idx].tsc_start = tsc_start;
-			ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
-			tsc_idx++;
-		}
+			/* dequeue burst */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
 
-		/*
-		 * Calculate number of ops not enqueued (mainly for hw
-		 * accelerators whose ingress queue can fill up).
-		 */
-		ops_unused = burst_size - ops_enqd;
+			tsc_end = rte_rdtsc_precise();
 
-		if (likely(ops_deqd))  {
-			/*
-			 * free crypto ops so they can be reused. We don't free
-			 * the mbufs here as we don't want to reuse them as
-			 * the crypto operation will change the data and cause
-			 * failures.
-			 */
-			for (i = 0; i < ops_deqd; i++) {
-				pres = (struct cperf_op_result *)
-						(ops_processed[i]->opaque_data);
-				pres->status = ops_processed[i]->status;
-				pres->tsc_end = tsc_end;
+			if (ops_deqd != 0) {
+				for (i = 0; i < ops_deqd; i++) {
+					pres = (struct cperf_op_result *)
+							(ops_processed[i]->opaque_data);
+					pres->status = ops_processed[i]->status;
+					pres->tsc_end = tsc_end;
+
+					rte_crypto_op_free(ops_processed[i]);
+				}
 
-				rte_crypto_op_free(ops_processed[i]);
+				deqd_tot += ops_deqd;
+				deqd_max = max(ops_deqd, deqd_max);
+				deqd_min = min(ops_deqd, deqd_min);
 			}
+		}
 
-			deqd_tot += ops_deqd;
-			deqd_max = max(ops_deqd, deqd_max);
-			deqd_min = min(ops_deqd, deqd_min);
+		for (i = 0; i < tsc_idx; i++) {
+			tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
+			tsc_max = max(tsc_val, tsc_max);
+			tsc_min = min(tsc_val, tsc_min);
+			tsc_tot += tsc_val;
 		}
 
-		enqd_tot += ops_enqd;
-		enqd_max = max(ops_enqd, enqd_max);
-		enqd_min = min(ops_enqd, enqd_min);
+		double time_tot, time_avg, time_max, time_min;
 
-		m_idx += ops_needed;
-		m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
-				0 : m_idx;
-		b_idx++;
-	}
+		const uint64_t tunit = 1000000; /* us */
+		const uint64_t tsc_hz = rte_get_tsc_hz();
 
-	/* Dequeue any operations still in the crypto device */
-	while (deqd_tot < ctx->options->total_ops) {
-		/* Sending 0 length burst to flush sw crypto device */
-		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+		uint64_t enqd_avg = enqd_tot / b_idx;
+		uint64_t deqd_avg = deqd_tot / b_idx;
+		uint64_t tsc_avg = tsc_tot / tsc_idx;
 
-		/* dequeue burst */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+		time_tot = tunit*(double)(tsc_tot) / tsc_hz;
+		time_avg = tunit*(double)(tsc_avg) / tsc_hz;
+		time_max = tunit*(double)(tsc_max) / tsc_hz;
+		time_min = tunit*(double)(tsc_min) / tsc_hz;
 
-		tsc_end = rte_rdtsc_precise();
+		if (ctx->options->csv) {
+			if (!only_once)
+				printf("\n# lcore, Buffer Size, Burst Size, Packet Seq #, "
+						"Packet Size, cycles, time (us)");
 
-		if (ops_deqd != 0) {
-			for (i = 0; i < ops_deqd; i++) {
-				pres = (struct cperf_op_result *)
-						(ops_processed[i]->opaque_data);
-				pres->status = ops_processed[i]->status;
-				pres->tsc_end = tsc_end;
+			for (i = 0; i < ctx->options->total_ops; i++) {
+
+				printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
+					ctx->lcore_id, ctx->options->test_buffer_size,
+					test_burst_size, i + 1,
+					ctx->res[i].tsc_end - ctx->res[i].tsc_start,
+					tunit * (double) (ctx->res[i].tsc_end
+							- ctx->res[i].tsc_start)
+						/ tsc_hz);
 
-				rte_crypto_op_free(ops_processed[i]);
 			}
+			only_once = 1;
+		} else {
+			printf("\n# Device %d on lcore %u\n", ctx->dev_id,
+				ctx->lcore_id);
+			printf("\n# total operations: %u", ctx->options->total_ops);
+			printf("\n# Buffer size: %u", ctx->options->test_buffer_size);
+			printf("\n# Burst size: %u", test_burst_size);
+			printf("\n#     Number of bursts: %"PRIu64,
+					b_idx);
+
+			printf("\n#");
+			printf("\n#          \t       Total\t   Average\t   Maximum\t "
+					"  Minimum");
+			printf("\n#  enqueued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+					"%10"PRIu64, enqd_tot, enqd_avg, enqd_max, enqd_min);
+			printf("\n#  dequeued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+					"%10"PRIu64, deqd_tot, deqd_avg, deqd_max, deqd_min);
+			printf("\n#    cycles\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+					"%10"PRIu64, tsc_tot, tsc_avg, tsc_max, tsc_min);
+			printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f", time_tot,
+				time_avg, time_max, time_min);
+			printf("\n\n");
 
-			deqd_tot += ops_deqd;
-			deqd_max = max(ops_deqd, deqd_max);
-			deqd_min = min(ops_deqd, deqd_min);
 		}
-	}
 
-	for (i = 0; i < tsc_idx; i++) {
-		tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
-		tsc_max = max(tsc_val, tsc_max);
-		tsc_min = min(tsc_val, tsc_min);
-		tsc_tot += tsc_val;
+		/* Get next size from range or list */
+		if (ctx->options->inc_burst_size != 0)
+			test_burst_size += ctx->options->inc_burst_size;
+		else {
+			if (++burst_size_idx == ctx->options->burst_size_count)
+				break;
+			test_burst_size =
+				ctx->options->burst_size_list[burst_size_idx];
+		}
 	}
 
-	ctx->results.enqd_tot = enqd_tot;
-	ctx->results.enqd_max = enqd_max;
-	ctx->results.enqd_min = enqd_min;
-
-	ctx->results.deqd_tot = deqd_tot;
-	ctx->results.deqd_max = deqd_max;
-	ctx->results.deqd_min = deqd_min;
-
-	ctx->results.cycles_tot = tsc_tot;
-	ctx->results.cycles_max = tsc_max;
-	ctx->results.cycles_min = tsc_min;
-
-	ctx->results.burst_num = b_idx;
-	ctx->results.num = tsc_idx;
-
 	return 0;
 }
 
@@ -504,74 +544,10 @@ void
 cperf_latency_test_destructor(void *arg)
 {
 	struct cperf_latency_ctx *ctx = arg;
-	uint64_t i;
+
 	if (ctx == NULL)
 		return;
-	static int only_once;
-	uint64_t etot, eavg, emax, emin;
-	uint64_t dtot, davg, dmax, dmin;
-	uint64_t ctot, cavg, cmax, cmin;
-	double ttot, tavg, tmax, tmin;
-
-	const uint64_t tunit = 1000000; /* us */
-	const uint64_t tsc_hz = rte_get_tsc_hz();
-
-	etot = ctx->results.enqd_tot;
-	eavg = ctx->results.enqd_tot / ctx->results.burst_num;
-	emax = ctx->results.enqd_max;
-	emin = ctx->results.enqd_min;
-
-	dtot = ctx->results.deqd_tot;
-	davg = ctx->results.deqd_tot / ctx->results.burst_num;
-	dmax = ctx->results.deqd_max;
-	dmin = ctx->results.deqd_min;
-
-	ctot = ctx->results.cycles_tot;
-	cavg = ctx->results.cycles_tot / ctx->results.num;
-	cmax = ctx->results.cycles_max;
-	cmin = ctx->results.cycles_min;
-
-	ttot = tunit*(double)(ctot) / tsc_hz;
-	tavg = tunit*(double)(cavg) / tsc_hz;
-	tmax = tunit*(double)(cmax) / tsc_hz;
-	tmin = tunit*(double)(cmin) / tsc_hz;
-
-	if (ctx->options->csv) {
-		if (!only_once)
-			printf("\n# lcore, Pakt Seq #, Packet Size, cycles,"
-					" time (us)");
-
-		for (i = 0; i < ctx->options->total_ops; i++) {
-
-			printf("\n%u;%"PRIu64";%"PRIu64";%.3f",
-				ctx->lcore_id, i + 1,
-				ctx->res[i].tsc_end - ctx->res[i].tsc_start,
-				tunit * (double) (ctx->res[i].tsc_end
-						- ctx->res[i].tsc_start)
-					/ tsc_hz);
 
-		}
-		only_once = 1;
-	} else {
-		printf("\n# Device %d on lcore %u\n", ctx->dev_id,
-			ctx->lcore_id);
-		printf("\n# total operations: %u", ctx->options->total_ops);
-		printf("\n#     burst number: %"PRIu64,
-				ctx->results.burst_num);
-		printf("\n#");
-		printf("\n#          \t       Total\t   Average\t   Maximum\t "
-				"  Minimum");
-		printf("\n#  enqueued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64, etot, eavg, emax, emin);
-		printf("\n#  dequeued\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64, dtot, davg, dmax, dmin);
-		printf("\n#    cycles\t%12"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64, ctot, cavg, cmax, cmin);
-		printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f", ttot,
-			tavg, tmax, tmin);
-		printf("\n\n");
-
-	}
 	cperf_latency_test_free(ctx, ctx->options->pool_sz);
 
 }
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 7108075..183ff2a 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -38,18 +38,6 @@
 #include "cperf_test_throughput.h"
 #include "cperf_ops.h"
 
-struct cperf_throughput_results {
-	uint64_t ops_enqueued;
-	uint64_t ops_dequeued;
-
-	uint64_t ops_enqueued_failed;
-	uint64_t ops_dequeued_failed;
-
-	double ops_per_second;
-	double throughput_gbps;
-	double cycles_per_byte;
-};
-
 struct cperf_throughput_ctx {
 	uint8_t dev_id;
 	uint16_t qp_id;
@@ -68,8 +56,6 @@ struct cperf_throughput_ctx {
 
 	const struct cperf_options *options;
 	const struct cperf_test_vector *test_vector;
-	struct cperf_throughput_results results;
-
 };
 
 static void
@@ -117,8 +103,8 @@ cperf_mbuf_create(struct rte_mempool *mempool,
 		const struct cperf_test_vector *test_vector)
 {
 	struct rte_mbuf *mbuf;
-	uint32_t segment_sz = options->buffer_sz / segments_nb;
-	uint32_t last_sz = options->buffer_sz % segments_nb;
+	uint32_t segment_sz = options->max_buffer_size / segments_nb;
+	uint32_t last_sz = options->max_buffer_size % segments_nb;
 	uint8_t *mbuf_data;
 	uint8_t *test_data =
 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -220,8 +206,8 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 			options->pool_sz * options->segments_nb, 0, 0,
 			RTE_PKTMBUF_HEADROOM +
 			RTE_CACHE_LINE_ROUNDUP(
-				(options->buffer_sz / options->segments_nb) +
-				(options->buffer_sz % options->segments_nb) +
+				(options->max_buffer_size / options->segments_nb) +
+				(options->max_buffer_size % options->segments_nb) +
 					options->auth_digest_sz),
 			rte_socket_id());
 
@@ -229,9 +215,6 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 		goto err;
 
 	/* Generate mbufs_in with plaintext populated for test */
-	if (ctx->options->pool_sz % ctx->options->burst_sz)
-		goto err;
-
 	ctx->mbufs_in = rte_malloc(NULL,
 			(sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
 
@@ -252,7 +235,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 				pool_name, options->pool_sz, 0, 0,
 				RTE_PKTMBUF_HEADROOM +
 				RTE_CACHE_LINE_ROUNDUP(
-					options->buffer_sz +
+					options->max_buffer_size +
 					options->auth_digest_sz),
 				rte_socket_id());
 
@@ -296,16 +279,14 @@ int
 cperf_throughput_test_runner(void *test_ctx)
 {
 	struct cperf_throughput_ctx *ctx = test_ctx;
+	uint32_t i;
+	uint16_t test_burst_size;
+	uint8_t burst_size_idx = 0;
 
-	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
-	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
-
-	uint64_t i, m_idx = 0, tsc_start, tsc_end, tsc_duration;
-
-	uint16_t ops_unused = 0;
+	static int only_once;
 
-	struct rte_crypto_op *ops[ctx->options->burst_sz];
-	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+	struct rte_crypto_op *ops[ctx->options->max_burst_size];
+	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
 
 	uint32_t lcore = rte_lcore_id();
 
@@ -324,131 +305,189 @@ cperf_throughput_test_runner(void *test_ctx)
 
 	ctx->lcore_id = lcore;
 
-	if (!ctx->options->csv)
-		printf("\n# Running throughput test on device: %u, lcore: %u\n",
-			ctx->dev_id, lcore);
-
 	/* Warm up the host CPU before starting the test */
 	for (i = 0; i < ctx->options->total_ops; i++)
 		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-	tsc_start = rte_rdtsc_precise();
+	/* Get first size from range or list */
+	if (ctx->options->inc_burst_size != 0)
+		test_burst_size = ctx->options->min_burst_size;
+	else
+		test_burst_size = ctx->options->burst_size_list[0];
+
+	while (test_burst_size <= ctx->options->max_burst_size) {
+		uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
+		uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+
+		uint64_t i, m_idx = 0, tsc_start, tsc_end, tsc_duration;
+
+		uint16_t ops_unused = 0;
 
-	while (ops_enqd_total < ctx->options->total_ops) {
+		tsc_start = rte_rdtsc_precise();
 
-		uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
-				<= ctx->options->total_ops) ?
-						ctx->options->burst_sz :
-						ctx->options->total_ops -
-						ops_enqd_total;
+		while (ops_enqd_total < ctx->options->total_ops) {
 
-		uint16_t ops_needed = burst_size - ops_unused;
+			uint16_t burst_size = ((ops_enqd_total + test_burst_size
+					<= ctx->options->total_ops) ?
+							test_burst_size :
+							ctx->options->total_ops -
+							ops_enqd_total);
 
-		/* Allocate crypto ops from pool */
-		if (ops_needed != rte_crypto_op_bulk_alloc(
-				ctx->crypto_op_pool,
-				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-				ops, ops_needed))
-			return -1;
+			uint16_t ops_needed = burst_size - ops_unused;
 
-		/* Setup crypto op, attach mbuf etc */
-		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
-				&ctx->mbufs_out[m_idx],
-				ops_needed, ctx->sess, ctx->options,
-				ctx->test_vector);
+			/* Allocate crypto ops from pool */
+			if (ops_needed != rte_crypto_op_bulk_alloc(
+					ctx->crypto_op_pool,
+					RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+					ops, ops_needed))
+				return -1;
+
+			/* Setup crypto op, attach mbuf etc */
+			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+					&ctx->mbufs_out[m_idx],
+					ops_needed, ctx->sess, ctx->options,
+					ctx->test_vector);
 
 #ifdef CPERF_LINEARIZATION_ENABLE
-		if (linearize) {
-			/* PMD doesn't support scatter-gather and source buffer
-			 * is segmented.
-			 * We need to linearize it before enqueuing.
-			 */
-			for (i = 0; i < burst_size; i++)
-				rte_pktmbuf_linearize(ops[i]->sym->m_src);
-		}
+			if (linearize) {
+				/* PMD doesn't support scatter-gather and source buffer
+				 * is segmented.
+				 * We need to linearize it before enqueuing.
+				 */
+				for (i = 0; i < burst_size; i++)
+					rte_pktmbuf_linearize(ops[i]->sym->m_src);
+			}
 #endif /* CPERF_LINEARIZATION_ENABLE */
 
-		/* Enqueue burst of ops on crypto device */
-		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
-				ops, burst_size);
-		if (ops_enqd < burst_size)
-			ops_enqd_failed++;
-
-		/**
-		 * Calculate number of ops not enqueued (mainly for hw
-		 * accelerators whose ingress queue can fill up).
-		 */
-		ops_unused = burst_size - ops_enqd;
-		ops_enqd_total += ops_enqd;
-
-
-		/* Dequeue processed burst of ops from crypto device */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
-
-		if (likely(ops_deqd))  {
-			/* free crypto ops so they can be reused. We don't free
-			 * the mbufs here as we don't want to reuse them as
-			 * the crypto operation will change the data and cause
-			 * failures.
-			 */
-			for (i = 0; i < ops_deqd; i++)
-				rte_crypto_op_free(ops_processed[i]);
+			/* Enqueue burst of ops on crypto device */
+			ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+					ops, burst_size);
+			if (ops_enqd < burst_size)
+				ops_enqd_failed++;
 
-			ops_deqd_total += ops_deqd;
-		} else {
 			/**
-			 * Count dequeue polls which didn't return any
-			 * processed operations. This statistic is mainly
-			 * relevant to hw accelerators.
+			 * Calculate number of ops not enqueued (mainly for hw
+			 * accelerators whose ingress queue can fill up).
 			 */
-			ops_deqd_failed++;
-		}
+			ops_unused = burst_size - ops_enqd;
+			ops_enqd_total += ops_enqd;
+
+
+			/* Dequeue processed burst of ops from crypto device */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
+
+			if (likely(ops_deqd))  {
+				/* free crypto ops so they can be reused. We don't free
+				 * the mbufs here as we don't want to reuse them as
+				 * the crypto operation will change the data and cause
+				 * failures.
+				 */
+				for (i = 0; i < ops_deqd; i++)
+					rte_crypto_op_free(ops_processed[i]);
+
+				ops_deqd_total += ops_deqd;
+			} else {
+				/**
+				 * Count dequeue polls which didn't return any
+				 * processed operations. This statistic is mainly
+				 * relevant to hw accelerators.
+				 */
+				ops_deqd_failed++;
+			}
 
-		m_idx += ops_needed;
-		m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
-				0 : m_idx;
-	}
+			m_idx += ops_needed;
+			m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+					0 : m_idx;
+		}
 
-	/* Dequeue any operations still in the crypto device */
+		/* Dequeue any operations still in the crypto device */
 
-	while (ops_deqd_total < ctx->options->total_ops) {
-		/* Sending 0 length burst to flush sw crypto device */
-		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+		while (ops_deqd_total < ctx->options->total_ops) {
+			/* Sending 0 length burst to flush sw crypto device */
+			rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-		/* dequeue burst */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
-		if (ops_deqd == 0)
-			ops_deqd_failed++;
-		else {
-			for (i = 0; i < ops_deqd; i++)
-				rte_crypto_op_free(ops_processed[i]);
+			/* dequeue burst */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
+			if (ops_deqd == 0)
+				ops_deqd_failed++;
+			else {
+				for (i = 0; i < ops_deqd; i++)
+					rte_crypto_op_free(ops_processed[i]);
 
-			ops_deqd_total += ops_deqd;
+				ops_deqd_total += ops_deqd;
+			}
 		}
-	}
-
-	tsc_end = rte_rdtsc_precise();
-	tsc_duration = (tsc_end - tsc_start);
 
-	/* Calculate average operations processed per second */
-	ctx->results.ops_per_second = ((double)ctx->options->total_ops /
-			tsc_duration) * rte_get_tsc_hz();
-
-	/* Calculate average throughput (Gbps) in bits per second */
-	ctx->results.throughput_gbps = ((ctx->results.ops_per_second *
-			ctx->options->buffer_sz * 8) / 1000000000);
-
-	/* Calculate average cycles per byte */
-	ctx->results.cycles_per_byte =  ((double)tsc_duration /
-			ctx->options->total_ops) / ctx->options->buffer_sz;
+		tsc_end = rte_rdtsc_precise();
+		tsc_duration = (tsc_end - tsc_start);
+
+		/* Calculate average operations processed per second */
+		double ops_per_second = ((double)ctx->options->total_ops /
+				tsc_duration) * rte_get_tsc_hz();
+
+		/* Calculate average throughput (Gbps) in bits per second */
+		double throughput_gbps = ((ops_per_second *
+				ctx->options->test_buffer_size * 8) / 1000000000);
+
+		/* Calculate average cycles per byte */
+		double cycles_per_packet = ((double)tsc_duration /
+				ctx->options->total_ops) / ctx->options->test_buffer_size;
+
+		if (!ctx->options->csv) {
+			if (!only_once)
+				printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
+					"lcore id", "Buf Size", "Burst Size",
+					"Enqueued", "Dequeued", "Failed Enq",
+					"Failed Deq", "MOps", "Gbps",
+					"Cycles/Byte");
+			only_once = 1;
+
+			printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
+					"%12"PRIu64"%12.4f%12.4f%12.2f\n",
+					ctx->lcore_id,
+					ctx->options->test_buffer_size,
+					test_burst_size,
+					ops_enqd_total,
+					ops_deqd_total,
+					ops_enqd_failed,
+					ops_deqd_failed,
+					ops_per_second/1000000,
+					throughput_gbps,
+					cycles_per_packet);
+		} else {
+			if (!only_once)
+				printf("# lcore id, Buffer Size(B),"
+					"Burst Size,Enqueued,Dequeued,Failed Enq,"
+					"Failed Deq,Ops(Millions),Throughput(Gbps),"
+					"Cycles/Byte\n\n");
+			only_once = 1;
+
+			printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+					"%.3f;%.3f;%.3f\n",
+					ctx->lcore_id,
+					ctx->options->test_buffer_size,
+					test_burst_size,
+					ops_enqd_total,
+					ops_deqd_total,
+					ops_enqd_failed,
+					ops_deqd_failed,
+					ops_per_second/1000000,
+					throughput_gbps,
+					cycles_per_packet);
+		}
 
-	ctx->results.ops_enqueued = ops_enqd_total;
-	ctx->results.ops_dequeued = ops_deqd_total;
+		/* Get next size from range or list */
+		if (ctx->options->inc_burst_size != 0)
+			test_burst_size += ctx->options->inc_burst_size;
+		else {
+			if (++burst_size_idx == ctx->options->burst_size_count)
+				break;
+			test_burst_size = ctx->options->burst_size_list[burst_size_idx];
+		}
 
-	ctx->results.ops_enqueued_failed = ops_enqd_failed;
-	ctx->results.ops_dequeued_failed = ops_deqd_failed;
+	}
 
 	return 0;
 }
@@ -458,50 +497,9 @@ void
 cperf_throughput_test_destructor(void *arg)
 {
 	struct cperf_throughput_ctx *ctx = arg;
-	struct cperf_throughput_results *results = &ctx->results;
-	static int only_once;
 
 	if (ctx == NULL)
 		return;
 
-	if (!ctx->options->csv) {
-		printf("\n# Device %d on lcore %u\n",
-				ctx->dev_id, ctx->lcore_id);
-		printf("# Buffer Size(B)\t  Enqueued\t  Dequeued\tFailed Enq"
-				"\tFailed Deq\tOps(Millions)\tThroughput(Gbps)"
-				"\tCycles Per Byte\n");
-
-		printf("\n%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64"\t%16.4f\t%16.4f\t%15.2f\n",
-				ctx->options->buffer_sz,
-				results->ops_enqueued,
-				results->ops_dequeued,
-				results->ops_enqueued_failed,
-				results->ops_dequeued_failed,
-				results->ops_per_second/1000000,
-				results->throughput_gbps,
-				results->cycles_per_byte);
-	} else {
-		if (!only_once)
-			printf("\n# CPU lcore id, Burst Size(B), "
-				"Buffer Size(B),Enqueued,Dequeued,Failed Enq,"
-				"Failed Deq,Ops(Millions),Throughput(Gbps),"
-				"Cycles Per Byte\n");
-		only_once = 1;
-
-		printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
-				"%.f3;%.f3;%.f3\n",
-				ctx->lcore_id,
-				ctx->options->burst_sz,
-				ctx->options->buffer_sz,
-				results->ops_enqueued,
-				results->ops_dequeued,
-				results->ops_enqueued_failed,
-				results->ops_dequeued_failed,
-				results->ops_per_second/1000000,
-				results->throughput_gbps,
-				results->cycles_per_byte);
-	}
-
 	cperf_throughput_test_free(ctx, ctx->options->pool_sz);
 }
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index d1c01d2..f384e3d 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -264,12 +264,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
 		if (tc_found)
 			vector->plaintext.length = data_length;
 		else {
-			if (opts->buffer_sz > data_length) {
+			if (opts->max_buffer_size > data_length) {
 				printf("Global plaintext shorter than "
 					"buffer_sz\n");
 				return -1;
 			}
-			vector->plaintext.length = opts->buffer_sz;
+			vector->plaintext.length = opts->max_buffer_size;
 		}
 
 	} else if (strstr(key_token, "cipher_key")) {
@@ -321,12 +321,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
 		if (tc_found)
 			vector->ciphertext.length = data_length;
 		else {
-			if (opts->buffer_sz > data_length) {
+			if (opts->max_buffer_size > data_length) {
 				printf("Global ciphertext shorter than "
 					"buffer_sz\n");
 				return -1;
 			}
-			vector->ciphertext.length = opts->buffer_sz;
+			vector->ciphertext.length = opts->max_buffer_size;
 		}
 
 	} else if (strstr(key_token, "aad")) {
@@ -498,10 +498,10 @@ cperf_test_vector_get_from_file(struct cperf_options *opts)
 
 	/* other values not included in the file */
 	test_vector->data.cipher_offset = 0;
-	test_vector->data.cipher_length = opts->buffer_sz;
+	test_vector->data.cipher_length = opts->max_buffer_size;
 
 	test_vector->data.auth_offset = 0;
-	test_vector->data.auth_length = opts->buffer_sz;
+	test_vector->data.auth_length = opts->max_buffer_size;
 
 	return test_vector;
 }
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 6307f25..d421432 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -399,7 +399,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 		return t_vec;
 
 	t_vec->plaintext.data = plaintext;
-	t_vec->plaintext.length = options->buffer_sz;
+	t_vec->plaintext.length = options->max_buffer_size;
 
 	if (options->op_type ==	CPERF_CIPHER_ONLY ||
 			options->op_type == CPERF_CIPHER_THEN_AUTH ||
@@ -422,11 +422,11 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 			}
 			memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
 		}
-		t_vec->ciphertext.length = options->buffer_sz;
+		t_vec->ciphertext.length = options->max_buffer_size;
 		t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
 		t_vec->iv.length = options->cipher_iv_sz;
 		t_vec->data.cipher_offset = 0;
-		t_vec->data.cipher_length = options->buffer_sz;
+		t_vec->data.cipher_length = options->max_buffer_size;
 	}
 
 	if (options->op_type ==	CPERF_AUTH_ONLY ||
@@ -493,7 +493,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 		t_vec->digest.length = options->auth_digest_sz;
 		memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
 		t_vec->data.auth_offset = 0;
-		t_vec->data.auth_length = options->buffer_sz;
+		t_vec->data.auth_length = options->max_buffer_size;
 	}
 
 	return t_vec;
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index be157e6..3245704 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -38,16 +38,6 @@
 #include "cperf_test_verify.h"
 #include "cperf_ops.h"
 
-struct cperf_verify_results {
-	uint64_t ops_enqueued;
-	uint64_t ops_dequeued;
-
-	uint64_t ops_enqueued_failed;
-	uint64_t ops_dequeued_failed;
-
-	uint64_t ops_failed;
-};
-
 struct cperf_verify_ctx {
 	uint8_t dev_id;
 	uint16_t qp_id;
@@ -66,8 +56,6 @@ struct cperf_verify_ctx {
 
 	const struct cperf_options *options;
 	const struct cperf_test_vector *test_vector;
-	struct cperf_verify_results results;
-
 };
 
 struct cperf_op_result {
@@ -119,8 +107,8 @@ cperf_mbuf_create(struct rte_mempool *mempool,
 		const struct cperf_test_vector *test_vector)
 {
 	struct rte_mbuf *mbuf;
-	uint32_t segment_sz = options->buffer_sz / segments_nb;
-	uint32_t last_sz = options->buffer_sz % segments_nb;
+	uint32_t segment_sz = options->max_buffer_size / segments_nb;
+	uint32_t last_sz = options->max_buffer_size % segments_nb;
 	uint8_t *mbuf_data;
 	uint8_t *test_data =
 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -222,8 +210,8 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 			options->pool_sz * options->segments_nb, 0, 0,
 			RTE_PKTMBUF_HEADROOM +
 			RTE_CACHE_LINE_ROUNDUP(
-				(options->buffer_sz / options->segments_nb) +
-				(options->buffer_sz % options->segments_nb) +
+				(options->max_buffer_size / options->segments_nb) +
+				(options->max_buffer_size % options->segments_nb) +
 					options->auth_digest_sz),
 			rte_socket_id());
 
@@ -231,9 +219,6 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 		goto err;
 
 	/* Generate mbufs_in with plaintext populated for test */
-	if (ctx->options->pool_sz % ctx->options->burst_sz)
-		goto err;
-
 	ctx->mbufs_in = rte_malloc(NULL,
 			(sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
 
@@ -254,7 +239,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 				pool_name, options->pool_sz, 0, 0,
 				RTE_PKTMBUF_HEADROOM +
 				RTE_CACHE_LINE_ROUNDUP(
-					options->buffer_sz +
+					options->max_buffer_size +
 					options->auth_digest_sz),
 				rte_socket_id());
 
@@ -401,12 +386,15 @@ cperf_verify_test_runner(void *test_ctx)
 
 	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
 	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+	uint64_t ops_failed = 0;
+
+	static int only_once;
 
 	uint64_t i, m_idx = 0;
 	uint16_t ops_unused = 0;
 
-	struct rte_crypto_op *ops[ctx->options->burst_sz];
-	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+	struct rte_crypto_op *ops[ctx->options->max_burst_size];
+	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
 
 	uint32_t lcore = rte_lcore_id();
 
@@ -431,9 +419,9 @@ cperf_verify_test_runner(void *test_ctx)
 
 	while (ops_enqd_total < ctx->options->total_ops) {
 
-		uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
+		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
 				<= ctx->options->total_ops) ?
-						ctx->options->burst_sz :
+						ctx->options->max_burst_size :
 						ctx->options->total_ops -
 						ops_enqd_total;
 
@@ -479,10 +467,10 @@ cperf_verify_test_runner(void *test_ctx)
 
 		/* Dequeue processed burst of ops from crypto device */
 		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+				ops_processed, ctx->options->max_burst_size);
 
 		m_idx += ops_needed;
-		if (m_idx + ctx->options->burst_sz > ctx->options->pool_sz)
+		if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
 			m_idx = 0;
 
 		if (ops_deqd == 0) {
@@ -498,7 +486,7 @@ cperf_verify_test_runner(void *test_ctx)
 		for (i = 0; i < ops_deqd; i++) {
 			if (cperf_verify_op(ops_processed[i], ctx->options,
 						ctx->test_vector))
-				ctx->results.ops_failed++;
+				ops_failed++;
 			/* free crypto ops so they can be reused. We don't free
 			 * the mbufs here as we don't want to reuse them as
 			 * the crypto operation will change the data and cause
@@ -517,7 +505,7 @@ cperf_verify_test_runner(void *test_ctx)
 
 		/* dequeue burst */
 		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+				ops_processed, ctx->options->max_burst_size);
 		if (ops_deqd == 0) {
 			ops_deqd_failed++;
 			continue;
@@ -526,7 +514,7 @@ cperf_verify_test_runner(void *test_ctx)
 		for (i = 0; i < ops_deqd; i++) {
 			if (cperf_verify_op(ops_processed[i], ctx->options,
 						ctx->test_vector))
-				ctx->results.ops_failed++;
+				ops_failed++;
 			/* free crypto ops so they can be reused. We don't free
 			 * the mbufs here as we don't want to reuse them as
 			 * the crypto operation will change the data and cause
@@ -537,59 +525,52 @@ cperf_verify_test_runner(void *test_ctx)
 		}
 	}
 
-	ctx->results.ops_enqueued = ops_enqd_total;
-	ctx->results.ops_dequeued = ops_deqd_total;
-
-	ctx->results.ops_enqueued_failed = ops_enqd_failed;
-	ctx->results.ops_dequeued_failed = ops_deqd_failed;
-
-	return 0;
-}
-
-
-
-void
-cperf_verify_test_destructor(void *arg)
-{
-	struct cperf_verify_ctx *ctx = arg;
-	struct cperf_verify_results *results = &ctx->results;
-	static int only_once;
-
-	if (ctx == NULL)
-		return;
-
 	if (!ctx->options->csv) {
 		printf("\n# Device %d on lcore %u\n",
 				ctx->dev_id, ctx->lcore_id);
-		printf("# Buffer Size(B)\t  Enqueued\t  Dequeued\tFailed Enq"
+		printf("# Buffer Size(B)\t  Burst Size\t Enqueued\t  Dequeued\tFailed Enq"
 				"\tFailed Deq\tEmpty Polls\n");
 
-		printf("\n%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
+		printf("\n%16u\t%16u\t%10"PRIu64"\t%10"PRIu64"\t%10"PRIu64"\t"
 				"%10"PRIu64"\t%10"PRIu64"\n",
-				ctx->options->buffer_sz,
-				results->ops_enqueued,
-				results->ops_dequeued,
-				results->ops_enqueued_failed,
-				results->ops_dequeued_failed,
-				results->ops_failed);
+				ctx->options->max_buffer_size,
+				ctx->options->max_burst_size,
+				ops_enqd_total,
+				ops_deqd_total,
+				ops_enqd_failed,
+				ops_deqd_failed,
+				ops_failed);
 	} else {
 		if (!only_once)
-			printf("\n# CPU lcore id, Burst Size(B), "
-				"Buffer Size(B),Enqueued,Dequeued,Failed Enq,"
+			printf("\n# CPU lcore id, Buffer Size(B), "
+				"Burst Size(B), Enqueued,Dequeued,Failed Enq,"
 				"Failed Deq,Empty Polls\n");
 		only_once = 1;
 
 		printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
 				"%"PRIu64"\n",
 				ctx->lcore_id,
-				ctx->options->burst_sz,
-				ctx->options->buffer_sz,
-				results->ops_enqueued,
-				results->ops_dequeued,
-				results->ops_enqueued_failed,
-				results->ops_dequeued_failed,
-				results->ops_failed);
+				ctx->options->max_buffer_size,
+				ctx->options->max_burst_size,
+				ops_enqd_total,
+				ops_deqd_total,
+				ops_enqd_failed,
+				ops_deqd_failed,
+				ops_failed);
 	}
 
+	return 0;
+}
+
+
+
+void
+cperf_verify_test_destructor(void *arg)
+{
+	struct cperf_verify_ctx *ctx = arg;
+
+	if (ctx == NULL)
+		return;
+
 	cperf_verify_test_free(ctx, ctx->options->pool_sz);
 }
diff --git a/app/test-crypto-perf/cperf_verify_parser.c b/app/test-crypto-perf/cperf_verify_parser.c
index 5640d84..422f92d 100644
--- a/app/test-crypto-perf/cperf_verify_parser.c
+++ b/app/test-crypto-perf/cperf_verify_parser.c
@@ -305,10 +305,10 @@ cperf_test_vector_get_from_file(struct cperf_options *opts)
 
 	/* other values not included in the file */
 	test_vector->data.cipher_offset = 0;
-	test_vector->data.cipher_length = opts->buffer_sz;
+	test_vector->data.cipher_length = opts->max_buffer_size;
 
 	test_vector->data.auth_offset = 0;
-	test_vector->data.auth_length = opts->buffer_sz;
+	test_vector->data.auth_length = opts->max_buffer_size;
 
 	return test_vector;
 }
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index da37972..94230d9 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -179,11 +179,11 @@ cperf_check_test_vector(struct cperf_options *opts,
 		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length != opts->max_buffer_size)
 				return -1;
 			if (test_vec->ciphertext.data == NULL)
 				return -1;
-			if (test_vec->ciphertext.length != opts->buffer_sz)
+			if (test_vec->ciphertext.length != opts->max_buffer_size)
 				return -1;
 			if (test_vec->iv.data == NULL)
 				return -1;
@@ -198,7 +198,7 @@ cperf_check_test_vector(struct cperf_options *opts,
 		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length != opts->max_buffer_size)
 				return -1;
 			if (test_vec->auth_key.data == NULL)
 				return -1;
@@ -215,16 +215,16 @@ cperf_check_test_vector(struct cperf_options *opts,
 		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length != opts->max_buffer_size)
 				return -1;
 		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length != opts->max_buffer_size)
 				return -1;
 			if (test_vec->ciphertext.data == NULL)
 				return -1;
-			if (test_vec->ciphertext.length != opts->buffer_sz)
+			if (test_vec->ciphertext.length != opts->max_buffer_size)
 				return -1;
 			if (test_vec->iv.data == NULL)
 				return -1;
@@ -248,7 +248,7 @@ cperf_check_test_vector(struct cperf_options *opts,
 	} else if (opts->op_type == CPERF_AEAD) {
 		if (test_vec->plaintext.data == NULL)
 			return -1;
-		if (test_vec->plaintext.length != opts->buffer_sz)
+		if (test_vec->plaintext.length != opts->max_buffer_size)
 			return -1;
 		if (test_vec->aad.data == NULL)
 			return -1;
@@ -275,6 +275,8 @@ main(int argc, char **argv)
 	uint8_t cdev_id, i;
 	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
 
+	uint8_t buffer_size_idx = 0;
+
 	int ret;
 	uint32_t lcore_id;
 
@@ -370,21 +372,37 @@ main(int argc, char **argv)
 		i++;
 	}
 
-	i = 0;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	/* Get first size from range or list */
+	if (opts.inc_buffer_size != 0)
+		opts.test_buffer_size = opts.min_buffer_size;
+	else
+		opts.test_buffer_size = opts.buffer_size_list[0];
 
-		if (i == nb_cryptodevs)
-			break;
+	while (opts.test_buffer_size <= opts.max_buffer_size) {
+		i = 0;
+		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 
-		cdev_id = enabled_cdevs[i];
+			if (i == nb_cryptodevs)
+				break;
 
-		rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+			cdev_id = enabled_cdevs[i];
+
+			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
 				ctx[cdev_id], lcore_id);
-		i++;
+			i++;
+		}
+		rte_eal_mp_wait_lcore();
+
+		/* Get next size from range or list */
+		if (opts.inc_buffer_size != 0)
+			opts.test_buffer_size += opts.inc_buffer_size;
+		else {
+			if (++buffer_size_idx == opts.buffer_size_count)
+				break;
+			opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
+		}
 	}
 
-	rte_eal_mp_wait_lcore();
-
 	i = 0;
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 6328dab..1389db6 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -150,10 +150,25 @@ The following are the appication command-line options:
 
         Set the number of packets per burst.
 
+        This can be set as:
+          * Single value (i.e. ``--burst-sz 16``)
+          * Range of values, using the following structure ``min:inc:max``,
+            where ``min`` is minimum size, ``inc`` is the increment size and ``max``
+            is the maximum size (i.e. ``--burst-sz 16:2:32``)
+          * List of values, up to 32 values, separated by commas (i.e. ``--burst-sz 16,24,32``)
+
 * ``--buffer-sz <n>``
 
         Set the size of single packet (plaintext or ciphertext in it).
 
+        This can be set as:
+          * Single value (i.e. ``--buffer-sz 16``)
+          * Range of values, using the following structure ``min:inc:max``,
+            where ``min`` is minimum size, ``inc`` is the increment size and ``max``
+            is the maximum size (i.e. ``--buffer-sz 16:2:32``)
+          * List of values, up to 32 values, separated by commas (i.e. ``--buffer-sz 32,64,128``)
+
+
 * ``--segments-nb <n>``
 
         Set the number of segments per packet.
-- 
2.7.4

  parent reply	other threads:[~2017-03-03 16:11 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-03-03 16:13 [PATCH 0/3] Crypto performance app improvements Pablo de Lara
2017-03-03 16:13 ` [PATCH 1/3] app/crypto-perf: move verify as single test type Pablo de Lara
2017-03-03 16:13 ` [PATCH 2/3] app/crypto-perf: do not append digest if not used Pablo de Lara
2017-03-03 16:13 ` Pablo de Lara [this message]
2017-03-27 11:25 ` [PATCH v2 0/9] Crypto performance app improvements Pablo de Lara
2017-03-27 11:25   ` [PATCH v2 1/9] app/crypto-perf: remove cyclecount test type Pablo de Lara
2017-03-27 11:25   ` [PATCH v2 2/9] app/crypto-perf: remove unused file Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 3/9] app/crypto-perf: fix AES CBC 128 test vectors Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 4/9] app/crypto-perf: move verify as single test type Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 5/9] app/crypto-perf: do not append digest if not used Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 6/9] app/crypto-perf: display results in test runner Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 7/9] app/crypto-perf: add range/list of sizes Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 8/9] app/crypto-perf: add extra option checks Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 9/9] app/crypto-perf: reorg options structure Pablo de Lara
2017-03-27 12:29   ` [PATCH v2 0/9] Crypto performance app improvements De Lara Guarch, Pablo
2017-03-29 15:24     ` Sergio Gonzalez Monroy
2017-03-29 22:22       ` De Lara Guarch, Pablo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1488557592-46193-4-git-send-email-pablo.de.lara.guarch@intel.com \
    --to=pablo.de.lara.guarch@intel.com \
    --cc=declan.doherty@intel.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.