All of lore.kernel.org
 help / color / mirror / Atom feed
From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: declan.doherty@intel.com
Cc: dev@dpdk.org, Pablo de Lara <pablo.de.lara.guarch@intel.com>
Subject: [PATCH v2 7/9] app/crypto-perf: add range/list of sizes
Date: Mon, 27 Mar 2017 12:26:04 +0100	[thread overview]
Message-ID: <1490613966-74180-8-git-send-email-pablo.de.lara.guarch@intel.com> (raw)
In-Reply-To: <1490613966-74180-1-git-send-email-pablo.de.lara.guarch@intel.com>

So far, the crypto performance application was only able to
test one buffer size and one burst size.

With this commit, multiple sizes can be passed, either as a range
of values or as a list of values.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
 app/test-crypto-perf/cperf_ops.c                 |  20 +-
 app/test-crypto-perf/cperf_options.h             |  17 +-
 app/test-crypto-perf/cperf_options_parsing.c     | 234 ++++++++++++++--
 app/test-crypto-perf/cperf_test_latency.c        | 327 ++++++++++++-----------
 app/test-crypto-perf/cperf_test_throughput.c     | 308 +++++++++++----------
 app/test-crypto-perf/cperf_test_vector_parsing.c |  12 +-
 app/test-crypto-perf/cperf_test_vectors.c        |   8 +-
 app/test-crypto-perf/cperf_test_verify.c         |  44 +--
 app/test-crypto-perf/main.c                      |  60 +++--
 doc/guides/tools/cryptoperf.rst                  |  15 ++
 10 files changed, 662 insertions(+), 383 deletions(-)

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 1795a37..0387354 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -53,7 +53,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* cipher parameters */
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -78,7 +78,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		sym_op->m_dst = bufs_out[i];
 
 		/* auth parameters */
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -107,7 +107,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -139,7 +139,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.length = options->auth_digest_sz;
 		} else {
 
-			uint32_t offset = options->buffer_sz;
+			uint32_t offset = options->test_buffer_size;
 			struct rte_mbuf *buf, *tbuf;
 
 			if (options->out_of_place) {
@@ -166,7 +166,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 
 		}
 
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -195,7 +195,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 
 		/* authentication parameters */
@@ -206,7 +206,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.digest.length = options->auth_digest_sz;
 		} else {
 
-			uint32_t offset = options->buffer_sz;
+			uint32_t offset = options->test_buffer_size;
 			struct rte_mbuf *buf, *tbuf;
 
 			if (options->out_of_place) {
@@ -232,7 +232,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 			sym_op->auth.aad.length = options->auth_aad_sz;
 		}
 
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -261,7 +261,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
 		sym_op->cipher.iv.length = test_vector->iv.length;
 
-		sym_op->cipher.data.length = options->buffer_sz;
+		sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset =
 				RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
 
@@ -302,7 +302,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 			sym_op->auth.digest.length = options->auth_digest_sz;
 		}
 
-		sym_op->auth.data.length = options->buffer_sz;
+		sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = options->auth_aad_sz;
 	}
 
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 573f6ea..24699ed 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -32,6 +32,7 @@
 #define CPERF_AUTH_AAD_SZ	("auth-aad-sz")
 #define CPERF_CSV		("csv-friendly")
 
+#define MAX_LIST 32
 
 enum cperf_perf_test_type {
 	CPERF_TEST_TYPE_THROUGHPUT,
@@ -57,8 +58,7 @@ struct cperf_options {
 
 	uint32_t pool_sz;
 	uint32_t total_ops;
-	uint32_t burst_sz;
-	uint32_t buffer_sz;
+	uint32_t test_buffer_size;
 	uint32_t segments_nb;
 
 	char device_type[RTE_CRYPTODEV_NAME_LEN];
@@ -84,6 +84,19 @@ struct cperf_options {
 	uint16_t auth_key_sz;
 	uint16_t auth_digest_sz;
 	uint16_t auth_aad_sz;
+
+	uint32_t buffer_size_list[MAX_LIST];
+	uint8_t buffer_size_count;
+	uint32_t max_buffer_size;
+	uint32_t min_buffer_size;
+	uint32_t inc_buffer_size;
+
+	uint32_t burst_size_list[MAX_LIST];
+	uint8_t burst_size_count;
+	uint32_t max_burst_size;
+	uint32_t min_burst_size;
+	uint32_t inc_burst_size;
+
 };
 
 void
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index 4c94bde..e458f6d 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -124,6 +124,132 @@ parse_uint16_t(uint16_t *value, const char *arg)
 }
 
 static int
+parse_range(const char *arg, uint32_t *min, uint32_t *max, uint32_t *inc)
+{
+	char *token;
+	uint32_t number;
+
+	char *copy_arg = strdup(arg);
+
+	if (copy_arg == NULL)
+		return -1;
+
+	token = strtok(copy_arg, ":");
+
+	/* Parse minimum value */
+	if (token != NULL) {
+		errno = 0;
+		number = strtoul(token, NULL, 10);
+
+		if (errno != 0 || number == 0)
+			goto err_range;
+
+		*min = number;
+	} else
+		goto err_range;
+
+	token = strtok(NULL, ":");
+
+	/* Parse increment value */
+	if (token != NULL) {
+		errno = 0;
+		number = strtoul(token, NULL, 10);
+
+		if (errno != 0 || number == 0)
+			goto err_range;
+
+		*inc = number;
+	} else
+		goto err_range;
+
+	token = strtok(NULL, ":");
+
+	/* Parse maximum value */
+	if (token != NULL) {
+		errno = 0;
+		number = strtoul(token, NULL, 10);
+
+		if (errno != 0 || number == 0 ||
+				number < *min)
+			goto err_range;
+
+		*max = number;
+	} else
+		goto err_range;
+
+	if (strtok(NULL, ":") != NULL)
+		goto err_range;
+
+	free(copy_arg);
+	return 0;
+
+err_range:
+	free(copy_arg);
+	return -1;
+}
+
+static int
+parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
+{
+	char *token;
+	uint32_t number;
+	uint8_t count = 0;
+
+	char *copy_arg = strdup(arg);
+
+	if (copy_arg == NULL)
+		return -1;
+
+	token = strtok(copy_arg, ",");
+
+	/* Parse first value */
+	if (token != NULL) {
+		errno = 0;
+		number = strtoul(token, NULL, 10);
+
+		if (errno != 0 || number == 0)
+			goto err_list;
+
+		list[count++] = number;
+		*min = number;
+		*max = number;
+	} else
+		goto err_list;
+
+	token = strtok(NULL, ",");
+
+	while (token != NULL) {
+		if (count == MAX_LIST) {
+			RTE_LOG(WARNING, USER1, "Using only the first %u sizes\n",
+					MAX_LIST);
+			break;
+		}
+
+		errno = 0;
+		number = strtoul(token, NULL, 10);
+
+		if (errno != 0 || number == 0)
+			goto err_list;
+
+		list[count++] = number;
+
+		if (number < *min)
+			*min = number;
+		if (number > *max)
+			*max = number;
+
+		token = strtok(NULL, ",");
+	}
+
+	free(copy_arg);
+	return count;
+
+err_list:
+	free(copy_arg);
+	return -1;
+}
+
+static int
 parse_total_ops(struct cperf_options *opts, const char *arg)
 {
 	int ret = parse_uint32_t(&opts->total_ops, arg);
@@ -153,32 +279,43 @@ parse_pool_sz(struct cperf_options *opts, const char *arg)
 static int
 parse_burst_sz(struct cperf_options *opts, const char *arg)
 {
-	int ret = parse_uint32_t(&opts->burst_sz, arg);
+	int ret;
+
+	/* Try parsing as a range; if that fails, parse as a list */
+	if (parse_range(arg, &opts->min_burst_size, &opts->max_burst_size,
+			&opts->inc_burst_size) < 0) {
+		ret = parse_list(arg, opts->burst_size_list,
+					&opts->min_burst_size,
+					&opts->max_burst_size);
+		if (ret < 0) {
+			RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+			return -1;
+		}
+		opts->burst_size_count = ret;
+	}
 
-	if (ret)
-		RTE_LOG(ERR, USER1, "failed to parse burst size");
-	return ret;
+	return 0;
 }
 
 static int
 parse_buffer_sz(struct cperf_options *opts, const char *arg)
 {
-	uint32_t i, valid_buf_sz[] = {
-			32, 64, 128, 256, 384, 512, 768, 1024, 1280, 1536, 1792,
-			2048
-	};
-
-	if (parse_uint32_t(&opts->buffer_sz, arg)) {
-		RTE_LOG(ERR, USER1, "failed to parse buffer size");
-		return -1;
+	int ret;
+
+	/* Try parsing as a range; if that fails, parse as a list */
+	if (parse_range(arg, &opts->min_buffer_size, &opts->max_buffer_size,
+			&opts->inc_buffer_size) < 0) {
+		ret = parse_list(arg, opts->buffer_size_list,
+					&opts->min_buffer_size,
+					&opts->max_buffer_size);
+		if (ret < 0) {
+			RTE_LOG(ERR, USER1, "failed to parse buffer size/s\n");
+			return -1;
+		}
+		opts->buffer_size_count = ret;
 	}
 
-	for (i = 0; i < RTE_DIM(valid_buf_sz); i++)
-		if (valid_buf_sz[i] == opts->buffer_sz)
-			return 0;
-
-	RTE_LOG(ERR, USER1, "invalid buffer size specified");
-	return -1;
+	return 0;
 }
 
 static int
@@ -474,8 +611,19 @@ cperf_options_default(struct cperf_options *opts)
 
 	opts->pool_sz = 8192;
 	opts->total_ops = 10000000;
-	opts->burst_sz = 32;
-	opts->buffer_sz = 64;
+
+	opts->buffer_size_list[0] = 64;
+	opts->buffer_size_count = 1;
+	opts->max_buffer_size = 64;
+	opts->min_buffer_size = 64;
+	opts->inc_buffer_size = 0;
+
+	opts->burst_size_list[0] = 32;
+	opts->burst_size_count = 1;
+	opts->max_burst_size = 32;
+	opts->min_burst_size = 32;
+	opts->inc_burst_size = 0;
+
 	opts->segments_nb = 1;
 
 	strncpy(opts->device_type, "crypto_aesni_mb",
@@ -569,7 +717,7 @@ cperf_options_parse(struct cperf_options *options, int argc, char **argv)
 int
 cperf_options_check(struct cperf_options *options)
 {
-	if (options->segments_nb > options->buffer_sz) {
+	if (options->segments_nb > options->min_buffer_size) {
 		RTE_LOG(ERR, USER1,
 				"Segments number greater than buffer size.\n");
 		return -EINVAL;
@@ -602,6 +750,22 @@ cperf_options_check(struct cperf_options *options)
 		return -EINVAL;
 	}
 
+	if (options->test == CPERF_TEST_TYPE_VERIFY &&
+			(options->inc_buffer_size != 0 ||
+			options->buffer_size_count > 1)) {
+		RTE_LOG(ERR, USER1, "Only one buffer size is allowed when "
+				"using the verify test.\n");
+		return -EINVAL;
+	}
+
+	if (options->test == CPERF_TEST_TYPE_VERIFY &&
+			(options->inc_burst_size != 0 ||
+			options->burst_size_count > 1)) {
+		RTE_LOG(ERR, USER1, "Only one burst size is allowed when "
+				"using the verify test.\n");
+		return -EINVAL;
+	}
+
 	if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
 		if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
 				options->auth_op !=
@@ -649,15 +813,37 @@ cperf_options_check(struct cperf_options *options)
 void
 cperf_options_dump(struct cperf_options *opts)
 {
+	uint8_t size_idx;
+
 	printf("# Crypto Performance Application Options:\n");
 	printf("#\n");
 	printf("# cperf test: %s\n", cperf_test_type_strs[opts->test]);
 	printf("#\n");
 	printf("# size of crypto op / mbuf pool: %u\n", opts->pool_sz);
 	printf("# total number of ops: %u\n", opts->total_ops);
-	printf("# burst size: %u\n", opts->burst_sz);
-	printf("# buffer size: %u\n", opts->buffer_sz);
-	printf("# segments per buffer: %u\n", opts->segments_nb);
+	if (opts->inc_buffer_size != 0) {
+		printf("# buffer size:\n");
+		printf("#\t min: %u\n", opts->min_buffer_size);
+		printf("#\t max: %u\n", opts->max_buffer_size);
+		printf("#\t inc: %u\n", opts->inc_buffer_size);
+	} else {
+		printf("# buffer sizes: ");
+		for (size_idx = 0; size_idx < opts->buffer_size_count; size_idx++)
+			printf("%u ", opts->buffer_size_list[size_idx]);
+		printf("\n");
+	}
+	if (opts->inc_burst_size != 0) {
+		printf("# burst size:\n");
+		printf("#\t min: %u\n", opts->min_burst_size);
+		printf("#\t max: %u\n", opts->max_burst_size);
+		printf("#\t inc: %u\n", opts->inc_burst_size);
+	} else {
+		printf("# burst sizes: ");
+		for (size_idx = 0; size_idx < opts->burst_size_count; size_idx++)
+			printf("%u ", opts->burst_size_list[size_idx]);
+		printf("\n");
+	}
+	printf("\n# segments per buffer: %u\n", opts->segments_nb);
 	printf("#\n");
 	printf("# cryptodev type: %s\n", opts->device_type);
 	printf("#\n");
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 20d7069..3275b4b 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -115,8 +115,8 @@ cperf_mbuf_create(struct rte_mempool *mempool,
 		const struct cperf_test_vector *test_vector)
 {
 	struct rte_mbuf *mbuf;
-	uint32_t segment_sz = options->buffer_sz / segments_nb;
-	uint32_t last_sz = options->buffer_sz % segments_nb;
+	uint32_t segment_sz = options->max_buffer_size / segments_nb;
+	uint32_t last_sz = options->max_buffer_size % segments_nb;
 	uint8_t *mbuf_data;
 	uint8_t *test_data =
 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -218,8 +218,8 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 			options->pool_sz * options->segments_nb, 0, 0,
 			RTE_PKTMBUF_HEADROOM +
 			RTE_CACHE_LINE_ROUNDUP(
-				(options->buffer_sz / options->segments_nb) +
-				(options->buffer_sz % options->segments_nb) +
+				(options->max_buffer_size / options->segments_nb) +
+				(options->max_buffer_size % options->segments_nb) +
 					options->auth_digest_sz),
 			rte_socket_id());
 
@@ -249,7 +249,7 @@ cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
 				pool_name, options->pool_sz, 0, 0,
 				RTE_PKTMBUF_HEADROOM +
 				RTE_CACHE_LINE_ROUNDUP(
-					options->buffer_sz +
+					options->max_buffer_size +
 					options->auth_digest_sz),
 				rte_socket_id());
 
@@ -300,14 +300,16 @@ cperf_latency_test_runner(void *arg)
 {
 	struct cperf_latency_ctx *ctx = arg;
 	struct cperf_op_result *pres;
+	uint16_t test_burst_size;
+	uint8_t burst_size_idx = 0;
 
 	static int only_once;
 
 	if (ctx == NULL)
 		return 0;
 
-	struct rte_crypto_op *ops[ctx->options->burst_sz];
-	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+	struct rte_crypto_op *ops[ctx->options->max_burst_size];
+	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
 	uint64_t i;
 
 	uint32_t lcore = rte_lcore_id();
@@ -331,188 +333,207 @@ cperf_latency_test_runner(void *arg)
 	for (i = 0; i < ctx->options->total_ops; i++)
 		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-	uint64_t ops_enqd = 0, ops_deqd = 0;
-	uint64_t m_idx = 0, b_idx = 0;
+	/* Get first size from range or list */
+	if (ctx->options->inc_burst_size != 0)
+		test_burst_size = ctx->options->min_burst_size;
+	else
+		test_burst_size = ctx->options->burst_size_list[0];
 
-	uint64_t tsc_val, tsc_end, tsc_start;
-	uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
-	uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
-	uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
+	while (test_burst_size <= ctx->options->max_burst_size) {
+		uint64_t ops_enqd = 0, ops_deqd = 0;
+		uint64_t m_idx = 0, b_idx = 0;
 
-	while (enqd_tot < ctx->options->total_ops) {
-		uint16_t burst_size = ((enqd_tot + ctx->options->burst_sz)
-				<= ctx->options->total_ops) ?
-						ctx->options->burst_sz :
-						ctx->options->total_ops -
-						enqd_tot;
+		uint64_t tsc_val, tsc_end, tsc_start;
+		uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
+		uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
+		uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
 
-		/* Allocate crypto ops from pool */
-		if (burst_size != rte_crypto_op_bulk_alloc(
-				ctx->crypto_op_pool,
-				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-				ops, burst_size))
-			return -1;
+		while (enqd_tot < ctx->options->total_ops) {
 
-		/* Setup crypto op, attach mbuf etc */
-		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
-				&ctx->mbufs_out[m_idx],
-				burst_size, ctx->sess, ctx->options,
-				ctx->test_vector);
+			uint16_t burst_size = ((enqd_tot + test_burst_size)
+					<= ctx->options->total_ops) ?
+							test_burst_size :
+							ctx->options->total_ops -
+							enqd_tot;
 
-		tsc_start = rte_rdtsc_precise();
+			/* Allocate crypto ops from pool */
+			if (burst_size != rte_crypto_op_bulk_alloc(
+					ctx->crypto_op_pool,
+					RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+					ops, burst_size))
+				return -1;
+
+			/* Setup crypto op, attach mbuf etc */
+			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+					&ctx->mbufs_out[m_idx],
+					burst_size, ctx->sess, ctx->options,
+					ctx->test_vector);
+
+			tsc_start = rte_rdtsc_precise();
 
 #ifdef CPERF_LINEARIZATION_ENABLE
-		if (linearize) {
-			/* PMD doesn't support scatter-gather and source buffer
-			 * is segmented.
-			 * We need to linearize it before enqueuing.
-			 */
-			for (i = 0; i < burst_size; i++)
-				rte_pktmbuf_linearize(ops[i]->sym->m_src);
-		}
+			if (linearize) {
+				/* PMD doesn't support scatter-gather and source buffer
+				 * is segmented.
+				 * We need to linearize it before enqueuing.
+				 */
+				for (i = 0; i < burst_size; i++)
+					rte_pktmbuf_linearize(ops[i]->sym->m_src);
+			}
 #endif /* CPERF_LINEARIZATION_ENABLE */
 
-		/* Enqueue burst of ops on crypto device */
-		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
-				ops, burst_size);
+			/* Enqueue burst of ops on crypto device */
+			ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+					ops, burst_size);
 
-		/* Dequeue processed burst of ops from crypto device */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+			/* Dequeue processed burst of ops from crypto device */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
 
-		tsc_end = rte_rdtsc_precise();
+			tsc_end = rte_rdtsc_precise();
 
-		for (i = 0; i < ops_enqd; i++) {
-			ctx->res[tsc_idx].tsc_start = tsc_start;
-			ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
-			tsc_idx++;
-		}
+			/* Free memory for not enqueued operations */
+			for (i = ops_enqd; i < burst_size; i++)
+				rte_crypto_op_free(ops[i]);
 
-		/* Free memory for not enqueued operations */
-		for (i = ops_enqd; i < burst_size; i++)
-			rte_crypto_op_free(ops[i]);
-
-		if (likely(ops_deqd))  {
-			/*
-			 * free crypto ops so they can be reused. We don't free
-			 * the mbufs here as we don't want to reuse them as
-			 * the crypto operation will change the data and cause
-			 * failures.
-			 */
-			for (i = 0; i < ops_deqd; i++) {
-				pres = (struct cperf_op_result *)
-						(ops_processed[i]->opaque_data);
-				pres->status = ops_processed[i]->status;
-				pres->tsc_end = tsc_end;
-
-				rte_crypto_op_free(ops_processed[i]);
+			for (i = 0; i < ops_enqd; i++) {
+				ctx->res[tsc_idx].tsc_start = tsc_start;
+				ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+				tsc_idx++;
 			}
 
-			deqd_tot += ops_deqd;
-			deqd_max = max(ops_deqd, deqd_max);
-			deqd_min = min(ops_deqd, deqd_min);
-		}
+			if (likely(ops_deqd))  {
+				/*
+				 * free crypto ops so they can be reused. We don't free
+				 * the mbufs here as we don't want to reuse them as
+				 * the crypto operation will change the data and cause
+				 * failures.
+				 */
+				for (i = 0; i < ops_deqd; i++) {
+					pres = (struct cperf_op_result *)
+							(ops_processed[i]->opaque_data);
+					pres->status = ops_processed[i]->status;
+					pres->tsc_end = tsc_end;
+
+					rte_crypto_op_free(ops_processed[i]);
+				}
+
+				deqd_tot += ops_deqd;
+				deqd_max = max(ops_deqd, deqd_max);
+				deqd_min = min(ops_deqd, deqd_min);
+			}
 
-		enqd_tot += ops_enqd;
-		enqd_max = max(ops_enqd, enqd_max);
-		enqd_min = min(ops_enqd, enqd_min);
+			enqd_tot += ops_enqd;
+			enqd_max = max(ops_enqd, enqd_max);
+			enqd_min = min(ops_enqd, enqd_min);
 
-		m_idx += ops_enqd;
-		m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
-				0 : m_idx;
-		b_idx++;
-	}
+			m_idx += ops_enqd;
+			m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+					0 : m_idx;
+			b_idx++;
+		}
 
-	/* Dequeue any operations still in the crypto device */
-	while (deqd_tot < ctx->options->total_ops) {
-		/* Sending 0 length burst to flush sw crypto device */
-		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+		/* Dequeue any operations still in the crypto device */
+		while (deqd_tot < ctx->options->total_ops) {
+			/* Sending 0 length burst to flush sw crypto device */
+			rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+			/* dequeue burst */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
 
-		/* dequeue burst */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+			tsc_end = rte_rdtsc_precise();
 
-		tsc_end = rte_rdtsc_precise();
+			if (ops_deqd != 0) {
+				for (i = 0; i < ops_deqd; i++) {
+					pres = (struct cperf_op_result *)
+							(ops_processed[i]->opaque_data);
+					pres->status = ops_processed[i]->status;
+					pres->tsc_end = tsc_end;
 
-		if (ops_deqd != 0) {
-			for (i = 0; i < ops_deqd; i++) {
-				pres = (struct cperf_op_result *)
-						(ops_processed[i]->opaque_data);
-				pres->status = ops_processed[i]->status;
-				pres->tsc_end = tsc_end;
+					rte_crypto_op_free(ops_processed[i]);
+				}
 
-				rte_crypto_op_free(ops_processed[i]);
+				deqd_tot += ops_deqd;
+				deqd_max = max(ops_deqd, deqd_max);
+				deqd_min = min(ops_deqd, deqd_min);
 			}
+		}
 
-			deqd_tot += ops_deqd;
-			deqd_max = max(ops_deqd, deqd_max);
-			deqd_min = min(ops_deqd, deqd_min);
+		for (i = 0; i < tsc_idx; i++) {
+			tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
+			tsc_max = max(tsc_val, tsc_max);
+			tsc_min = min(tsc_val, tsc_min);
+			tsc_tot += tsc_val;
 		}
-	}
 
-	for (i = 0; i < tsc_idx; i++) {
-		tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
-		tsc_max = max(tsc_val, tsc_max);
-		tsc_min = min(tsc_val, tsc_min);
-		tsc_tot += tsc_val;
-	}
+		double time_tot, time_avg, time_max, time_min;
 
-	double time_tot, time_avg, time_max, time_min;
+		const uint64_t tunit = 1000000; /* us */
+		const uint64_t tsc_hz = rte_get_tsc_hz();
 
-	const uint64_t tunit = 1000000; /* us */
-	const uint64_t tsc_hz = rte_get_tsc_hz();
+		uint64_t enqd_avg = enqd_tot / b_idx;
+		uint64_t deqd_avg = deqd_tot / b_idx;
+		uint64_t tsc_avg = tsc_tot / tsc_idx;
 
-	uint64_t enqd_avg = enqd_tot / b_idx;
-	uint64_t deqd_avg = deqd_tot / b_idx;
-	uint64_t tsc_avg = tsc_tot / tsc_idx;
+		time_tot = tunit*(double)(tsc_tot) / tsc_hz;
+		time_avg = tunit*(double)(tsc_avg) / tsc_hz;
+		time_max = tunit*(double)(tsc_max) / tsc_hz;
+		time_min = tunit*(double)(tsc_min) / tsc_hz;
 
-	time_tot = tunit*(double)(tsc_tot) / tsc_hz;
-	time_avg = tunit*(double)(tsc_avg) / tsc_hz;
-	time_max = tunit*(double)(tsc_max) / tsc_hz;
-	time_min = tunit*(double)(tsc_min) / tsc_hz;
+		if (ctx->options->csv) {
+			if (!only_once)
+				printf("\n# lcore, Buffer Size, Burst Size, Packet Seq #, "
+						"Packet Size, cycles, time (us)");
 
-	if (ctx->options->csv) {
-		if (!only_once)
-			printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
-					"Packet Size, cycles, time (us)");
+			for (i = 0; i < ctx->options->total_ops; i++) {
 
-		for (i = 0; i < ctx->options->total_ops; i++) {
+				printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
+					ctx->lcore_id, ctx->options->test_buffer_size,
+					test_burst_size, i + 1,
+					ctx->res[i].tsc_end - ctx->res[i].tsc_start,
+					tunit * (double) (ctx->res[i].tsc_end
+							- ctx->res[i].tsc_start)
+						/ tsc_hz);
 
-			printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
-				ctx->lcore_id, ctx->options->buffer_sz,
-				ctx->options->burst_sz, i + 1,
-				ctx->res[i].tsc_end - ctx->res[i].tsc_start,
-				tunit * (double) (ctx->res[i].tsc_end
-						- ctx->res[i].tsc_start)
-					/ tsc_hz);
+			}
+			only_once = 1;
+		} else {
+			printf("\n# Device %d on lcore %u\n", ctx->dev_id,
+				ctx->lcore_id);
+			printf("\n# total operations: %u", ctx->options->total_ops);
+			printf("\n# Buffer size: %u", ctx->options->test_buffer_size);
+			printf("\n# Burst size: %u", test_burst_size);
+			printf("\n#     Number of bursts: %"PRIu64,
+					b_idx);
+
+			printf("\n#");
+			printf("\n#          \t       Total\t   Average\t   "
+					"Maximum\t   Minimum");
+			printf("\n#  enqueued\t%12"PRIu64"\t%10"PRIu64"\t"
+					"%10"PRIu64"\t%10"PRIu64, enqd_tot,
+					enqd_avg, enqd_max, enqd_min);
+			printf("\n#  dequeued\t%12"PRIu64"\t%10"PRIu64"\t"
+					"%10"PRIu64"\t%10"PRIu64, deqd_tot,
+					deqd_avg, deqd_max, deqd_min);
+			printf("\n#    cycles\t%12"PRIu64"\t%10"PRIu64"\t"
+					"%10"PRIu64"\t%10"PRIu64, tsc_tot,
+					tsc_avg, tsc_max, tsc_min);
+			printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f",
+					time_tot, time_avg, time_max, time_min);
+			printf("\n\n");
 
 		}
-		only_once = 1;
-	} else {
-		printf("\n# Device %d on lcore %u\n", ctx->dev_id,
-			ctx->lcore_id);
-		printf("\n# total operations: %u", ctx->options->total_ops);
-		printf("\n# Buffer size: %u", ctx->options->buffer_sz);
-		printf("\n# Burst size: %u", ctx->options->burst_sz);
-		printf("\n#     Number of bursts: %"PRIu64,
-				b_idx);
-
-		printf("\n#");
-		printf("\n#          \t       Total\t   Average\t   "
-				"Maximum\t   Minimum");
-		printf("\n#  enqueued\t%12"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64"\t%10"PRIu64, enqd_tot,
-				enqd_avg, enqd_max, enqd_min);
-		printf("\n#  dequeued\t%12"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64"\t%10"PRIu64, deqd_tot,
-				deqd_avg, deqd_max, deqd_min);
-		printf("\n#    cycles\t%12"PRIu64"\t%10"PRIu64"\t"
-				"%10"PRIu64"\t%10"PRIu64, tsc_tot,
-				tsc_avg, tsc_max, tsc_min);
-		printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f",
-				time_tot, time_avg, time_max, time_min);
-		printf("\n\n");
 
+		/* Get next size from range or list */
+		if (ctx->options->inc_burst_size != 0)
+			test_burst_size += ctx->options->inc_burst_size;
+		else {
+			if (++burst_size_idx == ctx->options->burst_size_count)
+				break;
+			test_burst_size =
+				ctx->options->burst_size_list[burst_size_idx];
+		}
 	}
 
 	return 0;
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 32c5bad..70ec4ff 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -103,8 +103,8 @@ cperf_mbuf_create(struct rte_mempool *mempool,
 		const struct cperf_test_vector *test_vector)
 {
 	struct rte_mbuf *mbuf;
-	uint32_t segment_sz = options->buffer_sz / segments_nb;
-	uint32_t last_sz = options->buffer_sz % segments_nb;
+	uint32_t segment_sz = options->max_buffer_size / segments_nb;
+	uint32_t last_sz = options->max_buffer_size % segments_nb;
 	uint8_t *mbuf_data;
 	uint8_t *test_data =
 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -206,8 +206,8 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 			options->pool_sz * options->segments_nb, 0, 0,
 			RTE_PKTMBUF_HEADROOM +
 			RTE_CACHE_LINE_ROUNDUP(
-				(options->buffer_sz / options->segments_nb) +
-				(options->buffer_sz % options->segments_nb) +
+				(options->max_buffer_size / options->segments_nb) +
+				(options->max_buffer_size % options->segments_nb) +
 					options->auth_digest_sz),
 			rte_socket_id());
 
@@ -235,7 +235,7 @@ cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
 				pool_name, options->pool_sz, 0, 0,
 				RTE_PKTMBUF_HEADROOM +
 				RTE_CACHE_LINE_ROUNDUP(
-					options->buffer_sz +
+					options->max_buffer_size +
 					options->auth_digest_sz),
 				rte_socket_id());
 
@@ -279,11 +279,13 @@ int
 cperf_throughput_test_runner(void *test_ctx)
 {
 	struct cperf_throughput_ctx *ctx = test_ctx;
+	uint16_t test_burst_size;
+	uint8_t burst_size_idx = 0;
 
 	static int only_once;
 
-	struct rte_crypto_op *ops[ctx->options->burst_sz];
-	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+	struct rte_crypto_op *ops[ctx->options->max_burst_size];
+	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
 	uint64_t i;
 
 	uint32_t lcore = rte_lcore_id();
@@ -307,164 +309,184 @@ cperf_throughput_test_runner(void *test_ctx)
 	for (i = 0; i < ctx->options->total_ops; i++)
 		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
-	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
-	uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
+	/* Get first size from range or list */
+	if (ctx->options->inc_burst_size != 0)
+		test_burst_size = ctx->options->min_burst_size;
+	else
+		test_burst_size = ctx->options->burst_size_list[0];
 
-	tsc_start = rte_rdtsc_precise();
-	while (ops_enqd_total < ctx->options->total_ops) {
+	while (test_burst_size <= ctx->options->max_burst_size) {
+		uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
+		uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+
+		uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
 
 		uint16_t ops_unused = 0;
 
-		uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
-				<= ctx->options->total_ops) ?
-						ctx->options->burst_sz :
-						ctx->options->total_ops -
-						ops_enqd_total;
+		tsc_start = rte_rdtsc_precise();
+
+		while (ops_enqd_total < ctx->options->total_ops) {
+
+			uint16_t burst_size = ((ops_enqd_total + test_burst_size)
+					<= ctx->options->total_ops) ?
+							test_burst_size :
+							ctx->options->total_ops -
+							ops_enqd_total;
 
-		uint16_t ops_needed = burst_size - ops_unused;
+			uint16_t ops_needed = burst_size - ops_unused;
 
-		/* Allocate crypto ops from pool */
-		if (ops_needed != rte_crypto_op_bulk_alloc(
-				ctx->crypto_op_pool,
-				RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-				ops, ops_needed))
-			return -1;
+			/* Allocate crypto ops from pool */
+			if (ops_needed != rte_crypto_op_bulk_alloc(
+					ctx->crypto_op_pool,
+					RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+					ops, ops_needed))
+				return -1;
 
-		/* Setup crypto op, attach mbuf etc */
-		(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
-				&ctx->mbufs_out[m_idx],
-				ops_needed, ctx->sess, ctx->options,
-				ctx->test_vector);
+			/* Setup crypto op, attach mbuf etc */
+			(ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+					&ctx->mbufs_out[m_idx],
+					ops_needed, ctx->sess, ctx->options,
+					ctx->test_vector);
 
 #ifdef CPERF_LINEARIZATION_ENABLE
-		if (linearize) {
-			/* PMD doesn't support scatter-gather and source buffer
-			 * is segmented.
-			 * We need to linearize it before enqueuing.
-			 */
-			for (i = 0; i < burst_size; i++)
-				rte_pktmbuf_linearize(ops[i]->sym->m_src);
-		}
+			if (linearize) {
+				/* PMD doesn't support scatter-gather and source buffer
+				 * is segmented.
+				 * We need to linearize it before enqueuing.
+				 */
+				for (i = 0; i < burst_size; i++)
+					rte_pktmbuf_linearize(ops[i]->sym->m_src);
+			}
 #endif /* CPERF_LINEARIZATION_ENABLE */
 
-		/* Enqueue burst of ops on crypto device */
-		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
-				ops, burst_size);
-		if (ops_enqd < burst_size)
-			ops_enqd_failed++;
-
-		/**
-		 * Calculate number of ops not enqueued (mainly for hw
-		 * accelerators whose ingress queue can fill up).
-		 */
-		ops_unused = burst_size - ops_enqd;
-		ops_enqd_total += ops_enqd;
-
-
-		/* Dequeue processed burst of ops from crypto device */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
-
-		if (likely(ops_deqd))  {
-			/* free crypto ops so they can be reused. We don't free
-			 * the mbufs here as we don't want to reuse them as
-			 * the crypto operation will change the data and cause
-			 * failures.
-			 */
-			for (i = 0; i < ops_deqd; i++)
-				rte_crypto_op_free(ops_processed[i]);
+			/* Enqueue burst of ops on crypto device */
+			ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+					ops, burst_size);
+			if (ops_enqd < burst_size)
+				ops_enqd_failed++;
 
-			ops_deqd_total += ops_deqd;
-		} else {
 			/**
-			 * Count dequeue polls which didn't return any
-			 * processed operations. This statistic is mainly
-			 * relevant to hw accelerators.
+			 * Calculate number of ops not enqueued (mainly for hw
+			 * accelerators whose ingress queue can fill up).
 			 */
-			ops_deqd_failed++;
+			ops_unused = burst_size - ops_enqd;
+			ops_enqd_total += ops_enqd;
+
+
+			/* Dequeue processed burst of ops from crypto device */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
+
+			if (likely(ops_deqd))  {
+				/* free crypto ops so they can be reused. We don't free
+				 * the mbufs here as we don't want to reuse them as
+				 * the crypto operation will change the data and cause
+				 * failures.
+				 */
+				for (i = 0; i < ops_deqd; i++)
+					rte_crypto_op_free(ops_processed[i]);
+
+				ops_deqd_total += ops_deqd;
+			} else {
+				/**
+				 * Count dequeue polls which didn't return any
+				 * processed operations. This statistic is mainly
+				 * relevant to hw accelerators.
+				 */
+				ops_deqd_failed++;
+			}
+
+			m_idx += ops_needed;
+			m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+					0 : m_idx;
 		}
 
-		m_idx += ops_needed;
-		m_idx = m_idx + ctx->options->burst_sz > ctx->options->pool_sz ?
-				0 : m_idx;
-	}
+		/* Dequeue any operations still in the crypto device */
 
-	/* Dequeue any operations still in the crypto device */
+		while (ops_deqd_total < ctx->options->total_ops) {
+			/* Sending 0 length burst to flush sw crypto device */
+			rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
 
-	while (ops_deqd_total < ctx->options->total_ops) {
-		/* Sending 0 length burst to flush sw crypto device */
-		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+			/* dequeue burst */
+			ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+					ops_processed, test_burst_size);
+			if (ops_deqd == 0)
+				ops_deqd_failed++;
+			else {
+				for (i = 0; i < ops_deqd; i++)
+					rte_crypto_op_free(ops_processed[i]);
 
-		/* dequeue burst */
-		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
-		if (ops_deqd == 0)
-			ops_deqd_failed++;
-		else {
-			for (i = 0; i < ops_deqd; i++)
-				rte_crypto_op_free(ops_processed[i]);
+				ops_deqd_total += ops_deqd;
+			}
+		}
 
-			ops_deqd_total += ops_deqd;
+		tsc_end = rte_rdtsc_precise();
+		tsc_duration = (tsc_end - tsc_start);
+
+		/* Calculate average operations processed per second */
+		double ops_per_second = ((double)ctx->options->total_ops /
+				tsc_duration) * rte_get_tsc_hz();
+
+		/* Calculate average throughput (Gbps) in bits per second */
+		double throughput_gbps = ((ops_per_second *
+				ctx->options->test_buffer_size * 8) / 1000000000);
+
+		/* Calculate average cycles per packet */
+		double cycles_per_packet = ((double)tsc_duration /
+				ctx->options->total_ops);
+
+		if (!ctx->options->csv) {
+			if (!only_once)
+				printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
+					"lcore id", "Buf Size", "Burst Size",
+					"Enqueued", "Dequeued", "Failed Enq",
+					"Failed Deq", "MOps", "Gbps",
+					"Cycles/Buf");
+			only_once = 1;
+
+			printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
+					"%12"PRIu64"%12.4f%12.4f%12.2f\n",
+					ctx->lcore_id,
+					ctx->options->test_buffer_size,
+					test_burst_size,
+					ops_enqd_total,
+					ops_deqd_total,
+					ops_enqd_failed,
+					ops_deqd_failed,
+					ops_per_second/1000000,
+					throughput_gbps,
+					cycles_per_packet);
+		} else {
+			if (!only_once)
+				printf("# lcore id, Buffer Size(B),"
+					"Burst Size,Enqueued,Dequeued,Failed Enq,"
+					"Failed Deq,Ops(Millions),Throughput(Gbps),"
+					"Cycles/Buf\n\n");
+			only_once = 1;
+
+			printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+					"%.3f;%.3f;%.3f\n",
+					ctx->lcore_id,
+					ctx->options->test_buffer_size,
+					test_burst_size,
+					ops_enqd_total,
+					ops_deqd_total,
+					ops_enqd_failed,
+					ops_deqd_failed,
+					ops_per_second/1000000,
+					throughput_gbps,
+					cycles_per_packet);
+		}
+
+		/* Get next size from range or list */
+		if (ctx->options->inc_burst_size != 0)
+			test_burst_size += ctx->options->inc_burst_size;
+		else {
+			if (++burst_size_idx == ctx->options->burst_size_count)
+				break;
+			test_burst_size = ctx->options->burst_size_list[burst_size_idx];
 		}
-	}
 
-	tsc_end = rte_rdtsc_precise();
-	tsc_duration = (tsc_end - tsc_start);
-
-	/* Calculate average operations processed per second */
-	double ops_per_second = ((double)ctx->options->total_ops /
-			tsc_duration) * rte_get_tsc_hz();
-
-	/* Calculate average throughput (Gbps) in bits per second */
-	double throughput_gbps = ((ops_per_second *
-			ctx->options->buffer_sz * 8) / 1000000000);
-
-	/* Calculate average cycles per packet */
-	double cycles_per_packet = ((double)tsc_duration /
-			ctx->options->total_ops);
-
-	if (!ctx->options->csv) {
-		if (!only_once)
-			printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
-				"lcore id", "Buf Size", "Burst Size",
-				"Enqueued", "Dequeued", "Failed Enq",
-				"Failed Deq", "MOps", "Gbps",
-				"Cycles/Buf");
-		only_once = 1;
-
-		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
-				"%12"PRIu64"%12.4f%12.4f%12.2f\n",
-				ctx->lcore_id,
-				ctx->options->buffer_sz,
-				ctx->options->burst_sz,
-				ops_enqd_total,
-				ops_deqd_total,
-				ops_enqd_failed,
-				ops_deqd_failed,
-				ops_per_second/1000000,
-				throughput_gbps,
-				cycles_per_packet);
-	} else {
-		if (!only_once)
-			printf("# lcore id, Buffer Size(B),"
-				"Burst Size,Enqueued,Dequeued,Failed Enq,"
-				"Failed Deq,Ops(Millions),Throughput(Gbps),"
-				"Cycles/Buf\n\n");
-		only_once = 1;
-
-		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
-				"%.f3;%.f3;%.f3\n",
-				ctx->lcore_id,
-				ctx->options->buffer_sz,
-				ctx->options->burst_sz,
-				ops_enqd_total,
-				ops_deqd_total,
-				ops_enqd_failed,
-				ops_deqd_failed,
-				ops_per_second/1000000,
-				throughput_gbps,
-				cycles_per_packet);
 	}
 
 	return 0;
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index d1c01d2..f384e3d 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -264,12 +264,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
 		if (tc_found)
 			vector->plaintext.length = data_length;
 		else {
-			if (opts->buffer_sz > data_length) {
+			if (opts->max_buffer_size > data_length) {
 				printf("Global plaintext shorter than "
 					"buffer_sz\n");
 				return -1;
 			}
-			vector->plaintext.length = opts->buffer_sz;
+			vector->plaintext.length = opts->max_buffer_size;
 		}
 
 	} else if (strstr(key_token, "cipher_key")) {
@@ -321,12 +321,12 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
 		if (tc_found)
 			vector->ciphertext.length = data_length;
 		else {
-			if (opts->buffer_sz > data_length) {
+			if (opts->max_buffer_size > data_length) {
 				printf("Global ciphertext shorter than "
 					"buffer_sz\n");
 				return -1;
 			}
-			vector->ciphertext.length = opts->buffer_sz;
+			vector->ciphertext.length = opts->max_buffer_size;
 		}
 
 	} else if (strstr(key_token, "aad")) {
@@ -498,10 +498,10 @@ cperf_test_vector_get_from_file(struct cperf_options *opts)
 
 	/* other values not included in the file */
 	test_vector->data.cipher_offset = 0;
-	test_vector->data.cipher_length = opts->buffer_sz;
+	test_vector->data.cipher_length = opts->max_buffer_size;
 
 	test_vector->data.auth_offset = 0;
-	test_vector->data.auth_length = opts->buffer_sz;
+	test_vector->data.auth_length = opts->max_buffer_size;
 
 	return test_vector;
 }
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index f7b3aa9..f87bb8e 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -399,7 +399,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 		return t_vec;
 
 	t_vec->plaintext.data = plaintext;
-	t_vec->plaintext.length = options->buffer_sz;
+	t_vec->plaintext.length = options->max_buffer_size;
 
 	if (options->op_type ==	CPERF_CIPHER_ONLY ||
 			options->op_type == CPERF_CIPHER_THEN_AUTH ||
@@ -422,11 +422,11 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 			}
 			memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
 		}
-		t_vec->ciphertext.length = options->buffer_sz;
+		t_vec->ciphertext.length = options->max_buffer_size;
 		t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
 		t_vec->iv.length = options->cipher_iv_sz;
 		t_vec->data.cipher_offset = 0;
-		t_vec->data.cipher_length = options->buffer_sz;
+		t_vec->data.cipher_length = options->max_buffer_size;
 	}
 
 	if (options->op_type ==	CPERF_AUTH_ONLY ||
@@ -493,7 +493,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 		t_vec->digest.length = options->auth_digest_sz;
 		memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
 		t_vec->data.auth_offset = 0;
-		t_vec->data.auth_length = options->buffer_sz;
+		t_vec->data.auth_length = options->max_buffer_size;
 	}
 
 	return t_vec;
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index b58b86c..454221e 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -107,8 +107,8 @@ cperf_mbuf_create(struct rte_mempool *mempool,
 		const struct cperf_test_vector *test_vector)
 {
 	struct rte_mbuf *mbuf;
-	uint32_t segment_sz = options->buffer_sz / segments_nb;
-	uint32_t last_sz = options->buffer_sz % segments_nb;
+	uint32_t segment_sz = options->max_buffer_size / segments_nb;
+	uint32_t last_sz = options->max_buffer_size % segments_nb;
 	uint8_t *mbuf_data;
 	uint8_t *test_data =
 			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -210,8 +210,8 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 			options->pool_sz * options->segments_nb, 0, 0,
 			RTE_PKTMBUF_HEADROOM +
 			RTE_CACHE_LINE_ROUNDUP(
-				(options->buffer_sz / options->segments_nb) +
-				(options->buffer_sz % options->segments_nb) +
+				(options->max_buffer_size / options->segments_nb) +
+				(options->max_buffer_size % options->segments_nb) +
 					options->auth_digest_sz),
 			rte_socket_id());
 
@@ -239,7 +239,7 @@ cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
 				pool_name, options->pool_sz, 0, 0,
 				RTE_PKTMBUF_HEADROOM +
 				RTE_CACHE_LINE_ROUNDUP(
-					options->buffer_sz +
+					options->max_buffer_size +
 					options->auth_digest_sz),
 				rte_socket_id());
 
@@ -336,25 +336,25 @@ cperf_verify_op(struct rte_crypto_op *op,
 		cipher = 1;
 		cipher_offset = 0;
 		auth = 1;
-		auth_offset = vector->plaintext.length;
+		auth_offset = options->test_buffer_size;
 		break;
 	case CPERF_AUTH_ONLY:
 		cipher = 0;
 		cipher_offset = 0;
 		auth = 1;
-		auth_offset = vector->plaintext.length;
+		auth_offset = options->test_buffer_size;
 		break;
 	case CPERF_AUTH_THEN_CIPHER:
 		cipher = 1;
 		cipher_offset = 0;
 		auth = 1;
-		auth_offset = vector->plaintext.length;
+		auth_offset = options->test_buffer_size;
 		break;
 	case CPERF_AEAD:
 		cipher = 1;
 		cipher_offset = vector->aad.length;
 		auth = 1;
-		auth_offset = vector->aad.length + vector->plaintext.length;
+		auth_offset = vector->aad.length + options->test_buffer_size;
 		break;
 	}
 
@@ -362,11 +362,11 @@ cperf_verify_op(struct rte_crypto_op *op,
 		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
 			res += memcmp(data + cipher_offset,
 					vector->ciphertext.data,
-					vector->ciphertext.length);
+					options->test_buffer_size);
 		else
 			res += memcmp(data + cipher_offset,
 					vector->plaintext.data,
-					vector->plaintext.length);
+					options->test_buffer_size);
 	}
 
 	if (auth == 1) {
@@ -393,8 +393,8 @@ cperf_verify_test_runner(void *test_ctx)
 	uint64_t i, m_idx = 0;
 	uint16_t ops_unused = 0;
 
-	struct rte_crypto_op *ops[ctx->options->burst_sz];
-	struct rte_crypto_op *ops_processed[ctx->options->burst_sz];
+	struct rte_crypto_op *ops[ctx->options->max_burst_size];
+	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
 
 	uint32_t lcore = rte_lcore_id();
 
@@ -419,9 +419,9 @@ cperf_verify_test_runner(void *test_ctx)
 
 	while (ops_enqd_total < ctx->options->total_ops) {
 
-		uint16_t burst_size = ((ops_enqd_total + ctx->options->burst_sz)
+		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
 				<= ctx->options->total_ops) ?
-						ctx->options->burst_sz :
+						ctx->options->max_burst_size :
 						ctx->options->total_ops -
 						ops_enqd_total;
 
@@ -467,10 +467,10 @@ cperf_verify_test_runner(void *test_ctx)
 
 		/* Dequeue processed burst of ops from crypto device */
 		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+				ops_processed, ctx->options->max_burst_size);
 
 		m_idx += ops_needed;
-		if (m_idx + ctx->options->burst_sz > ctx->options->pool_sz)
+		if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
 			m_idx = 0;
 
 		if (ops_deqd == 0) {
@@ -505,7 +505,7 @@ cperf_verify_test_runner(void *test_ctx)
 
 		/* dequeue burst */
 		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
-				ops_processed, ctx->options->burst_sz);
+				ops_processed, ctx->options->max_burst_size);
 		if (ops_deqd == 0) {
 			ops_deqd_failed++;
 			continue;
@@ -536,8 +536,8 @@ cperf_verify_test_runner(void *test_ctx)
 		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
 				"%12"PRIu64"%12"PRIu64"\n",
 				ctx->lcore_id,
-				ctx->options->buffer_sz,
-				ctx->options->burst_sz,
+				ctx->options->max_buffer_size,
+				ctx->options->max_burst_size,
 				ops_enqd_total,
 				ops_deqd_total,
 				ops_enqd_failed,
@@ -553,8 +553,8 @@ cperf_verify_test_runner(void *test_ctx)
 		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
 				"%"PRIu64"\n",
 				ctx->lcore_id,
-				ctx->options->buffer_sz,
-				ctx->options->burst_sz,
+				ctx->options->max_buffer_size,
+				ctx->options->max_burst_size,
 				ops_enqd_total,
 				ops_deqd_total,
 				ops_enqd_failed,
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 3a80350..9ec2a4b 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -179,11 +179,11 @@ cperf_check_test_vector(struct cperf_options *opts,
 		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length < opts->max_buffer_size)
 				return -1;
 			if (test_vec->ciphertext.data == NULL)
 				return -1;
-			if (test_vec->ciphertext.length != opts->buffer_sz)
+			if (test_vec->ciphertext.length < opts->max_buffer_size)
 				return -1;
 			if (test_vec->iv.data == NULL)
 				return -1;
@@ -198,7 +198,7 @@ cperf_check_test_vector(struct cperf_options *opts,
 		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length < opts->max_buffer_size)
 				return -1;
 			if (test_vec->auth_key.data == NULL)
 				return -1;
@@ -206,7 +206,7 @@ cperf_check_test_vector(struct cperf_options *opts,
 				return -1;
 			if (test_vec->digest.data == NULL)
 				return -1;
-			if (test_vec->digest.length != opts->auth_digest_sz)
+			if (test_vec->digest.length < opts->auth_digest_sz)
 				return -1;
 		}
 
@@ -215,16 +215,16 @@ cperf_check_test_vector(struct cperf_options *opts,
 		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length < opts->max_buffer_size)
 				return -1;
 		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
 			if (test_vec->plaintext.data == NULL)
 				return -1;
-			if (test_vec->plaintext.length != opts->buffer_sz)
+			if (test_vec->plaintext.length < opts->max_buffer_size)
 				return -1;
 			if (test_vec->ciphertext.data == NULL)
 				return -1;
-			if (test_vec->ciphertext.length != opts->buffer_sz)
+			if (test_vec->ciphertext.length < opts->max_buffer_size)
 				return -1;
 			if (test_vec->iv.data == NULL)
 				return -1;
@@ -242,13 +242,17 @@ cperf_check_test_vector(struct cperf_options *opts,
 				return -1;
 			if (test_vec->digest.data == NULL)
 				return -1;
-			if (test_vec->digest.length != opts->auth_digest_sz)
+			if (test_vec->digest.length < opts->auth_digest_sz)
 				return -1;
 		}
 	} else if (opts->op_type == CPERF_AEAD) {
 		if (test_vec->plaintext.data == NULL)
 			return -1;
-		if (test_vec->plaintext.length != opts->buffer_sz)
+		if (test_vec->plaintext.length < opts->max_buffer_size)
+			return -1;
+		if (test_vec->ciphertext.data == NULL)
+			return -1;
+		if (test_vec->ciphertext.length < opts->max_buffer_size)
 			return -1;
 		if (test_vec->aad.data == NULL)
 			return -1;
@@ -256,7 +260,7 @@ cperf_check_test_vector(struct cperf_options *opts,
 			return -1;
 		if (test_vec->digest.data == NULL)
 			return -1;
-		if (test_vec->digest.length != opts->auth_digest_sz)
+		if (test_vec->digest.length < opts->auth_digest_sz)
 			return -1;
 	}
 	return 0;
@@ -275,6 +279,8 @@ main(int argc, char **argv)
 	uint8_t cdev_id, i;
 	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
 
+	uint8_t buffer_size_idx = 0;
+
 	int ret;
 	uint32_t lcore_id;
 
@@ -370,21 +376,37 @@ main(int argc, char **argv)
 		i++;
 	}
 
-	i = 0;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	/* Get first size from range or list */
+	if (opts.inc_buffer_size != 0)
+		opts.test_buffer_size = opts.min_buffer_size;
+	else
+		opts.test_buffer_size = opts.buffer_size_list[0];
 
-		if (i == nb_cryptodevs)
-			break;
+	while (opts.test_buffer_size <= opts.max_buffer_size) {
+		i = 0;
+		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 
-		cdev_id = enabled_cdevs[i];
+			if (i == nb_cryptodevs)
+				break;
+
+			cdev_id = enabled_cdevs[i];
 
-		rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
 				ctx[cdev_id], lcore_id);
-		i++;
+			i++;
+		}
+		rte_eal_mp_wait_lcore();
+
+		/* Get next size from range or list */
+		if (opts.inc_buffer_size != 0)
+			opts.test_buffer_size += opts.inc_buffer_size;
+		else {
+			if (++buffer_size_idx == opts.buffer_size_count)
+				break;
+			opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
+		}
 	}
 
-	rte_eal_mp_wait_lcore();
-
 	i = 0;
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index dbf5672..2d225d5 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -151,10 +151,25 @@ The following are the appication command-line options:
 
         Set the number of packets per burst.
 
+        This can be set as:
+          * Single value (e.g. ``--burst-sz 16``)
+          * Range of values, using the following structure ``min:inc:max``,
+            where ``min`` is the minimum size, ``inc`` is the increment size and ``max``
+            is the maximum size (e.g. ``--burst-sz 16:2:32``)
+          * List of values, up to 32 values, separated by commas (e.g. ``--burst-sz 16,24,32``)
+
 * ``--buffer-sz <n>``
 
         Set the size of single packet (plaintext or ciphertext in it).
 
+        This can be set as:
+          * Single value (e.g. ``--buffer-sz 16``)
+          * Range of values, using the following structure ``min:inc:max``,
+            where ``min`` is the minimum size, ``inc`` is the increment size and ``max``
+            is the maximum size (e.g. ``--buffer-sz 16:2:32``)
+          * List of values, up to 32 values, separated by commas (e.g. ``--buffer-sz 32,64,128``)
+
+
 * ``--segments-nb <n>``
 
         Set the number of segments per packet.
-- 
2.7.4

  parent reply	other threads:[~2017-03-27 11:26 UTC|newest]

Thread overview: 17+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-03-03 16:13 [PATCH 0/3] Crypto performance app improvements Pablo de Lara
2017-03-03 16:13 ` [PATCH 1/3] app/crypto-perf: move verify as single test type Pablo de Lara
2017-03-03 16:13 ` [PATCH 2/3] app/crypto-perf: do not append digest if not used Pablo de Lara
2017-03-03 16:13 ` [PATCH 3/3] app/crypto-perf: add range/list of sizes Pablo de Lara
2017-03-27 11:25 ` [PATCH v2 0/9] Crypto performance app improvements Pablo de Lara
2017-03-27 11:25   ` [PATCH v2 1/9] app/crypto-perf: remove cyclecount test type Pablo de Lara
2017-03-27 11:25   ` [PATCH v2 2/9] app/crypto-perf: remove unused file Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 3/9] app/crypto-perf: fix AES CBC 128 test vectors Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 4/9] app/crypto-perf: move verify as single test type Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 5/9] app/crypto-perf: do not append digest if not used Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 6/9] app/crypto-perf: display results in test runner Pablo de Lara
2017-03-27 11:26   ` Pablo de Lara [this message]
2017-03-27 11:26   ` [PATCH v2 8/9] app/crypto-perf: add extra option checks Pablo de Lara
2017-03-27 11:26   ` [PATCH v2 9/9] app/crypto-perf: reorg options structure Pablo de Lara
2017-03-27 12:29   ` [PATCH v2 0/9] Crypto performance app improvements De Lara Guarch, Pablo
2017-03-29 15:24     ` Sergio Gonzalez Monroy
2017-03-29 22:22       ` De Lara Guarch, Pablo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1490613966-74180-8-git-send-email-pablo.de.lara.guarch@intel.com \
    --to=pablo.de.lara.guarch@intel.com \
    --cc=declan.doherty@intel.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.