All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] crypto/scheduler: change enqueue and dequeue functions
@ 2017-03-02 11:12 Fan Zhang
  2017-03-20 14:18 ` Declan Doherty
  0 siblings, 1 reply; 3+ messages in thread
From: Fan Zhang @ 2017-03-02 11:12 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, sergio.gonzalez.monroy, declan.doherty

This patch changes the enqueue and dequeue methods of the cryptodev
scheduler PMD. Originally, a 2-layer function call was carried out
upon enqueuing or dequeuing a burst of crypto ops. This patch
removes one layer to improve performance.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/scheduler/scheduler_pmd.c         | 29 --------------
 drivers/crypto/scheduler/scheduler_pmd_private.h |  3 --
 drivers/crypto/scheduler/scheduler_roundrobin.c  | 49 ++++++++++++------------
 3 files changed, 24 insertions(+), 57 deletions(-)

diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index eeafbe6..f5038c9 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -61,32 +61,6 @@ const char *scheduler_valid_params[] = {
 	RTE_CRYPTODEV_VDEV_SOCKET_ID
 };
 
-static uint16_t
-scheduler_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct scheduler_qp_ctx *qp_ctx = queue_pair;
-	uint16_t processed_ops;
-
-	processed_ops = (*qp_ctx->schedule_enqueue)(qp_ctx, ops,
-			nb_ops);
-
-	return processed_ops;
-}
-
-static uint16_t
-scheduler_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct scheduler_qp_ctx *qp_ctx = queue_pair;
-	uint16_t processed_ops;
-
-	processed_ops = (*qp_ctx->schedule_dequeue)(qp_ctx, ops,
-			nb_ops);
-
-	return processed_ops;
-}
-
 static int
 attach_init_slaves(uint8_t scheduler_id,
 		const uint8_t *slaves, const uint8_t nb_slaves)
@@ -146,9 +120,6 @@ cryptodev_scheduler_create(const char *name,
 	dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
 	dev->dev_ops = rte_crypto_scheduler_pmd_ops;
 
-	dev->enqueue_burst = scheduler_enqueue_burst;
-	dev->dequeue_burst = scheduler_dequeue_burst;
-
 	sched_ctx = dev->data->dev_private;
 	sched_ctx->max_nb_queue_pairs =
 			init_params->def_p.max_nb_queue_pairs;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index ac4690e..e3ea21a 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -98,9 +98,6 @@ struct scheduler_ctx {
 struct scheduler_qp_ctx {
 	void *private_qp_ctx;
 
-	rte_cryptodev_scheduler_burst_enqueue_t schedule_enqueue;
-	rte_cryptodev_scheduler_burst_dequeue_t schedule_dequeue;
-
 	struct rte_reorder_buffer *reorder_buf;
 	uint32_t seqn;
 } __rte_cache_aligned;
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 9545aa9..4990c74 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -45,10 +45,10 @@ struct rr_scheduler_qp_ctx {
 };
 
 static uint16_t
-schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
@@ -112,12 +112,11 @@ schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 }
 
 static uint16_t
-schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			gen_qp_ctx->private_qp_ctx;
+	struct scheduler_qp_ctx *qp_ctx = qp;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
@@ -148,13 +147,13 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
 		sessions[i + 3] = ops[i + 3]->sym->session;
 
 		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
 		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
-		ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
 		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
-		ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
 		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
-		ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;
 
 		rte_prefetch0(ops[i + 4]->sym->session);
 		rte_prefetch0(ops[i + 4]->sym->m_src);
@@ -171,7 +170,7 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
 				ops[i]->sym->session->_private;
 		sessions[i] = ops[i]->sym->session;
 		ops[i]->sym->session = sess0->sessions[slave_idx];
-		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
 	}
 
 	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
@@ -193,10 +192,10 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
 
 
 static uint16_t
-schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
 	struct scheduler_slave *slave;
 	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
 	uint16_t nb_deq_ops;
@@ -230,13 +229,13 @@ schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
 }
 
 static uint16_t
-schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
-	struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
+	struct scheduler_qp_ctx *qp_ctx = (struct scheduler_qp_ctx *)qp;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = (qp_ctx->private_qp_ctx);
 	struct scheduler_slave *slave;
-	struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
+	struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
 	uint16_t nb_deq_ops, nb_drained_mbufs;
 	const uint16_t nb_op_ops = nb_ops;
@@ -354,6 +353,14 @@ scheduler_start(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint16_t i;
 
+	if (sched_ctx->reordering_enabled) {
+		dev->enqueue_burst = &schedule_enqueue_ordering;
+		dev->dequeue_burst = &schedule_dequeue_ordering;
+	} else {
+		dev->enqueue_burst = &schedule_enqueue;
+		dev->dequeue_burst = &schedule_dequeue;
+	}
+
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
 		struct rr_scheduler_qp_ctx *rr_qp_ctx =
@@ -372,14 +379,6 @@ scheduler_start(struct rte_cryptodev *dev)
 
 		rr_qp_ctx->last_enq_slave_idx = 0;
 		rr_qp_ctx->last_deq_slave_idx = 0;
-
-		if (sched_ctx->reordering_enabled) {
-			qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
-			qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
-		} else {
-			qp_ctx->schedule_enqueue = &schedule_enqueue;
-			qp_ctx->schedule_dequeue = &schedule_dequeue;
-		}
 	}
 
 	return 0;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] crypto/scheduler: change enqueue and dequeue functions
  2017-03-02 11:12 [PATCH] crypto/scheduler: change enqueue and dequeue functions Fan Zhang
@ 2017-03-20 14:18 ` Declan Doherty
  2017-03-21 17:17   ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 3+ messages in thread
From: Declan Doherty @ 2017-03-20 14:18 UTC (permalink / raw)
  To: Fan Zhang, dev; +Cc: pablo.de.lara.guarch, sergio.gonzalez.monroy

On 02/03/17 11:12, Fan Zhang wrote:
> This patch changes the enqueue and dequeue methods to cryptodev
> scheduler PMD. Originally a 2-layer function call is carried out
> upon enqueuing or dequeuing a burst of crypto ops. This patch
> removes one layer to improve the performance.
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---
...
>

Acked-by: Declan Doherty <declan.doherty@intel.com>

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] crypto/scheduler: change enqueue and dequeue functions
  2017-03-20 14:18 ` Declan Doherty
@ 2017-03-21 17:17   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 3+ messages in thread
From: De Lara Guarch, Pablo @ 2017-03-21 17:17 UTC (permalink / raw)
  To: Doherty, Declan, Zhang, Roy Fan, dev; +Cc: Gonzalez Monroy, Sergio



> -----Original Message-----
> From: Doherty, Declan
> Sent: Monday, March 20, 2017 2:18 PM
> To: Zhang, Roy Fan; dev@dpdk.org
> Cc: De Lara Guarch, Pablo; Gonzalez Monroy, Sergio
> Subject: Re: [PATCH] crypto/scheduler: change enqueue and dequeue
> functions
> 
> On 02/03/17 11:12, Fan Zhang wrote:
> > This patch changes the enqueue and dequeue methods to cryptodev
> > scheduler PMD. Originally a 2-layer function call is carried out
> > upon enqueuing or dequeuing a burst of crypto ops. This patch
> > removes one layer to improve the performance.
> >
> > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > ---
> ...
> >
> 
> Acked-by: Declan Doherty <declan.doherty@intel.com>

Applied to dpdk-next-crypto.
Thanks,

Pablo

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2017-03-21 17:17 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-03-02 11:12 [PATCH] crypto/scheduler: change enqueue and dequeue functions Fan Zhang
2017-03-20 14:18 ` Declan Doherty
2017-03-21 17:17   ` De Lara Guarch, Pablo

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.