* [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res()
@ 2022-07-05 14:52 Xiao Yang
2022-07-05 14:52 ` [PATCH v2 2/2] RDMA/rxe: Rename rxe_atomic_reply to atomic_reply Xiao Yang
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Xiao Yang @ 2022-07-05 14:52 UTC (permalink / raw)
To: linux-rdma; +Cc: leon, jgg, rpearsonhpe, zyjzyj2000, Xiao Yang
It's redundant to prepare resources for Read and Atomic
requests in different functions. Replace them with a common
rxe_prepare_res() that takes different parameters. In addition,
the common rxe_prepare_res() can also be used by the new Flush
and Atomic Write requests in the future.
Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
drivers/infiniband/sw/rxe/rxe_resp.c | 71 +++++++++++++---------------
1 file changed, 32 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index ccdfc1a6b659..5536582b8fe4 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -553,27 +553,48 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
return rc;
}
-/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
-
-static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ int type)
{
struct resp_res *res;
+ u32 pkts;
res = &qp->resp.resources[qp->resp.res_head];
rxe_advance_resp_resource(qp);
free_rd_atomic_resource(qp, res);
- res->type = RXE_ATOMIC_MASK;
- res->first_psn = pkt->psn;
- res->last_psn = pkt->psn;
- res->cur_psn = pkt->psn;
+ res->type = type;
res->replay = 0;
+ switch (type) {
+ case RXE_READ_MASK:
+ res->read.va = qp->resp.va + qp->resp.offset;
+ res->read.va_org = qp->resp.va + qp->resp.offset;
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
+ res->first_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
+
+ res->state = rdatm_res_state_new;
+ break;
+ case RXE_ATOMIC_MASK:
+ res->first_psn = pkt->psn;
+ res->last_psn = pkt->psn;
+ res->cur_psn = pkt->psn;
+ break;
+ }
+
return res;
}
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
@@ -584,7 +605,7 @@ static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
u64 value;
if (!res) {
- res = rxe_prepare_atomic_res(qp, pkt);
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
qp->resp.res = res;
}
@@ -680,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
return skb;
}
-static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- struct resp_res *res;
- u32 pkts;
-
- res = &qp->resp.resources[qp->resp.res_head];
- rxe_advance_resp_resource(qp);
- free_rd_atomic_resource(qp, res);
-
- res->type = RXE_READ_MASK;
- res->replay = 0;
- res->read.va = qp->resp.va + qp->resp.offset;
- res->read.va_org = qp->resp.va + qp->resp.offset;
- res->read.resid = qp->resp.resid;
- res->read.length = qp->resp.resid;
- res->read.rkey = qp->resp.rkey;
-
- pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
- res->first_psn = pkt->psn;
- res->cur_psn = pkt->psn;
- res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
-
- res->state = rdatm_res_state_new;
-
- return res;
-}
-
/**
* rxe_recheck_mr - revalidate MR from rkey and get a reference
* @qp: the qp
@@ -778,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
struct rxe_mr *mr;
if (!res) {
- res = rxe_prepare_read_res(qp, req_pkt);
+ res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
qp->resp.res = res;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH v2 2/2] RDMA/rxe: Rename rxe_atomic_reply to atomic_reply
2022-07-05 14:52 [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Xiao Yang
@ 2022-07-05 14:52 ` Xiao Yang
2022-07-14 17:08 ` [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Bob Pearson
2022-07-18 11:37 ` Leon Romanovsky
2 siblings, 0 replies; 4+ messages in thread
From: Xiao Yang @ 2022-07-05 14:52 UTC (permalink / raw)
To: linux-rdma; +Cc: leon, jgg, rpearsonhpe, zyjzyj2000, Xiao Yang
It's better to use a unified naming format.
Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
---
drivers/infiniband/sw/rxe/rxe_resp.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5536582b8fe4..265e46fe050f 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -595,7 +595,7 @@ static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
-static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
+static enum resp_states atomic_reply(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
u64 *vaddr;
@@ -1333,7 +1333,7 @@ int rxe_responder(void *arg)
state = read_reply(qp, pkt);
break;
case RESPST_ATOMIC_REPLY:
- state = rxe_atomic_reply(qp, pkt);
+ state = atomic_reply(qp, pkt);
break;
case RESPST_ACKNOWLEDGE:
state = acknowledge(qp, pkt);
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res()
2022-07-05 14:52 [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Xiao Yang
2022-07-05 14:52 ` [PATCH v2 2/2] RDMA/rxe: Rename rxe_atomic_reply to atomic_reply Xiao Yang
@ 2022-07-14 17:08 ` Bob Pearson
2022-07-18 11:37 ` Leon Romanovsky
2 siblings, 0 replies; 4+ messages in thread
From: Bob Pearson @ 2022-07-14 17:08 UTC (permalink / raw)
To: Xiao Yang, linux-rdma; +Cc: leon, jgg, zyjzyj2000
On 7/5/22 09:52, Xiao Yang wrote:
> It's redundant to prepare resources for Read and Atomic
> requests by different functions. Replace them by a common
> rxe_prepare_res() with different parameters. In addition,
> the common rxe_prepare_res() can also be used by new Flush
> and Atomic Write requests in the future.
>
> Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
> ---
> drivers/infiniband/sw/rxe/rxe_resp.c | 71 +++++++++++++---------------
> 1 file changed, 32 insertions(+), 39 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
> index ccdfc1a6b659..5536582b8fe4 100644
> --- a/drivers/infiniband/sw/rxe/rxe_resp.c
> +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
> @@ -553,27 +553,48 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
> return rc;
> }
>
> -/* Guarantee atomicity of atomic operations at the machine level. */
> -static DEFINE_SPINLOCK(atomic_ops_lock);
> -
> -static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
> - struct rxe_pkt_info *pkt)
> +static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
> + struct rxe_pkt_info *pkt,
> + int type)
> {
> struct resp_res *res;
> + u32 pkts;
>
> res = &qp->resp.resources[qp->resp.res_head];
> rxe_advance_resp_resource(qp);
> free_rd_atomic_resource(qp, res);
>
> - res->type = RXE_ATOMIC_MASK;
> - res->first_psn = pkt->psn;
> - res->last_psn = pkt->psn;
> - res->cur_psn = pkt->psn;
> + res->type = type;
> res->replay = 0;
>
> + switch (type) {
> + case RXE_READ_MASK:
> + res->read.va = qp->resp.va + qp->resp.offset;
> + res->read.va_org = qp->resp.va + qp->resp.offset;
> + res->read.resid = qp->resp.resid;
> + res->read.length = qp->resp.resid;
> + res->read.rkey = qp->resp.rkey;
> +
> + pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
> + res->first_psn = pkt->psn;
> + res->cur_psn = pkt->psn;
> + res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
> +
> + res->state = rdatm_res_state_new;
> + break;
> + case RXE_ATOMIC_MASK:
> + res->first_psn = pkt->psn;
> + res->last_psn = pkt->psn;
> + res->cur_psn = pkt->psn;
> + break;
> + }
> +
> return res;
> }
>
> +/* Guarantee atomicity of atomic operations at the machine level. */
> +static DEFINE_SPINLOCK(atomic_ops_lock);
> +
> static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
> struct rxe_pkt_info *pkt)
> {
> @@ -584,7 +605,7 @@ static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
> u64 value;
>
> if (!res) {
> - res = rxe_prepare_atomic_res(qp, pkt);
> + res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
> qp->resp.res = res;
> }
>
> @@ -680,34 +701,6 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
> return skb;
> }
>
> -static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
> - struct rxe_pkt_info *pkt)
> -{
> - struct resp_res *res;
> - u32 pkts;
> -
> - res = &qp->resp.resources[qp->resp.res_head];
> - rxe_advance_resp_resource(qp);
> - free_rd_atomic_resource(qp, res);
> -
> - res->type = RXE_READ_MASK;
> - res->replay = 0;
> - res->read.va = qp->resp.va + qp->resp.offset;
> - res->read.va_org = qp->resp.va + qp->resp.offset;
> - res->read.resid = qp->resp.resid;
> - res->read.length = qp->resp.resid;
> - res->read.rkey = qp->resp.rkey;
> -
> - pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
> - res->first_psn = pkt->psn;
> - res->cur_psn = pkt->psn;
> - res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
> -
> - res->state = rdatm_res_state_new;
> -
> - return res;
> -}
> -
> /**
> * rxe_recheck_mr - revalidate MR from rkey and get a reference
> * @qp: the qp
> @@ -778,7 +771,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
> struct rxe_mr *mr;
>
> if (!res) {
> - res = rxe_prepare_read_res(qp, req_pkt);
> + res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
> qp->resp.res = res;
> }
>
Looks good.
Reviewed-by: Bob Pearson <rpearsonhpe@gmail.com>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res()
2022-07-05 14:52 [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Xiao Yang
2022-07-05 14:52 ` [PATCH v2 2/2] RDMA/rxe: Rename rxe_atomic_reply to atomic_reply Xiao Yang
2022-07-14 17:08 ` [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Bob Pearson
@ 2022-07-18 11:37 ` Leon Romanovsky
2 siblings, 0 replies; 4+ messages in thread
From: Leon Romanovsky @ 2022-07-18 11:37 UTC (permalink / raw)
To: Xiao Yang; +Cc: linux-rdma, jgg, rpearsonhpe, zyjzyj2000
On Tue, Jul 05, 2022 at 10:52:11PM +0800, Xiao Yang wrote:
> It's redundant to prepare resources for Read and Atomic
> requests by different functions. Replace them by a common
> rxe_prepare_res() with different parameters. In addition,
> the common rxe_prepare_res() can also be used by new Flush
> and Atomic Write requests in the future.
>
> Signed-off-by: Xiao Yang <yangx.jy@fujitsu.com>
> ---
> drivers/infiniband/sw/rxe/rxe_resp.c | 71 +++++++++++++---------------
> 1 file changed, 32 insertions(+), 39 deletions(-)
>
Thanks, applied.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2022-07-18 11:37 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-07-05 14:52 [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Xiao Yang
2022-07-05 14:52 ` [PATCH v2 2/2] RDMA/rxe: Rename rxe_atomic_reply to atomic_reply Xiao Yang
2022-07-14 17:08 ` [PATCH v2 1/2] RDMA/rxe: Add common rxe_prepare_res() Bob Pearson
2022-07-18 11:37 ` Leon Romanovsky
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.