From: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
To: linux-rdma@vger.kernel.org, leonro@nvidia.com, jgg@nvidia.com,
zyjzyj2000@gmail.com
Cc: nvdimm@lists.linux.dev, linux-kernel@vger.kernel.org,
rpearsonhpe@gmail.com, yangx.jy@fujitsu.com,
lizhijian@fujitsu.com, y-goto@fujitsu.com,
Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
Subject: [RFC PATCH 3/7] RDMA/rxe: Cleanup code for responder Atomic operations
Date: Wed, 7 Sep 2022 11:43:01 +0900
Message-ID: <861f3f8f8a07ce066a05cc5a2210bde76740f870.1662461897.git.matsuda-daisuke@fujitsu.com>
In-Reply-To: <cover.1662461897.git.matsuda-daisuke@fujitsu.com>

Currently, rxe_responder() directly calls the function to execute Atomic
operations. This needs to be modified to insert some conditional branches
for the new RDMA Atomic Write operation and the ODP feature.

Signed-off-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
---
drivers/infiniband/sw/rxe/rxe_resp.c | 102 +++++++++++++++++----------
1 file changed, 64 insertions(+), 38 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index e97c55b292f0..cadc8fa64dd0 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -591,60 +591,86 @@ static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);
-static enum resp_states atomic_reply(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+enum resp_states rxe_process_atomic(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt, u64 *vaddr)
{
- u64 *vaddr;
enum resp_states ret;
- struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
u64 value;
- if (!res) {
- res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
- qp->resp.res = res;
+ /* check that vaddr is 8-byte aligned. */
+ if (!vaddr || (uintptr_t)vaddr & 7) {
+ ret = RESPST_ERR_MISALIGNED_ATOMIC;
+ goto out;
}
- if (!res->replay) {
- if (mr->state != RXE_MR_STATE_VALID) {
- ret = RESPST_ERR_RKEY_VIOLATION;
- goto out;
- }
+ spin_lock(&atomic_ops_lock);
+ res->atomic.orig_val = value = *vaddr;
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
- sizeof(u64));
+ if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == atmeth_comp(pkt))
+ value = atmeth_swap_add(pkt);
+ } else {
+ value += atmeth_swap_add(pkt);
+ }
- /* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
- ret = RESPST_ERR_MISALIGNED_ATOMIC;
- goto out;
- }
+ *vaddr = value;
+ spin_unlock(&atomic_ops_lock);
- spin_lock_bh(&atomic_ops_lock);
- res->atomic.orig_val = value = *vaddr;
+ qp->resp.msn++;
- if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
- if (value == atmeth_comp(pkt))
- value = atmeth_swap_add(pkt);
- } else {
- value += atmeth_swap_add(pkt);
- }
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
- *vaddr = value;
- spin_unlock_bh(&atomic_ops_lock);
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
- qp->resp.msn++;
+ ret = RESPST_ACKNOWLEDGE;
+out:
+ return ret;
+}
- /* next expected psn, read handles this separately */
- qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
- qp->resp.ack_psn = qp->resp.psn;
+static enum resp_states rxe_atomic_ops(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_mr *mr)
+{
+ u64 *vaddr;
+ enum resp_states ret;
- qp->resp.opcode = pkt->opcode;
- qp->resp.status = IB_WC_SUCCESS;
+ vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
+ sizeof(u64));
+
+ if (pkt->mask & RXE_ATOMIC_MASK) {
+ ret = rxe_process_atomic(qp, pkt, vaddr);
+ } else {
+ /* ATOMIC WRITE operation will come here. */
+ ret = RESPST_ERR_UNSUPPORTED_OPCODE;
}
- ret = RESPST_ACKNOWLEDGE;
-out:
+ return ret;
+}
+
+static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_mr *mr = qp->resp.mr;
+ struct resp_res *res = qp->resp.res;
+ enum resp_states ret;
+
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
+ qp->resp.res = res;
+ }
+
+ if (!res->replay) {
+ if (mr->state != RXE_MR_STATE_VALID)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ ret = rxe_atomic_ops(qp, pkt, mr);
+ } else
+ ret = RESPST_ACKNOWLEDGE;
+
return ret;
}
@@ -1327,7 +1353,7 @@ int rxe_responder(void *arg)
state = read_reply(qp, pkt);
break;
case RESPST_ATOMIC_REPLY:
- state = atomic_reply(qp, pkt);
+ state = rxe_atomic_reply(qp, pkt);
break;
case RESPST_ACKNOWLEDGE:
state = acknowledge(qp, pkt);
--
2.31.1
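
As an aside for readers new to the verbs involved: the semantics that
rxe_process_atomic() implements above are the InfiniBand Compare & Swap
and Fetch & Add operations. Under atomic_ops_lock, the responder reads
the original 64-bit value at vaddr, either swaps in the new value when
the compare operand matches (Compare & Swap) or adds the operand
(Fetch & Add), writes the result back, and records the original value in
res->atomic.orig_val so it can be returned to the requester, including
on a replay. Below is a minimal userspace sketch of just those
semantics; the helper names (do_cmp_swap, do_fetch_add) and the pthread
mutex standing in for atomic_ops_lock are illustrative assumptions, not
kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

/* Compare & Swap: store swap if *vaddr == cmp; always return the original value. */
static uint64_t do_cmp_swap(uint64_t *vaddr, uint64_t cmp, uint64_t swap)
{
	uint64_t orig;

	pthread_mutex_lock(&ops_lock);
	orig = *vaddr;
	if (orig == cmp)
		*vaddr = swap;
	pthread_mutex_unlock(&ops_lock);

	return orig;
}

/* Fetch & Add: write back *vaddr + add; return the original value. */
static uint64_t do_fetch_add(uint64_t *vaddr, uint64_t add)
{
	uint64_t orig;

	pthread_mutex_lock(&ops_lock);
	orig = *vaddr;
	*vaddr = orig + add;
	pthread_mutex_unlock(&ops_lock);

	return orig;
}

int main(void)
{
	uint64_t target = 10;	/* naturally 8-byte aligned, as the check above requires */

	/* Fetch & Add returns the original 10 and leaves target == 15. */
	printf("fetch_add: orig=%llu\n",
	       (unsigned long long)do_fetch_add(&target, 5));

	/* Compare matches (15), so 0 is swapped in; the original 15 is returned. */
	printf("cmp_swap:  orig=%llu\n",
	       (unsigned long long)do_cmp_swap(&target, 15, 0));

	printf("target:    now=%llu\n", (unsigned long long)target);

	return 0;
}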