From: Lang Cheng <chenglang@huawei.com>
To: Weihang Li <liweihang@huawei.com>, <dledford@redhat.com>, <jgg@ziepe.ca>
Cc: <linux-rdma@vger.kernel.org>, <linuxarm@huawei.com>, <leon@kernel.org>
Subject: Re: [PATCH for-next 1/9] RDMA/hns: Refactor process about opcode in post_send()
Date: Wed, 9 Sep 2020 10:39:39 +0800	[thread overview]
Message-ID: <b29f111d-7c46-c7e2-66fa-1085f20e3220@huawei.com> (raw)
In-Reply-To: <1599485808-29940-2-git-send-email-liweihang@huawei.com>



On 2020/9/7 21:36, Weihang Li wrote:
> According to the IB specifications, the verbs should return an immediate
> error when users set an unsupported opcode. Furthermore, refactor the
> opcode handling in the post_send() path to make the differences between
> opcodes clearer.
> 
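
Just to illustrate the behaviour this aims for at the verbs level, here is a
hypothetical ULP caller posting an invalid opcode to a UD QP (not part of this
patch; ud_qp, ah, remote_qpn and qkey are assumed to exist):

	const struct ib_send_wr *bad_wr;
	struct ib_ud_wr wr = {};
	int ret;

	wr.wr.opcode = IB_WR_RDMA_WRITE;	/* not a valid opcode on a UD QP */
	wr.wr.send_flags = IB_SEND_SIGNALED;
	wr.ah = ah;
	wr.remote_qpn = remote_qpn;
	wr.remote_qkey = qkey;

	/* with this patch the driver rejects the WR up front */
	ret = ib_post_send(ud_qp, &wr.wr, &bad_wr);
	if (ret)
		pr_err("post_send rejected opcode %d: %d\n",
		       wr.wr.opcode, ret);
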
> Signed-off-by: Weihang Li <liweihang@huawei.com>
> ---
>   drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 135 ++++++++++++++++++-----------
>   1 file changed, 83 insertions(+), 52 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> index 96e08b4..9a9639b 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
> @@ -292,6 +292,33 @@ static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
>   	return valid_num;
>   }
>   
> +static __le32 get_immtdata(const struct ib_send_wr *wr)
> +{
> +	switch (wr->opcode) {
> +	case IB_WR_SEND_WITH_IMM:
> +	case IB_WR_RDMA_WRITE_WITH_IMM:
> +		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
> +	default:
> +		return 0;
> +	}
> +}
> +
> +static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
> +			 const struct ib_send_wr *wr)
> +{
> +	u32 ib_op = wr->opcode;
> +
> +	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
> +		return -EINVAL;
> +
> +	ud_sq_wqe->immtdata = get_immtdata(wr);
> +
> +	roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
> +		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
> +
> +	return 0;
> +}
> +
>   static inline int set_ud_wqe(struct hns_roce_qp *qp,
>   			     const struct ib_send_wr *wr,
>   			     void *wqe, unsigned int *sge_idx,
> @@ -300,15 +327,24 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
>   	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
>   	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
>   	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
> +	struct ib_device *ibdev = &hr_dev->ib_dev;
>   	unsigned int curr_idx = *sge_idx;
>   	int valid_num_sge;
>   	u32 msg_len = 0;
>   	bool loopback;
>   	u8 *smac;
> +	int ret;
>   
>   	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
>   	memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
>   
> +	ret = set_ud_opcode(ud_sq_wqe, wr);
> +	if (unlikely(ret)) {
> +		ibdev_err(ibdev, "unsupported opcode, opcode = %d.\n",
> +			  wr->opcode);
> +		return ret;
> +	}
> +
>   	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
>   		       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
>   	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
> @@ -336,16 +372,6 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
>   
>   	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
>   
> -	switch (wr->opcode) {
> -	case IB_WR_SEND_WITH_IMM:
> -	case IB_WR_RDMA_WRITE_WITH_IMM:
> -		ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
> -		break;
> -	default:
> -		ud_sq_wqe->immtdata = 0;
> -		break;
> -	}
> -
>   	/* Set sig attr */
>   	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
>   		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
> @@ -402,33 +428,68 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
>   	return 0;
>   }
>   
> +static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
> +			 const struct ib_send_wr *wr)
> +{
> +	u32 ib_op = wr->opcode;
> +
> +	rc_sq_wqe->immtdata = get_immtdata(wr);
> +
> +	switch (ib_op) {
> +	case IB_WR_RDMA_READ:
> +	case IB_WR_RDMA_WRITE:
> +	case IB_WR_RDMA_WRITE_WITH_IMM:
> +		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
> +		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
> +		break;
> +	case IB_WR_SEND:
> +	case IB_WR_SEND_WITH_IMM:
> +		break;
> +	case IB_WR_ATOMIC_CMP_AND_SWP:
> +	case IB_WR_ATOMIC_FETCH_AND_ADD:
> +		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
> +		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
> +		break;
> +	case IB_WR_REG_MR:
> +		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
> +		break;
> +	case IB_WR_LOCAL_INV:
> +		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
> +		fallthrough;
> +	case IB_WR_SEND_WITH_INV:
> +		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
> +		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
> +
> +	return 0;
> +}
>   static inline int set_rc_wqe(struct hns_roce_qp *qp,
>   			     const struct ib_send_wr *wr,
>   			     void *wqe, unsigned int *sge_idx,
>   			     unsigned int owner_bit)
>   {
> +	struct ib_device *ibdev = &to_hr_dev(qp->ibqp.device)->ib_dev;
>   	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
>   	unsigned int curr_idx = *sge_idx;
>   	unsigned int valid_num_sge;
>   	u32 msg_len = 0;
> -	int ret = 0;
> +	int ret;
>   
>   	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
>   	memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
>   
>   	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
>   
> -	switch (wr->opcode) {
> -	case IB_WR_SEND_WITH_IMM:
> -	case IB_WR_RDMA_WRITE_WITH_IMM:
> -		rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
> -		break;
> -	case IB_WR_SEND_WITH_INV:
> -		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
> -		break;
> -	default:
> -		rc_sq_wqe->immtdata = 0;
> -		break;
> +	ret = set_rc_opcode(rc_sq_wqe, wr);
> +	if (unlikely(ret)) {
> +		ibdev_err(ibdev, "unsupported opcode, opcode = %d.\n",
> +			  wr->opcode);
> +		return ret;
>   	}
>   
>   	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
> @@ -440,36 +501,6 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
>   	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
>   		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
>   
> -	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
> -		     owner_bit);

It seems we lost this field: after this refactor, neither set_rc_opcode() nor
set_rc_wqe() sets the owner bit anymore.
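
If the owner bit is still required by hardware, a minimal, untested sketch of
one way to keep it (using only identifiers already present in this patch)
would be to restore the write in set_rc_wqe() right after the opcode has been
validated:

	ret = set_rc_opcode(rc_sq_wqe, wr);
	if (unlikely(ret)) {
		ibdev_err(ibdev, "unsupported opcode, opcode = %d.\n",
			  wr->opcode);
		return ret;
	}

	/* keep marking WQE ownership for hardware, as before this refactor */
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

Alternatively, owner_bit could be passed into set_rc_opcode() so that all of
byte_4 is filled in one place.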

> -
> -	switch (wr->opcode) {
> -	case IB_WR_RDMA_READ:
> -	case IB_WR_RDMA_WRITE:
> -	case IB_WR_RDMA_WRITE_WITH_IMM:
> -		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
> -		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
> -		break;
> -	case IB_WR_LOCAL_INV:
> -		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
> -		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
> -		break;
> -	case IB_WR_REG_MR:
> -		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
> -		break;
> -	case IB_WR_ATOMIC_CMP_AND_SWP:
> -	case IB_WR_ATOMIC_FETCH_AND_ADD:
> -		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
> -		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
> -		break;
> -	default:
> -		break;
> -	}
> -
> -	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
> -		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
> -		       to_hr_opcode(wr->opcode));
> -
>   	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
>   	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
>   		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
> 

Thread overview: 12+ messages
2020-09-07 13:36 [PATCH for-next 0/9] RDMA/hns: Misc Updates Weihang Li
2020-09-07 13:36 ` [PATCH for-next 1/9] RDMA/hns: Refactor process about opcode in post_send() Weihang Li
2020-09-09  2:39   ` Lang Cheng [this message]
2020-09-09  3:41     ` liweihang
2020-09-07 13:36 ` [PATCH for-next 2/9] RDMA/hns: Add type check in get/set hw field Weihang Li
2020-09-07 13:36 ` [PATCH for-next 3/9] RDMA/hns: Add interception for resizing SRQs Weihang Li
2020-09-07 13:36 ` [PATCH for-next 4/9] RDMA/hns: Correct typo of hns_roce_create_cq() Weihang Li
2020-09-07 13:36 ` [PATCH for-next 5/9] RDMA/hns: Add check for the validity of sl configuration Weihang Li
2020-09-07 13:36 ` [PATCH for-next 6/9] RDMA/hns: Solve the overflow of the calc_pg_sz() Weihang Li
2020-09-07 13:36 ` [PATCH for-next 7/9] RDMA/hns: Fix the wrong value of rnr_retry when querying qp Weihang Li
2020-09-07 13:36 ` [PATCH for-next 8/9] RDMA/hns: Fix configuration of ack_req_freq in QPC Weihang Li
2020-09-07 13:36 ` [PATCH for-next 9/9] RDMA/hns: Fix missing sq_sig_type when querying QP Weihang Li
