linux-rdma.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Maor Gottlieb <maorg@mellanox.com>
To: Jack Wang <xjtuwjp@gmail.com>, Leon Romanovsky <leon@kernel.org>
Cc: Doug Ledford <dledford@redhat.com>,
	Jason Gunthorpe <jgg@mellanox.com>,
	linux-rdma@vger.kernel.org
Subject: Re: [PATCH rdma-next 2/2] RDMA/core: Optimize XRC target lookup
Date: Sun, 21 Jun 2020 17:41:55 +0300	[thread overview]
Message-ID: <481b4111-55d8-6c49-2c1c-e15d39ba1129@mellanox.com> (raw)
In-Reply-To: <CAD+HZHUnW53ni=16=XL6hY1AHoNtsa88_V5P+XOHb55Fm83zZQ@mail.gmail.com>


On 6/21/2020 2:34 PM, Jack Wang wrote:
>
> Hi
>
>
> Leon Romanovsky <leon@kernel.org <mailto:leon@kernel.org>>于2020年6月21日 
> 周日12:42写道:
>
>     From: Maor Gottlieb <maorg@mellanox.com <mailto:maorg@mellanox.com>>
>
>     Replace the mutex with read write semaphore and use xarray instead
>     of linked list for XRC target QPs. This will give faster XRC target
>     lookup. In addition, when QP is closed, don't insert it back to the
>     xarray if the destroy command failed
>
> Just curious, why not use RCU? xarray is RCU friendly.
>
> Thanks

The lock protects against parallel close and open of the same XRC target 
QP, not access to the xarray. In addition, RCU can't be taken 
since there is a sleepable function that is called under the lock; using 
an SRCU locking scheme looks like overkill to me in this case.
>
>
>
>     Signed-off-by: Maor Gottlieb <maorg@mellanox.com
>     <mailto:maorg@mellanox.com>>
>     Signed-off-by: Leon Romanovsky <leonro@mellanox.com
>     <mailto:leonro@mellanox.com>>
>     ---
>      drivers/infiniband/core/verbs.c | 50
>     +++++++++++++++------------------
>      include/rdma/ib_verbs.h         |  5 ++--
>      2 files changed, 24 insertions(+), 31 deletions(-)
>
>     diff --git a/drivers/infiniband/core/verbs.c
>     b/drivers/infiniband/core/verbs.c
>     index d66a0ad62077..ef980124f7e6 100644
>     --- a/drivers/infiniband/core/verbs.c
>     +++ b/drivers/infiniband/core/verbs.c
>     @@ -1090,13 +1090,6 @@ static void
>     __ib_shared_qp_event_handler(struct ib_event *event, void *context)
>     spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
>      }
>
>     -static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct
>     ib_qp *qp)
>     -{
>     -       mutex_lock(&xrcd->tgt_qp_mutex);
>     -       list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
>     -       mutex_unlock(&xrcd->tgt_qp_mutex);
>     -}
>     -
>      static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
>                                       void (*event_handler)(struct
>     ib_event *, void *),
>                                       void *qp_context)
>     @@ -1139,16 +1132,15 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
>             if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
>                     return ERR_PTR(-EINVAL);
>
>     -       qp = ERR_PTR(-EINVAL);
>     -       mutex_lock(&xrcd->tgt_qp_mutex);
>     -       list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
>     -               if (real_qp->qp_num == qp_open_attr->qp_num) {
>     -                       qp = __ib_open_qp(real_qp,
>     qp_open_attr->event_handler,
>     -  qp_open_attr->qp_context);
>     -                       break;
>     -               }
>     +       down_read(&xrcd->tgt_qps_rwsem);
>     +       real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
>     +       if (!real_qp) {
>     +               up_read(&xrcd->tgt_qps_rwsem);
>     +               return ERR_PTR(-EINVAL);
>             }
>     -       mutex_unlock(&xrcd->tgt_qp_mutex);
>     +       qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
>     +                         qp_open_attr->qp_context);
>     +       up_read(&xrcd->tgt_qps_rwsem);
>             return qp;
>      }
>      EXPORT_SYMBOL(ib_open_qp);
>     @@ -1157,6 +1149,7 @@ static struct ib_qp
>     *create_xrc_qp_user(struct ib_qp *qp,
>                                             struct ib_qp_init_attr
>     *qp_init_attr)
>      {
>             struct ib_qp *real_qp = qp;
>     +       int err;
>
>             qp->event_handler = __ib_shared_qp_event_handler;
>             qp->qp_context = qp;
>     @@ -1172,7 +1165,12 @@ static struct ib_qp
>     *create_xrc_qp_user(struct ib_qp *qp,
>             if (IS_ERR(qp))
>                     return qp;
>
>     -       __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
>     +       err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps,
>     real_qp->qp_num,
>     +                             real_qp, GFP_KERNEL));
>     +       if (err) {
>     +               ib_close_qp(qp);
>     +               return ERR_PTR(err);
>     +       }
>             return qp;
>      }
>
>     @@ -1888,21 +1886,18 @@ static int __ib_destroy_shared_qp(struct
>     ib_qp *qp)
>
>             real_qp = qp->real_qp;
>             xrcd = real_qp->xrcd;
>     -
>     -       mutex_lock(&xrcd->tgt_qp_mutex);
>     +       down_write(&xrcd->tgt_qps_rwsem);
>             ib_close_qp(qp);
>             if (atomic_read(&real_qp->usecnt) == 0)
>     -               list_del(&real_qp->xrcd_list);
>     +               xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
>             else
>                     real_qp = NULL;
>     -       mutex_unlock(&xrcd->tgt_qp_mutex);
>     +       up_write(&xrcd->tgt_qps_rwsem);
>
>             if (real_qp) {
>                     ret = ib_destroy_qp(real_qp);
>                     if (!ret)
>                             atomic_dec(&xrcd->usecnt);
>     -               else
>     -                       __ib_insert_xrcd_qp(xrcd, real_qp);
>             }
>
>             return 0;
>     @@ -2308,8 +2303,8 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct
>     ib_device *device,
>                     xrcd->device = device;
>                     xrcd->inode = inode;
>                     atomic_set(&xrcd->usecnt, 0);
>     -               mutex_init(&xrcd->tgt_qp_mutex);
>     -               INIT_LIST_HEAD(&xrcd->tgt_qp_list);
>     +               init_rwsem(&xrcd->tgt_qps_rwsem);
>     +               xa_init(&xrcd->tgt_qps);
>             }
>
>             return xrcd;
>     @@ -2318,19 +2313,18 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user);
>
>      int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata
>     *udata)
>      {
>     +       unsigned long index;
>             struct ib_qp *qp;
>             int ret;
>
>             if (atomic_read(&xrcd->usecnt))
>                     return -EBUSY;
>
>     -       while (!list_empty(&xrcd->tgt_qp_list)) {
>     -               qp = list_entry(xrcd->tgt_qp_list.next, struct
>     ib_qp, xrcd_list);
>     +       xa_for_each(&xrcd->tgt_qps, index, qp) {
>                     ret = ib_destroy_qp(qp);
>                     if (ret)
>                             return ret;
>             }
>     -       mutex_destroy(&xrcd->tgt_qp_mutex);
>
>             return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
>      }
>     diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
>     index f785a4f1e58b..9b973b3b6f4c 100644
>     --- a/include/rdma/ib_verbs.h
>     +++ b/include/rdma/ib_verbs.h
>     @@ -1568,9 +1568,8 @@ struct ib_xrcd {
>             struct ib_device       *device;
>             atomic_t                usecnt; /* count all exposed
>     resources */
>             struct inode           *inode;
>     -
>     -       struct mutex            tgt_qp_mutex;
>     -       struct list_head        tgt_qp_list;
>     +       struct rw_semaphore     tgt_qps_rwsem;
>     +       struct xarray           tgt_qps;
>      };
>
>      struct ib_ah {
>     -- 
>     2.26.2
>

  parent reply	other threads:[~2020-06-21 14:42 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-06-21 10:41 [PATCH rdma-next 0/2] Convert XRC to use xarray Leon Romanovsky
2020-06-21 10:41 ` [PATCH rdma-next 1/2] RDMA: Clean ib_alloc_xrcd() and reuse it to allocate XRC domain Leon Romanovsky
2020-06-21 10:41 ` [PATCH rdma-next 2/2] RDMA/core: Optimize XRC target lookup Leon Romanovsky
     [not found]   ` <CAD+HZHUnW53ni=16=XL6hY1AHoNtsa88_V5P+XOHb55Fm83zZQ@mail.gmail.com>
2020-06-21 14:41     ` Maor Gottlieb [this message]
2020-06-22 12:29   ` Jason Gunthorpe
2020-06-22 12:57     ` Maor Gottlieb
2020-06-22 13:05       ` Jason Gunthorpe
2020-06-22 13:39         ` Leon Romanovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=481b4111-55d8-6c49-2c1c-e15d39ba1129@mellanox.com \
    --to=maorg@mellanox.com \
    --cc=dledford@redhat.com \
    --cc=jgg@mellanox.com \
    --cc=leon@kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    --cc=xjtuwjp@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).