All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe
@ 2021-02-06  0:24 Bob Pearson
  2021-02-08  3:08 ` Zhu Yanjun
  2021-02-09  0:41 ` Jason Gunthorpe
  0 siblings, 2 replies; 5+ messages in thread
From: Bob Pearson @ 2021-02-06  0:24 UTC (permalink / raw)
  To: jgg, zyjzyj2000, linux-rdma; +Cc: Bob Pearson

This patch changes the type of init_send_wqe in rxe_verbs.c to void
since it always returns 0. It also separates out the code that copies
inline data into the send wqe as copy_inline_data_to_wqe().

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_verbs.c | 42 ++++++++++++---------------
 1 file changed, 19 insertions(+), 23 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 984909e03b35..dee5e0e919d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -555,14 +555,24 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 	}
 }
 
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+				    const struct ib_send_wr *ibwr)
+{
+	struct ib_sge *sge = ibwr->sg_list;
+	u8 *p = wqe->dma.inline_data;
+	int i;
+
+	for (i = 0; i < ibwr->num_sge; i++, sge++) {
+		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		p += sge->length;
+	}
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 			 unsigned int mask, unsigned int length,
 			 struct rxe_send_wqe *wqe)
 {
 	int num_sge = ibwr->num_sge;
-	struct ib_sge *sge;
-	int i;
-	u8 *p;
 
 	init_send_wr(qp, &wqe->wr, ibwr);
 
@@ -570,7 +580,7 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	if (unlikely(mask & WR_REG_MASK)) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
-		return 0;
+		return;
 	}
 
 	if (qp_type(qp) == IB_QPT_UD ||
@@ -578,20 +588,11 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	    qp_type(qp) == IB_QPT_GSI)
 		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
 
-	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
-		p = wqe->dma.inline_data;
-
-		sge = ibwr->sg_list;
-		for (i = 0; i < num_sge; i++, sge++) {
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-					sge->length);
-
-			p += sge->length;
-		}
-	} else {
+	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+		copy_inline_data_to_wqe(wqe, ibwr);
+	else
 		memcpy(wqe->dma.sge, ibwr->sg_list,
 		       num_sge * sizeof(struct ib_sge));
-	}
 
 	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
 		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
@@ -603,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	wqe->dma.sge_offset	= 0;
 	wqe->state		= wqe_state_posted;
 	wqe->ssn		= atomic_add_return(1, &qp->ssn);
-
-	return 0;
 }
 
 static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -627,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	}
 
 	send_wqe = producer_addr(sq->queue);
-
-	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
-	if (unlikely(err))
-		goto err1;
+	init_send_wqe(qp, ibwr, mask, length, send_wqe);
 
 	advance_producer(sq->queue);
 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe
  2021-02-06  0:24 [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe Bob Pearson
@ 2021-02-08  3:08 ` Zhu Yanjun
  2021-02-08 17:46   ` Pearson, Robert B
  2021-02-09  0:41 ` Jason Gunthorpe
  1 sibling, 1 reply; 5+ messages in thread
From: Zhu Yanjun @ 2021-02-08  3:08 UTC (permalink / raw)
  To: Bob Pearson; +Cc: Jason Gunthorpe, RDMA mailing list, Bob Pearson

On Sat, Feb 6, 2021 at 8:25 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>
> This patch changes the type of init_send_wqe in rxe_verbs.c to void
> since it always returns 0. It also separates out the code that copies
> inline data into the send wqe as copy_inline_data_to_wqe().
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_verbs.c | 42 ++++++++++++---------------
>  1 file changed, 19 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
> index 984909e03b35..dee5e0e919d2 100644
> --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
> +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
> @@ -555,14 +555,24 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
>         }
>  }
>
> -static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
> +static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
> +                                   const struct ib_send_wr *ibwr)
> +{
> +       struct ib_sge *sge = ibwr->sg_list;
> +       u8 *p = wqe->dma.inline_data;
> +       int i;
> +
> +       for (i = 0; i < ibwr->num_sge; i++, sge++) {
> +               memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
> +               p += sge->length;
> +       }
> +}
> +
> +static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>                          unsigned int mask, unsigned int length,
>                          struct rxe_send_wqe *wqe)
>  {
>         int num_sge = ibwr->num_sge;
> -       struct ib_sge *sge;
> -       int i;
> -       u8 *p;
>
>         init_send_wr(qp, &wqe->wr, ibwr);
>
> @@ -570,7 +580,7 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>         if (unlikely(mask & WR_REG_MASK)) {
>                 wqe->mask = mask;
>                 wqe->state = wqe_state_posted;
> -               return 0;
> +               return;
>         }
>
>         if (qp_type(qp) == IB_QPT_UD ||
> @@ -578,20 +588,11 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>             qp_type(qp) == IB_QPT_GSI)
>                 memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
>
> -       if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
> -               p = wqe->dma.inline_data;
> -
> -               sge = ibwr->sg_list;
> -               for (i = 0; i < num_sge; i++, sge++) {
> -                       memcpy(p, (void *)(uintptr_t)sge->addr,
> -                                       sge->length);
> -
> -                       p += sge->length;
> -               }
> -       } else {
> +       if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
> +               copy_inline_data_to_wqe(wqe, ibwr);
> +       else
>                 memcpy(wqe->dma.sge, ibwr->sg_list,
>                        num_sge * sizeof(struct ib_sge));
> -       }

I git clone  https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git,
But this commit can not be applied successfully.

Zhu Yanjun
>
>         wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
>                 mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
> @@ -603,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>         wqe->dma.sge_offset     = 0;
>         wqe->state              = wqe_state_posted;
>         wqe->ssn                = atomic_add_return(1, &qp->ssn);
> -
> -       return 0;
>  }
>
>  static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
> @@ -627,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>         }
>
>         send_wqe = producer_addr(sq->queue);
> -
> -       err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
> -       if (unlikely(err))
> -               goto err1;
> +       init_send_wqe(qp, ibwr, mask, length, send_wqe);
>
>         advance_producer(sq->queue);
>         spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
> --
> 2.27.0
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe
  2021-02-08  3:08 ` Zhu Yanjun
@ 2021-02-08 17:46   ` Pearson, Robert B
  2021-02-09  1:02     ` Zhu Yanjun
  0 siblings, 1 reply; 5+ messages in thread
From: Pearson, Robert B @ 2021-02-08 17:46 UTC (permalink / raw)
  To: Zhu Yanjun, Bob Pearson; +Cc: Jason Gunthorpe, RDMA mailing list

Sorry for the confusion. There was a previous patch sent the same day that fixed some checkpatch warnings. It has to be installed first. There must be some way to indicate this type of dependency.

bob

-----Original Message-----
From: Zhu Yanjun <zyjzyj2000@gmail.com> 
Sent: Sunday, February 7, 2021 9:09 PM
To: Bob Pearson <rpearsonhpe@gmail.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>; RDMA mailing list <linux-rdma@vger.kernel.org>; Pearson, Robert B <robert.pearson2@hpe.com>
Subject: Re: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe

On Sat, Feb 6, 2021 at 8:25 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>
> This patch changes the type of init_send_wqe in rxe_verbs.c to void 
> since it always returns 0. It also separates out the code that copies 
> inline data into the send wqe as copy_inline_data_to_wqe().
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_verbs.c | 42 
> ++++++++++++---------------
>  1 file changed, 19 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c 
> b/drivers/infiniband/sw/rxe/rxe_verbs.c
> index 984909e03b35..dee5e0e919d2 100644
> --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
> +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
> @@ -555,14 +555,24 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
>         }
>  }
>
> -static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr 
> *ibwr,
> +static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
> +                                   const struct ib_send_wr *ibwr) {
> +       struct ib_sge *sge = ibwr->sg_list;
> +       u8 *p = wqe->dma.inline_data;
> +       int i;
> +
> +       for (i = 0; i < ibwr->num_sge; i++, sge++) {
> +               memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
> +               p += sge->length;
> +       }
> +}
> +
> +static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr 
> +*ibwr,
>                          unsigned int mask, unsigned int length,
>                          struct rxe_send_wqe *wqe)  {
>         int num_sge = ibwr->num_sge;
> -       struct ib_sge *sge;
> -       int i;
> -       u8 *p;
>
>         init_send_wr(qp, &wqe->wr, ibwr);
>
> @@ -570,7 +580,7 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>         if (unlikely(mask & WR_REG_MASK)) {
>                 wqe->mask = mask;
>                 wqe->state = wqe_state_posted;
> -               return 0;
> +               return;
>         }
>
>         if (qp_type(qp) == IB_QPT_UD || @@ -578,20 +588,11 @@ static 
> int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>             qp_type(qp) == IB_QPT_GSI)
>                 memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, 
> sizeof(wqe->av));
>
> -       if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
> -               p = wqe->dma.inline_data;
> -
> -               sge = ibwr->sg_list;
> -               for (i = 0; i < num_sge; i++, sge++) {
> -                       memcpy(p, (void *)(uintptr_t)sge->addr,
> -                                       sge->length);
> -
> -                       p += sge->length;
> -               }
> -       } else {
> +       if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
> +               copy_inline_data_to_wqe(wqe, ibwr);
> +       else
>                 memcpy(wqe->dma.sge, ibwr->sg_list,
>                        num_sge * sizeof(struct ib_sge));
> -       }

I git clone  https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git,
But this commit can not be applied successfully.

Zhu Yanjun
>
>         wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
>                 mask & WR_READ_OR_WRITE_MASK ? 
> rdma_wr(ibwr)->remote_addr : 0; @@ -603,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>         wqe->dma.sge_offset     = 0;
>         wqe->state              = wqe_state_posted;
>         wqe->ssn                = atomic_add_return(1, &qp->ssn);
> -
> -       return 0;
>  }
>
>  static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr 
> *ibwr, @@ -627,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
>         }
>
>         send_wqe = producer_addr(sq->queue);
> -
> -       err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
> -       if (unlikely(err))
> -               goto err1;
> +       init_send_wqe(qp, ibwr, mask, length, send_wqe);
>
>         advance_producer(sq->queue);
>         spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
> --
> 2.27.0
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe
  2021-02-06  0:24 [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe Bob Pearson
  2021-02-08  3:08 ` Zhu Yanjun
@ 2021-02-09  0:41 ` Jason Gunthorpe
  1 sibling, 0 replies; 5+ messages in thread
From: Jason Gunthorpe @ 2021-02-09  0:41 UTC (permalink / raw)
  To: Bob Pearson; +Cc: zyjzyj2000, linux-rdma, Bob Pearson

On Fri, Feb 05, 2021 at 06:24:37PM -0600, Bob Pearson wrote:
> This patch changes the type of init_send_wqe in rxe_verbs.c to void
> since it always returns 0. It also separates out the code that copies
> inline data into the send wqe as copy_inline_data_to_wqe().
> 
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_verbs.c | 42 ++++++++++++---------------
>  1 file changed, 19 insertions(+), 23 deletions(-)

Applied to for-next, thanks

Jason

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe
  2021-02-08 17:46   ` Pearson, Robert B
@ 2021-02-09  1:02     ` Zhu Yanjun
  0 siblings, 0 replies; 5+ messages in thread
From: Zhu Yanjun @ 2021-02-09  1:02 UTC (permalink / raw)
  To: Pearson, Robert B; +Cc: Bob Pearson, Jason Gunthorpe, RDMA mailing list

On Tue, Feb 9, 2021 at 1:46 AM Pearson, Robert B
<robert.pearson2@hpe.com> wrote:
>
> Sorry for the confusion. There was a previous patch sent the same day that fixed some checkpatch warnings. It has to be installed first. There must be some way to indicate this type of dependency.

It would be better to make this patch and the previous patch a patch
series, so this problem will not occur again.

Thanks.

Zhu Yanjun
>
> bob
>
> -----Original Message-----
> From: Zhu Yanjun <zyjzyj2000@gmail.com>
> Sent: Sunday, February 7, 2021 9:09 PM
> To: Bob Pearson <rpearsonhpe@gmail.com>
> Cc: Jason Gunthorpe <jgg@nvidia.com>; RDMA mailing list <linux-rdma@vger.kernel.org>; Pearson, Robert B <robert.pearson2@hpe.com>
> Subject: Re: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe
>
> On Sat, Feb 6, 2021 at 8:25 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> >
> > This patch changes the type of init_send_wqe in rxe_verbs.c to void
> > since it always returns 0. It also separates out the code that copies
> > inline data into the send wqe as copy_inline_data_to_wqe().
> >
> > Signed-off-by: Bob Pearson <rpearson@hpe.com>
> > ---
> >  drivers/infiniband/sw/rxe/rxe_verbs.c | 42
> > ++++++++++++---------------
> >  1 file changed, 19 insertions(+), 23 deletions(-)
> >
> > diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c
> > b/drivers/infiniband/sw/rxe/rxe_verbs.c
> > index 984909e03b35..dee5e0e919d2 100644
> > --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
> > +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
> > @@ -555,14 +555,24 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
> >         }
> >  }
> >
> > -static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr
> > *ibwr,
> > +static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
> > +                                   const struct ib_send_wr *ibwr) {
> > +       struct ib_sge *sge = ibwr->sg_list;
> > +       u8 *p = wqe->dma.inline_data;
> > +       int i;
> > +
> > +       for (i = 0; i < ibwr->num_sge; i++, sge++) {
> > +               memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
> > +               p += sge->length;
> > +       }
> > +}
> > +
> > +static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr
> > +*ibwr,
> >                          unsigned int mask, unsigned int length,
> >                          struct rxe_send_wqe *wqe)  {
> >         int num_sge = ibwr->num_sge;
> > -       struct ib_sge *sge;
> > -       int i;
> > -       u8 *p;
> >
> >         init_send_wr(qp, &wqe->wr, ibwr);
> >
> > @@ -570,7 +580,7 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
> >         if (unlikely(mask & WR_REG_MASK)) {
> >                 wqe->mask = mask;
> >                 wqe->state = wqe_state_posted;
> > -               return 0;
> > +               return;
> >         }
> >
> >         if (qp_type(qp) == IB_QPT_UD || @@ -578,20 +588,11 @@ static
> > int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
> >             qp_type(qp) == IB_QPT_GSI)
> >                 memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av,
> > sizeof(wqe->av));
> >
> > -       if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
> > -               p = wqe->dma.inline_data;
> > -
> > -               sge = ibwr->sg_list;
> > -               for (i = 0; i < num_sge; i++, sge++) {
> > -                       memcpy(p, (void *)(uintptr_t)sge->addr,
> > -                                       sge->length);
> > -
> > -                       p += sge->length;
> > -               }
> > -       } else {
> > +       if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
> > +               copy_inline_data_to_wqe(wqe, ibwr);
> > +       else
> >                 memcpy(wqe->dma.sge, ibwr->sg_list,
> >                        num_sge * sizeof(struct ib_sge));
> > -       }
>
> I git clone  https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git,
> But this commit can not be applied successfully.
>
> Zhu Yanjun
> >
> >         wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
> >                 mask & WR_READ_OR_WRITE_MASK ?
> > rdma_wr(ibwr)->remote_addr : 0; @@ -603,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
> >         wqe->dma.sge_offset     = 0;
> >         wqe->state              = wqe_state_posted;
> >         wqe->ssn                = atomic_add_return(1, &qp->ssn);
> > -
> > -       return 0;
> >  }
> >
> >  static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr
> > *ibwr, @@ -627,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
> >         }
> >
> >         send_wqe = producer_addr(sq->queue);
> > -
> > -       err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
> > -       if (unlikely(err))
> > -               goto err1;
> > +       init_send_wqe(qp, ibwr, mask, length, send_wqe);
> >
> >         advance_producer(sq->queue);
> >         spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
> > --
> > 2.27.0
> >

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2021-02-09  1:04 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-06  0:24 [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe Bob Pearson
2021-02-08  3:08 ` Zhu Yanjun
2021-02-08 17:46   ` Pearson, Robert B
2021-02-09  1:02     ` Zhu Yanjun
2021-02-09  0:41 ` Jason Gunthorpe

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.