linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* linux-next: manual merge of the tip tree with the rdma tree
@ 2018-08-06  4:53 Stephen Rothwell
  2018-08-06 19:45 ` Jason Gunthorpe
  2018-08-15  1:51 ` Stephen Rothwell
  0 siblings, 2 replies; 4+ messages in thread
From: Stephen Rothwell @ 2018-08-06  4:53 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Doug Ledford, Jason Gunthorpe
  Cc: Linux-Next Mailing List, Linux Kernel Mailing List, Mark Rutland

[-- Attachment #1: Type: text/plain, Size: 7468 bytes --]

Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  drivers/infiniband/core/rdma_core.c

between commit:

  9867f5c6695f ("IB/uverbs: Convert 'bool exclusive' into an enum")

from the rdma tree and commit:

  bfc18e389c7a ("atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()")

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/infiniband/core/rdma_core.c
index 4235b9ddc2ad,475910ffbcb6..000000000000
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@@ -122,206 -120,19 +122,206 @@@ static int uverbs_try_lock_object(struc
  	 * concurrently, setting the counter to zero is enough for releasing
  	 * this lock.
  	 */
 -	if (!exclusive)
 +	switch (mode) {
 +	case UVERBS_LOOKUP_READ:
- 		return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+ 		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
  			-EBUSY : 0;
 +	case UVERBS_LOOKUP_WRITE:
 +		/* lock is exclusive */
 +		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
 +	case UVERBS_LOOKUP_DESTROY:
 +		return 0;
 +	}
 +	return 0;
 +}
 +
 +static void assert_uverbs_usecnt(struct ib_uobject *uobj,
 +				 enum rdma_lookup_mode mode)
 +{
 +#ifdef CONFIG_LOCKDEP
 +	switch (mode) {
 +	case UVERBS_LOOKUP_READ:
 +		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
 +		break;
 +	case UVERBS_LOOKUP_WRITE:
 +		WARN_ON(atomic_read(&uobj->usecnt) != -1);
 +		break;
 +	case UVERBS_LOOKUP_DESTROY:
 +		break;
 +	}
 +#endif
 +}
 +
 +/*
 + * This must be called with the hw_destroy_rwsem locked for read or write,
 + * also the uobject itself must be locked for write.
 + *
 + * Upon return the HW object is guaranteed to be destroyed.
 + *
 + * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 + * however the type's alloc_commit function cannot have been called and the
 + * uobject cannot be on the uobjects_lists
 + *
 + * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 + * rdma_lookup_get_uobject) and the object is left in a state where the caller
 + * needs to call rdma_lookup_put_uobject.
 + *
 + * For all other destroy modes this function internally unlocks the uobject
 + * and consumes the kref on the uobj.
 + */
 +static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 +				  enum rdma_remove_reason reason)
 +{
 +	struct ib_uverbs_file *ufile = uobj->ufile;
 +	unsigned long flags;
 +	int ret;
 +
 +	lockdep_assert_held(&ufile->hw_destroy_rwsem);
 +	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
 +
 +	if (uobj->object) {
 +		ret = uobj->type->type_class->destroy_hw(uobj, reason);
 +		if (ret) {
 +			if (ib_is_destroy_retryable(ret, reason, uobj))
 +				return ret;
 +
 +			/* Nothing to be done, dangle the memory and move on */
 +			WARN(true,
 +			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
 +			     uobj->id, ret);
 +		}
 +
 +		uobj->object = NULL;
 +	}
 +
 +	if (reason == RDMA_REMOVE_ABORT) {
 +		WARN_ON(!list_empty(&uobj->list));
 +		WARN_ON(!uobj->context);
 +		uobj->type->type_class->alloc_abort(uobj);
 +	}
 +
 +	uobj->context = NULL;
 +
 +	/*
 +	 * For DESTROY the usecnt is held write locked, the caller is expected
 +	 * to put it unlock and put the object when done with it. Only DESTROY
 +	 * can remove the IDR handle.
 +	 */
 +	if (reason != RDMA_REMOVE_DESTROY)
 +		atomic_set(&uobj->usecnt, 0);
 +	else
 +		uobj->type->type_class->remove_handle(uobj);
 +
 +	if (!list_empty(&uobj->list)) {
 +		spin_lock_irqsave(&ufile->uobjects_lock, flags);
 +		list_del_init(&uobj->list);
 +		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
 +
 +		/*
 +		 * Pairs with the get in rdma_alloc_commit_uobject(), could
 +		 * destroy uobj.
 +		 */
 +		uverbs_uobject_put(uobj);
 +	}
  
 -	/* lock is either WRITE or DESTROY - should be exclusive */
 -	return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
 +	/*
 +	 * When aborting the stack kref remains owned by the core code, and is
 +	 * not transferred into the type. Pairs with the get in alloc_uobj
 +	 */
 +	if (reason == RDMA_REMOVE_ABORT)
 +		uverbs_uobject_put(uobj);
 +
 +	return 0;
  }
  
 -static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
 +/*
 + * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 + * sequence. It should only be used from command callbacks. On success the
 + * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
 + * version requires the caller to have already obtained an
 + * LOOKUP_DESTROY uobject kref.
 + */
 +int uobj_destroy(struct ib_uobject *uobj)
 +{
 +	struct ib_uverbs_file *ufile = uobj->ufile;
 +	int ret;
 +
 +	down_read(&ufile->hw_destroy_rwsem);
 +
 +	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
 +	if (ret)
 +		goto out_unlock;
 +
 +	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
 +	if (ret) {
 +		atomic_set(&uobj->usecnt, 0);
 +		goto out_unlock;
 +	}
 +
 +out_unlock:
 +	up_read(&ufile->hw_destroy_rwsem);
 +	return ret;
 +}
 +
 +/*
 + * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 + * with a NULL object pointer. The caller must pair this with
 + * uverbs_put_destroy.
 + */
 +struct ib_uobject *__uobj_get_destroy(const struct uverbs_obj_type *type,
 +				      u32 id, struct ib_uverbs_file *ufile)
 +{
 +	struct ib_uobject *uobj;
 +	int ret;
 +
 +	uobj = rdma_lookup_get_uobject(type, ufile, id, UVERBS_LOOKUP_DESTROY);
 +	if (IS_ERR(uobj))
 +		return uobj;
 +
 +	ret = uobj_destroy(uobj);
 +	if (ret) {
 +		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
 +		return ERR_PTR(ret);
 +	}
 +
 +	return uobj;
 +}
 +
 +/*
 + * Does both uobj_get_destroy() and uobj_put_destroy().  Returns success_res
 + * on success (negative errno on failure). For use by callers that do not need
 + * the uobj.
 + */
 +int __uobj_perform_destroy(const struct uverbs_obj_type *type, u32 id,
 +			   struct ib_uverbs_file *ufile, int success_res)
 +{
 +	struct ib_uobject *uobj;
 +
 +	uobj = __uobj_get_destroy(type, id, ufile);
 +	if (IS_ERR(uobj))
 +		return PTR_ERR(uobj);
 +
 +	/*
 +	 * FIXME: After destroy this is not safe. We no longer hold the rwsem
 +	 * so disassociation could have completed and unloaded the module that
 +	 * backs the uobj->type pointer.
 +	 */
 +	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
 +	return success_res;
 +}
 +
 +/* alloc_uobj must be undone by uverbs_destroy_uobject() */
 +static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
  				     const struct uverbs_obj_type *type)
  {
 -	struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
 +	struct ib_uobject *uobj;
 +	struct ib_ucontext *ucontext;
 +
 +	ucontext = ib_uverbs_get_ucontext(ufile);
 +	if (IS_ERR(ucontext))
 +		return ERR_CAST(ucontext);
  
 +	uobj = kzalloc(type->obj_size, GFP_KERNEL);
  	if (!uobj)
  		return ERR_PTR(-ENOMEM);
  	/*

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: linux-next: manual merge of the tip tree with the rdma tree
  2018-08-06  4:53 linux-next: manual merge of the tip tree with the rdma tree Stephen Rothwell
@ 2018-08-06 19:45 ` Jason Gunthorpe
  2018-08-15  1:51 ` Stephen Rothwell
  1 sibling, 0 replies; 4+ messages in thread
From: Jason Gunthorpe @ 2018-08-06 19:45 UTC (permalink / raw)
  To: Stephen Rothwell
  Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Doug Ledford, Linux-Next Mailing List, Linux Kernel Mailing List,
	Mark Rutland

On Mon, Aug 06, 2018 at 02:53:31PM +1000, Stephen Rothwell wrote:
> Hi all,
> 
> Today's linux-next merge of the tip tree got a conflict in:
> 
>   drivers/infiniband/core/rdma_core.c
> 
> between commit:
> 
>   9867f5c6695f ("IB/uverbs: Convert 'bool exclusive' into an enum")
> 
> from the rdma tree and commit:
> 
>   bfc18e389c7a ("atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()")
> 
> from the tip tree.
> 
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging.  You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.

Gaaagh that is a big conflict

I was going to do this next cycle.. But I'll try to push a commit
consolidating this crazy use of __atomic_add_unless into one function
so this conflict should become easier to handle.

Jason

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: linux-next: manual merge of the tip tree with the rdma tree
  2018-08-06  4:53 linux-next: manual merge of the tip tree with the rdma tree Stephen Rothwell
  2018-08-06 19:45 ` Jason Gunthorpe
@ 2018-08-15  1:51 ` Stephen Rothwell
  1 sibling, 0 replies; 4+ messages in thread
From: Stephen Rothwell @ 2018-08-15  1:51 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Linux-Next Mailing List, Linux Kernel Mailing List, Mark Rutland

[-- Attachment #1: Type: text/plain, Size: 8136 bytes --]

Hi all,

On Mon, 6 Aug 2018 14:53:31 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> Today's linux-next merge of the tip tree got a conflict in:
> 
>   drivers/infiniband/core/rdma_core.c
> 
> between commit:
> 
>   9867f5c6695f ("IB/uverbs: Convert 'bool exclusive' into an enum")
> 
> from the rdma tree and commit:
> 
>   bfc18e389c7a ("atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()")
> 
> from the tip tree.
> 
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging.  You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
> 
> -- 
> Cheers,
> Stephen Rothwell
> 
> diff --cc drivers/infiniband/core/rdma_core.c
> index 4235b9ddc2ad,475910ffbcb6..000000000000
> --- a/drivers/infiniband/core/rdma_core.c
> +++ b/drivers/infiniband/core/rdma_core.c
> @@@ -122,206 -120,19 +122,206 @@@ static int uverbs_try_lock_object(struc
>   	 * concurrently, setting the counter to zero is enough for releasing
>   	 * this lock.
>   	 */
>  -	if (!exclusive)
>  +	switch (mode) {
>  +	case UVERBS_LOOKUP_READ:
> - 		return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
> + 		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
>   			-EBUSY : 0;
>  +	case UVERBS_LOOKUP_WRITE:
>  +		/* lock is exclusive */
>  +		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
>  +	case UVERBS_LOOKUP_DESTROY:
>  +		return 0;
>  +	}
>  +	return 0;
>  +}
>  +
>  +static void assert_uverbs_usecnt(struct ib_uobject *uobj,
>  +				 enum rdma_lookup_mode mode)
>  +{
>  +#ifdef CONFIG_LOCKDEP
>  +	switch (mode) {
>  +	case UVERBS_LOOKUP_READ:
>  +		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
>  +		break;
>  +	case UVERBS_LOOKUP_WRITE:
>  +		WARN_ON(atomic_read(&uobj->usecnt) != -1);
>  +		break;
>  +	case UVERBS_LOOKUP_DESTROY:
>  +		break;
>  +	}
>  +#endif
>  +}
>  +
>  +/*
>  + * This must be called with the hw_destroy_rwsem locked for read or write,
>  + * also the uobject itself must be locked for write.
>  + *
>  + * Upon return the HW object is guaranteed to be destroyed.
>  + *
>  + * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
>  + * however the type's alloc_commit function cannot have been called and the
>  + * uobject cannot be on the uobjects_lists
>  + *
>  + * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
>  + * rdma_lookup_get_uobject) and the object is left in a state where the caller
>  + * needs to call rdma_lookup_put_uobject.
>  + *
>  + * For all other destroy modes this function internally unlocks the uobject
>  + * and consumes the kref on the uobj.
>  + */
>  +static int uverbs_destroy_uobject(struct ib_uobject *uobj,
>  +				  enum rdma_remove_reason reason)
>  +{
>  +	struct ib_uverbs_file *ufile = uobj->ufile;
>  +	unsigned long flags;
>  +	int ret;
>  +
>  +	lockdep_assert_held(&ufile->hw_destroy_rwsem);
>  +	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
>  +
>  +	if (uobj->object) {
>  +		ret = uobj->type->type_class->destroy_hw(uobj, reason);
>  +		if (ret) {
>  +			if (ib_is_destroy_retryable(ret, reason, uobj))
>  +				return ret;
>  +
>  +			/* Nothing to be done, dangle the memory and move on */
>  +			WARN(true,
>  +			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
>  +			     uobj->id, ret);
>  +		}
>  +
>  +		uobj->object = NULL;
>  +	}
>  +
>  +	if (reason == RDMA_REMOVE_ABORT) {
>  +		WARN_ON(!list_empty(&uobj->list));
>  +		WARN_ON(!uobj->context);
>  +		uobj->type->type_class->alloc_abort(uobj);
>  +	}
>  +
>  +	uobj->context = NULL;
>  +
>  +	/*
>  +	 * For DESTROY the usecnt is held write locked, the caller is expected
>  +	 * to put it unlock and put the object when done with it. Only DESTROY
>  +	 * can remove the IDR handle.
>  +	 */
>  +	if (reason != RDMA_REMOVE_DESTROY)
>  +		atomic_set(&uobj->usecnt, 0);
>  +	else
>  +		uobj->type->type_class->remove_handle(uobj);
>  +
>  +	if (!list_empty(&uobj->list)) {
>  +		spin_lock_irqsave(&ufile->uobjects_lock, flags);
>  +		list_del_init(&uobj->list);
>  +		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
>  +
>  +		/*
>  +		 * Pairs with the get in rdma_alloc_commit_uobject(), could
>  +		 * destroy uobj.
>  +		 */
>  +		uverbs_uobject_put(uobj);
>  +	}
>   
>  -	/* lock is either WRITE or DESTROY - should be exclusive */
>  -	return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
>  +	/*
>  +	 * When aborting the stack kref remains owned by the core code, and is
>  +	 * not transferred into the type. Pairs with the get in alloc_uobj
>  +	 */
>  +	if (reason == RDMA_REMOVE_ABORT)
>  +		uverbs_uobject_put(uobj);
>  +
>  +	return 0;
>   }
>   
>  -static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
>  +/*
>  + * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
>  + * sequence. It should only be used from command callbacks. On success the
>  + * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
>  + * version requires the caller to have already obtained an
>  + * LOOKUP_DESTROY uobject kref.
>  + */
>  +int uobj_destroy(struct ib_uobject *uobj)
>  +{
>  +	struct ib_uverbs_file *ufile = uobj->ufile;
>  +	int ret;
>  +
>  +	down_read(&ufile->hw_destroy_rwsem);
>  +
>  +	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
>  +	if (ret)
>  +		goto out_unlock;
>  +
>  +	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY);
>  +	if (ret) {
>  +		atomic_set(&uobj->usecnt, 0);
>  +		goto out_unlock;
>  +	}
>  +
>  +out_unlock:
>  +	up_read(&ufile->hw_destroy_rwsem);
>  +	return ret;
>  +}
>  +
>  +/*
>  + * uobj_get_destroy destroys the HW object and returns a handle to the uobj
>  + * with a NULL object pointer. The caller must pair this with
>  + * uverbs_put_destroy.
>  + */
>  +struct ib_uobject *__uobj_get_destroy(const struct uverbs_obj_type *type,
>  +				      u32 id, struct ib_uverbs_file *ufile)
>  +{
>  +	struct ib_uobject *uobj;
>  +	int ret;
>  +
>  +	uobj = rdma_lookup_get_uobject(type, ufile, id, UVERBS_LOOKUP_DESTROY);
>  +	if (IS_ERR(uobj))
>  +		return uobj;
>  +
>  +	ret = uobj_destroy(uobj);
>  +	if (ret) {
>  +		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
>  +		return ERR_PTR(ret);
>  +	}
>  +
>  +	return uobj;
>  +}
>  +
>  +/*
>  + * Does both uobj_get_destroy() and uobj_put_destroy().  Returns success_res
>  + * on success (negative errno on failure). For use by callers that do not need
>  + * the uobj.
>  + */
>  +int __uobj_perform_destroy(const struct uverbs_obj_type *type, u32 id,
>  +			   struct ib_uverbs_file *ufile, int success_res)
>  +{
>  +	struct ib_uobject *uobj;
>  +
>  +	uobj = __uobj_get_destroy(type, id, ufile);
>  +	if (IS_ERR(uobj))
>  +		return PTR_ERR(uobj);
>  +
>  +	/*
>  +	 * FIXME: After destroy this is not safe. We no longer hold the rwsem
>  +	 * so disassociation could have completed and unloaded the module that
>  +	 * backs the uobj->type pointer.
>  +	 */
>  +	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
>  +	return success_res;
>  +}
>  +
>  +/* alloc_uobj must be undone by uverbs_destroy_uobject() */
>  +static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile,
>   				     const struct uverbs_obj_type *type)
>   {
>  -	struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);
>  +	struct ib_uobject *uobj;
>  +	struct ib_ucontext *ucontext;
>  +
>  +	ucontext = ib_uverbs_get_ucontext(ufile);
>  +	if (IS_ERR(ucontext))
>  +		return ERR_CAST(ucontext);
>   
>  +	uobj = kzalloc(type->obj_size, GFP_KERNEL);
>   	if (!uobj)
>   		return ERR_PTR(-ENOMEM);
>   	/*

This is now a conflict between Linus' tree and the rdma tree.

-- 
Cheers,
Stephen Rothwell

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 4+ messages in thread

* linux-next: manual merge of the tip tree with the rdma tree
@ 2017-11-01  7:01 Stephen Rothwell
  0 siblings, 0 replies; 4+ messages in thread
From: Stephen Rothwell @ 2017-11-01  7:01 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Doug Ledford
  Cc: Linux-Next Mailing List, Linux Kernel Mailing List, Mark Rutland,
	Michael J. Ruhl

Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  drivers/infiniband/hw/hfi1/file_ops.c

between commit:

  ecf799852bde ("IB/hfi1: Refactor reset_ctxt() IOCTL")

from the rdma tree and commit:

  6aa7de059173 ("locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()")

from the tip tree.

I fixed it up (the former included the update from the latter) and can
carry the fix as necessary. This is now fixed as far as linux-next is
concerned, but any non trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging.  You may
also want to consider cooperating with the maintainer of the conflicting
tree to minimise any particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2018-08-15  1:51 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-06  4:53 linux-next: manual merge of the tip tree with the rdma tree Stephen Rothwell
2018-08-06 19:45 ` Jason Gunthorpe
2018-08-15  1:51 ` Stephen Rothwell
  -- strict thread matches above, loose matches on Subject: below --
2017-11-01  7:01 Stephen Rothwell

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).