From: Bjorn Helgaas <helgaas@kernel.org>
To: Logan Gunthorpe <logang@deltatee.com>
Cc: linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org,
linux-block@vger.kernel.org, linux-pci@vger.kernel.org,
linux-mm@kvack.org, iommu@lists.linux-foundation.org,
"Stephen Bates" <sbates@raithlin.com>,
"Christoph Hellwig" <hch@lst.de>,
"Dan Williams" <dan.j.williams@intel.com>,
"Jason Gunthorpe" <jgg@ziepe.ca>,
"Christian König" <christian.koenig@amd.com>,
"John Hubbard" <jhubbard@nvidia.com>,
"Don Dutile" <ddutile@redhat.com>,
"Matthew Wilcox" <willy@infradead.org>,
"Daniel Vetter" <daniel.vetter@ffwll.ch>,
"Jakowski Andrzej" <andrzej.jakowski@intel.com>,
"Minturn Dave B" <dave.b.minturn@intel.com>,
"Jason Ekstrand" <jason@jlekstrand.net>,
"Dave Hansen" <dave.hansen@linux.intel.com>,
"Xiong Jianxin" <jianxin.xiong@intel.com>,
"Ira Weiny" <ira.weiny@intel.com>,
"Robin Murphy" <robin.murphy@arm.com>,
"Martin Oliveira" <martin.oliveira@eideticom.com>,
"Chaitanya Kulkarni" <ckulkarnilinux@gmail.com>
Subject: Re: [PATCH v3 19/20] PCI/P2PDMA: introduce pci_mmap_p2pmem()
Date: Mon, 27 Sep 2021 13:49:32 -0500 [thread overview]
Message-ID: <20210927184932.GA667911@bhelgaas> (raw)
In-Reply-To: <20210916234100.122368-20-logang@deltatee.com>
On Thu, Sep 16, 2021 at 05:40:59PM -0600, Logan Gunthorpe wrote:
> Introduce pci_mmap_p2pmem() which is a helper to allocate and mmap
> a hunk of p2pmem into userspace.
>
> Pages are allocated from the genalloc in bulk and their reference count
> incremented. They are returned to the genalloc when the page is put.
>
> The VMA does not take a reference to the pages when they are inserted
> with vmf_insert_mixed() (which is necessary for zone device pages) so
> the backing P2P memory is stored in a structure in vm_private_data.
>
> A pseudo mount is used to allocate an inode for each PCI device. The
> inode's address_space is used in the file doing the mmap so that all
> VMAs are collected and can be unmapped if the PCI device is unbound.
> After unmapping, the VMAs are iterated through and their pages are
> put so the device can continue to be unbound. An active flag is used
> to signal to VMAs not to allocate any further P2P memory once the
> removal process starts. Concurrent access to the flag is
> synchronized with an RCU lock.
>
> The VMAs and inode will survive after the unbind of the device, but no
> pages will be present in the VMA and a subsequent access will result
> in a SIGBUS error.
>
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
I would capitalize "Introduce" in the subject line.
> ---
> drivers/pci/p2pdma.c | 263 ++++++++++++++++++++++++++++++++++++-
> include/linux/pci-p2pdma.h | 11 ++
> include/uapi/linux/magic.h | 1 +
> 3 files changed, 273 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
> index 2422af5a529c..a5adf57af53a 100644
> --- a/drivers/pci/p2pdma.c
> +++ b/drivers/pci/p2pdma.c
> @@ -16,14 +16,19 @@
> #include <linux/genalloc.h>
> #include <linux/memremap.h>
> #include <linux/percpu-refcount.h>
> +#include <linux/pfn_t.h>
> +#include <linux/pseudo_fs.h>
> #include <linux/random.h>
> #include <linux/seq_buf.h>
> #include <linux/xarray.h>
> +#include <uapi/linux/magic.h>
>
> struct pci_p2pdma {
> struct gen_pool *pool;
> bool p2pmem_published;
> struct xarray map_types;
> + struct inode *inode;
> + bool active;
> };
>
> struct pci_p2pdma_pagemap {
> @@ -32,6 +37,14 @@ struct pci_p2pdma_pagemap {
> u64 bus_offset;
> };
>
> +struct pci_p2pdma_map {
> + struct kref ref;
> + struct pci_dev *pdev;
> + struct inode *inode;
> + void *kaddr;
> + size_t len;
> +};
> +
> static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
> {
> return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
> @@ -100,6 +113,26 @@ static const struct attribute_group p2pmem_group = {
> .name = "p2pmem",
> };
>
> +/*
> + * P2PDMA internal mount
> + * Fake an internal VFS mount-point in order to allocate struct address_space
> + * mappings to remove VMAs on unbind events.
> + */
> +static int pci_p2pdma_fs_cnt;
> +static struct vfsmount *pci_p2pdma_fs_mnt;
> +
> +static int pci_p2pdma_fs_init_fs_context(struct fs_context *fc)
> +{
> + return init_pseudo(fc, P2PDMA_MAGIC) ? 0 : -ENOMEM;
> +}
> +
> +static struct file_system_type pci_p2pdma_fs_type = {
> + .name = "p2dma",
> + .owner = THIS_MODULE,
> + .init_fs_context = pci_p2pdma_fs_init_fs_context,
> + .kill_sb = kill_anon_super,
> +};
> +
> static void p2pdma_page_free(struct page *page)
> {
> struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
> @@ -128,6 +161,9 @@ static void pci_p2pdma_release(void *data)
> gen_pool_destroy(p2pdma->pool);
> sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
> xa_destroy(&p2pdma->map_types);
> +
> + iput(p2pdma->inode);
> + simple_release_fs(&pci_p2pdma_fs_mnt, &pci_p2pdma_fs_cnt);
> }
>
> static int pci_p2pdma_setup(struct pci_dev *pdev)
> @@ -145,17 +181,32 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
> if (!p2p->pool)
> goto out;
>
> - error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
> + error = simple_pin_fs(&pci_p2pdma_fs_type, &pci_p2pdma_fs_mnt,
> + &pci_p2pdma_fs_cnt);
> if (error)
> goto out_pool_destroy;
>
> + p2p->inode = alloc_anon_inode(pci_p2pdma_fs_mnt->mnt_sb);
> + if (IS_ERR(p2p->inode)) {
> + error = -ENOMEM;
> + goto out_unpin_fs;
> + }
> +
> + error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
> + if (error)
> + goto out_put_inode;
> +
> error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
> if (error)
> - goto out_pool_destroy;
> + goto out_put_inode;
>
> rcu_assign_pointer(pdev->p2pdma, p2p);
> return 0;
>
> +out_put_inode:
> + iput(p2p->inode);
> +out_unpin_fs:
> + simple_release_fs(&pci_p2pdma_fs_mnt, &pci_p2pdma_fs_cnt);
> out_pool_destroy:
> gen_pool_destroy(p2p->pool);
> out:
> @@ -163,6 +214,45 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
> return error;
> }
>
> +static void pci_p2pdma_map_free_pages(struct pci_p2pdma_map *pmap)
> +{
> + int i;
> +
> + if (!pmap->kaddr)
> + return;
> +
> + for (i = 0; i < pmap->len; i += PAGE_SIZE)
> + put_page(virt_to_page(pmap->kaddr + i));
> +
> + pmap->kaddr = NULL;
> +}
> +
> +static void pci_p2pdma_free_mappings(struct address_space *mapping)
> +{
> + struct vm_area_struct *vma;
> +
> + i_mmap_lock_write(mapping);
> + if (RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
> + goto out;
> +
> + vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, -1)
> + pci_p2pdma_map_free_pages(vma->vm_private_data);
> +
> +out:
> + i_mmap_unlock_write(mapping);
> +}
> +
> +static void pci_p2pdma_unmap_mappings(void *data)
> +{
> + struct pci_dev *pdev = data;
> + struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
> +
> + p2pdma->active = false;
> + synchronize_rcu();
> + unmap_mapping_range(p2pdma->inode->i_mapping, 0, 0, 1);
> + pci_p2pdma_free_mappings(p2pdma->inode->i_mapping);
> +}
> +
> /**
> * pci_p2pdma_add_resource - add memory for use as p2p memory
> * @pdev: the device to add the memory to
> @@ -221,6 +311,11 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
> goto pgmap_free;
> }
>
> + error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_unmap_mappings,
> + pdev);
> + if (error)
> + goto pages_free;
> +
> p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
> error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
> pci_bus_address(pdev, bar) + offset,
> @@ -229,6 +324,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
> if (error)
> goto pages_free;
>
> + p2pdma->active = true;
> pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
> pgmap->range.start, pgmap->range.end);
>
> @@ -1029,3 +1125,166 @@ ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
> return sprintf(page, "%s\n", pci_name(p2p_dev));
> }
> EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
> +
> +static struct pci_p2pdma_map *pci_p2pdma_map_alloc(struct pci_dev *pdev,
> + size_t len)
> +{
> + struct pci_p2pdma_map *pmap;
> +
> + pmap = kzalloc(sizeof(*pmap), GFP_KERNEL);
> + if (!pmap)
> + return NULL;
> +
> + kref_init(&pmap->ref);
> + pmap->pdev = pci_dev_get(pdev);
> + pmap->len = len;
> +
> + return pmap;
> +}
> +
> +static void pci_p2pdma_map_free(struct kref *ref)
> +{
> + struct pci_p2pdma_map *pmap =
> + container_of(ref, struct pci_p2pdma_map, ref);
> +
> + pci_p2pdma_map_free_pages(pmap);
> + pci_dev_put(pmap->pdev);
> + iput(pmap->inode);
> + simple_release_fs(&pci_p2pdma_fs_mnt, &pci_p2pdma_fs_cnt);
> + kfree(pmap);
> +}
> +
> +static void pci_p2pdma_vma_open(struct vm_area_struct *vma)
> +{
> + struct pci_p2pdma_map *pmap = vma->vm_private_data;
> +
> + kref_get(&pmap->ref);
> +}
> +
> +static void pci_p2pdma_vma_close(struct vm_area_struct *vma)
> +{
> + struct pci_p2pdma_map *pmap = vma->vm_private_data;
> +
> + kref_put(&pmap->ref, pci_p2pdma_map_free);
> +}
> +
> +static vm_fault_t pci_p2pdma_vma_fault(struct vm_fault *vmf)
> +{
> + struct pci_p2pdma_map *pmap = vmf->vma->vm_private_data;
> + struct pci_p2pdma *p2pdma;
> + void *vaddr;
> + pfn_t pfn;
> + int i;
> +
> + if (!pmap->kaddr) {
> + rcu_read_lock();
> + p2pdma = rcu_dereference(pmap->pdev->p2pdma);
> + if (!p2pdma)
> + goto err_out;
> +
> + if (!p2pdma->active)
> + goto err_out;
> +
> + pmap->kaddr = (void *)gen_pool_alloc(p2pdma->pool, pmap->len);
> + if (!pmap->kaddr)
> + goto err_out;
> +
> + for (i = 0; i < pmap->len; i += PAGE_SIZE)
> + get_page(virt_to_page(pmap->kaddr + i));
> +
> + rcu_read_unlock();
> + }
> +
> + vaddr = pmap->kaddr + (vmf->pgoff << PAGE_SHIFT);
> + pfn = phys_to_pfn_t(virt_to_phys(vaddr), PFN_DEV | PFN_MAP);
> +
> + return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
> +
> +err_out:
> + rcu_read_unlock();
> + return VM_FAULT_SIGBUS;
> +}
> +static const struct vm_operations_struct pci_p2pdma_vmops = {
> + .open = pci_p2pdma_vma_open,
> + .close = pci_p2pdma_vma_close,
> + .fault = pci_p2pdma_vma_fault,
> +};
> +
> +/**
> + * pci_p2pdma_mmap_file_open - setup file mapping to store P2PMEM VMAs
> + * @pdev: the device to allocate memory from
> + * @vma: the userspace vma to map the memory to
> + *
> + * Set f_mapping of the file to the p2pdma inode so that mappings
> + * can be torn down on device unbind.
> + *
> + * Returns 0 on success, or a negative error code on failure
> + */
> +void pci_p2pdma_mmap_file_open(struct pci_dev *pdev, struct file *file)
> +{
> + struct pci_p2pdma *p2pdma;
> +
> + rcu_read_lock();
> + p2pdma = rcu_dereference(pdev->p2pdma);
> + if (p2pdma)
> + file->f_mapping = p2pdma->inode->i_mapping;
> + rcu_read_unlock();
> +}
> +EXPORT_SYMBOL_GPL(pci_p2pdma_mmap_file_open);
> +
> +/**
> + * pci_mmap_p2pmem - setup an mmap region to be backed with P2PDMA memory
> + * that was registered with pci_p2pdma_add_resource()
> + * @pdev: the device to allocate memory from
> + * @vma: the userspace vma to map the memory to
> + *
> + * The file must call pci_p2pdma_mmap_file_open() in its open() operation.
> + *
> + * Returns 0 on success, or a negative error code on failure
> + */
> +int pci_mmap_p2pmem(struct pci_dev *pdev, struct vm_area_struct *vma)
> +{
> + struct pci_p2pdma_map *pmap;
> + struct pci_p2pdma *p2pdma;
> + int ret;
> +
> + /* prevent private mappings from being established */
> + if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
> + pci_info_ratelimited(pdev,
> + "%s: fail, attempted private mapping\n",
> + current->comm);
> + return -EINVAL;
> + }
> +
> + pmap = pci_p2pdma_map_alloc(pdev, vma->vm_end - vma->vm_start);
> + if (!pmap)
> + return -ENOMEM;
> +
> + rcu_read_lock();
> + p2pdma = rcu_dereference(pdev->p2pdma);
> + if (!p2pdma) {
> + ret = -ENODEV;
> + goto out;
> + }
> +
> + ret = simple_pin_fs(&pci_p2pdma_fs_type, &pci_p2pdma_fs_mnt,
> + &pci_p2pdma_fs_cnt);
> + if (ret)
> + goto out;
> +
> + ihold(p2pdma->inode);
> + pmap->inode = p2pdma->inode;
> + rcu_read_unlock();
> +
> + vma->vm_flags |= VM_MIXEDMAP;
> + vma->vm_private_data = pmap;
> + vma->vm_ops = &pci_p2pdma_vmops;
> +
> + return 0;
> +
> +out:
> + rcu_read_unlock();
> + kfree(pmap);
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(pci_mmap_p2pmem);
> diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
> index 0c33a40a86e7..f9f19f3db676 100644
> --- a/include/linux/pci-p2pdma.h
> +++ b/include/linux/pci-p2pdma.h
> @@ -81,6 +81,8 @@ int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
> bool *use_p2pdma);
> ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
> bool use_p2pdma);
> +void pci_p2pdma_mmap_file_open(struct pci_dev *pdev, struct file *file);
> +int pci_mmap_p2pmem(struct pci_dev *pdev, struct vm_area_struct *vma);
> #else /* CONFIG_PCI_P2PDMA */
> static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
> size_t size, u64 offset)
> @@ -152,6 +154,15 @@ static inline ssize_t pci_p2pdma_enable_show(char *page,
> {
> return sprintf(page, "none\n");
> }
> +static inline void pci_p2pdma_mmap_file_open(struct pci_dev *pdev,
> + struct file *file)
> +{
> +}
> +static inline int pci_mmap_p2pmem(struct pci_dev *pdev,
> + struct vm_area_struct *vma)
> +{
> + return -EOPNOTSUPP;
> +}
> #endif /* CONFIG_PCI_P2PDMA */
>
>
> diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
> index 35687dcb1a42..af737842c56f 100644
> --- a/include/uapi/linux/magic.h
> +++ b/include/uapi/linux/magic.h
> @@ -88,6 +88,7 @@
> #define BPF_FS_MAGIC 0xcafe4a11
> #define AAFS_MAGIC 0x5a3c69f0
> #define ZONEFS_MAGIC 0x5a4f4653
> +#define P2PDMA_MAGIC 0x70327064
>
> /* Since UDF 2.01 is ISO 13346 based... */
> #define UDF_SUPER_MAGIC 0x15013346
> --
> 2.30.2
>
next prev parent reply other threads:[~2021-09-27 18:49 UTC|newest]
Thread overview: 87+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-09-16 23:40 [PATCH v3 00/20] Userspace P2PDMA with O_DIRECT NVMe devices Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 01/20] lib/scatterlist: add flag for indicating P2PDMA segments in an SGL Logan Gunthorpe
2021-09-28 18:32 ` Jason Gunthorpe
2021-09-29 21:15 ` Logan Gunthorpe
2021-09-30 4:47 ` Chaitanya Kulkarni
2021-09-30 16:49 ` Logan Gunthorpe
2021-09-30 4:57 ` Chaitanya Kulkarni
2021-09-16 23:40 ` [PATCH v3 02/20] PCI/P2PDMA: attempt to set map_type if it has not been set Logan Gunthorpe
2021-09-27 18:50 ` Bjorn Helgaas
2021-09-16 23:40 ` [PATCH v3 03/20] PCI/P2PDMA: make pci_p2pdma_map_type() non-static Logan Gunthorpe
2021-09-27 18:46 ` Bjorn Helgaas
2021-09-28 18:48 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 04/20] PCI/P2PDMA: introduce helpers for dma_map_sg implementations Logan Gunthorpe
2021-09-27 18:53 ` Bjorn Helgaas
2021-09-27 19:59 ` Logan Gunthorpe
2021-09-28 18:55 ` Jason Gunthorpe
2021-09-29 21:26 ` Logan Gunthorpe
2021-09-28 22:05 ` [PATCH v3 4/20] " Jason Gunthorpe
2021-09-29 21:30 ` Logan Gunthorpe
2021-09-29 22:46 ` Jason Gunthorpe
2021-09-29 23:00 ` Logan Gunthorpe
2021-09-29 23:40 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 05/20] dma-mapping: allow EREMOTEIO return code for P2PDMA transfers Logan Gunthorpe
2021-09-28 18:57 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 06/20] dma-direct: support PCI P2PDMA pages in dma-direct map_sg Logan Gunthorpe
2021-09-28 19:08 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 07/20] dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support Logan Gunthorpe
2021-09-28 19:11 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 08/20] iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg Logan Gunthorpe
2021-09-28 19:15 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 09/20] nvme-pci: check DMA ops when indicating support for PCI P2PDMA Logan Gunthorpe
2021-09-30 5:06 ` Chaitanya Kulkarni
2021-09-30 16:51 ` Logan Gunthorpe
2021-09-30 17:19 ` Chaitanya Kulkarni
2021-09-16 23:40 ` [PATCH v3 10/20] nvme-pci: convert to using dma_map_sgtable() Logan Gunthorpe
2021-10-05 22:29 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 11/20] RDMA/core: introduce ib_dma_pci_p2p_dma_supported() Logan Gunthorpe
2021-09-28 19:17 ` Jason Gunthorpe
2021-10-05 22:31 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 12/20] RDMA/rw: use dma_map_sgtable() Logan Gunthorpe
2021-09-28 19:43 ` Jason Gunthorpe
2021-09-29 22:56 ` Logan Gunthorpe
2021-10-05 22:40 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 13/20] PCI/P2PDMA: remove pci_p2pdma_[un]map_sg() Logan Gunthorpe
2021-09-27 18:50 ` Bjorn Helgaas
2021-09-28 19:43 ` Jason Gunthorpe
2021-10-05 22:42 ` Max Gurtovoy
2021-09-16 23:40 ` [PATCH v3 14/20] mm: introduce FOLL_PCI_P2PDMA to gate getting PCI P2PDMA pages Logan Gunthorpe
2021-09-28 19:47 ` Jason Gunthorpe
2021-09-29 21:34 ` Logan Gunthorpe
2021-09-29 22:48 ` Jason Gunthorpe
2021-09-16 23:40 ` [PATCH v3 15/20] iov_iter: introduce iov_iter_get_pages_[alloc_]flags() Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 16/20] block: set FOLL_PCI_P2PDMA in __bio_iov_iter_get_pages() Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 17/20] block: set FOLL_PCI_P2PDMA in bio_map_user_iov() Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 18/20] mm: use custom page_free for P2PDMA pages Logan Gunthorpe
2021-09-16 23:40 ` [PATCH v3 19/20] PCI/P2PDMA: introduce pci_mmap_p2pmem() Logan Gunthorpe
2021-09-27 18:49 ` Bjorn Helgaas [this message]
2021-09-28 19:55 ` Jason Gunthorpe
2021-09-29 21:42 ` Logan Gunthorpe
2021-09-29 23:05 ` Jason Gunthorpe
2021-09-29 23:27 ` Logan Gunthorpe
2021-09-29 23:35 ` Jason Gunthorpe
2021-09-29 23:49 ` Logan Gunthorpe
2021-09-30 0:36 ` Jason Gunthorpe
2021-10-01 13:48 ` Jason Gunthorpe
2021-10-01 17:01 ` Logan Gunthorpe
2021-10-01 17:45 ` Jason Gunthorpe
2021-10-01 20:13 ` Logan Gunthorpe
2021-10-01 22:14 ` Jason Gunthorpe
2021-10-01 22:22 ` Logan Gunthorpe
2021-10-01 22:46 ` Jason Gunthorpe
2021-10-01 23:27 ` John Hubbard
2021-10-01 23:34 ` Logan Gunthorpe
2021-10-04 6:58 ` Christian König
2021-10-04 13:11 ` Jason Gunthorpe
2021-10-04 13:22 ` Christian König
2021-10-04 13:27 ` Jason Gunthorpe
2021-10-04 14:54 ` Christian König
2021-09-28 20:05 ` Jason Gunthorpe
2021-09-29 21:46 ` Logan Gunthorpe
2021-09-16 23:41 ` [PATCH v3 20/20] nvme-pci: allow mmaping the CMB in userspace Logan Gunthorpe
2021-09-28 20:02 ` [PATCH v3 00/20] Userspace P2PDMA with O_DIRECT NVMe devices Jason Gunthorpe
2021-09-29 21:50 ` Logan Gunthorpe
2021-09-29 23:21 ` Jason Gunthorpe
2021-09-29 23:28 ` Logan Gunthorpe
2021-09-29 23:36 ` Jason Gunthorpe
2021-09-29 23:52 ` Logan Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210927184932.GA667911@bhelgaas \
--to=helgaas@kernel.org \
--cc=andrzej.jakowski@intel.com \
--cc=christian.koenig@amd.com \
--cc=ckulkarnilinux@gmail.com \
--cc=dan.j.williams@intel.com \
--cc=daniel.vetter@ffwll.ch \
--cc=dave.b.minturn@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=ddutile@redhat.com \
--cc=hch@lst.de \
--cc=iommu@lists.linux-foundation.org \
--cc=ira.weiny@intel.com \
--cc=jason@jlekstrand.net \
--cc=jgg@ziepe.ca \
--cc=jhubbard@nvidia.com \
--cc=jianxin.xiong@intel.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-nvme@lists.infradead.org \
--cc=linux-pci@vger.kernel.org \
--cc=logang@deltatee.com \
--cc=martin.oliveira@eideticom.com \
--cc=robin.murphy@arm.com \
--cc=sbates@raithlin.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).