All of lore.kernel.org
 help / color / mirror / Atom feed
From: Cornelia Huck <cohuck@redhat.com>
To: Pankaj Gupta <pagupta@redhat.com>
Cc: jack@suse.cz, kvm@vger.kernel.org, mst@redhat.com,
	jasowang@redhat.com, david@fromorbit.com, qemu-devel@nongnu.org,
	virtualization@lists.linux-foundation.org,
	adilger.kernel@dilger.ca, zwisler@kernel.org,
	aarcange@redhat.com, linux-nvdimm@lists.01.org, david@redhat.com,
	willy@infradead.org, hch@infradead.org,
	linux-acpi@vger.kernel.org, linux-ext4@vger.kernel.org,
	lenb@kernel.org, kilobyte@angband.pl, riel@surriel.com,
	yuval.shaia@oracle.com, stefanha@redhat.com, pbonzini@redhat.com,
	lcapitulino@redhat.com, kwolf@redhat.com, nilal@redhat.com,
	tytso@mit.edu, xiaoguangrong.eric@gmail.com,
	darrick.wong@oracle.com, rjw@rjwysocki.net,
	linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, imammedo@redhat.com
Subject: Re: [PATCH v5 2/5] virtio-pmem: Add virtio pmem driver
Date: Wed, 10 Apr 2019 14:24:26 +0200	[thread overview]
Message-ID: <20190410142426.5bf0d9a4.cohuck@redhat.com> (raw)
In-Reply-To: <20190410040826.24371-3-pagupta@redhat.com>

On Wed, 10 Apr 2019 09:38:22 +0530
Pankaj Gupta <pagupta@redhat.com> wrote:

> This patch adds virtio-pmem driver for KVM guest.
> 
> Guest reads the persistent memory range information from
> Qemu over VIRTIO and registers it on nvdimm_bus. It also
> creates a nd_region object with the persistent memory
> range information so that existing 'nvdimm/pmem' driver
> can reserve this into system memory map. This way
> 'virtio-pmem' driver uses existing functionality of pmem
> driver to register persistent memory compatible for DAX
> capable filesystems.
> 
> This also provides function to perform guest flush over
> VIRTIO from 'pmem' driver when userspace performs flush
> on DAX memory range.
> 
> Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
> ---
>  drivers/nvdimm/virtio_pmem.c     |  88 ++++++++++++++++++++++
>  drivers/virtio/Kconfig           |  10 +++
>  drivers/virtio/Makefile          |   1 +
>  drivers/virtio/pmem.c            | 124 +++++++++++++++++++++++++++++++
>  include/linux/virtio_pmem.h      |  60 +++++++++++++++
>  include/uapi/linux/virtio_ids.h  |   1 +
>  include/uapi/linux/virtio_pmem.h |  10 +++
>  7 files changed, 294 insertions(+)
>  create mode 100644 drivers/nvdimm/virtio_pmem.c
>  create mode 100644 drivers/virtio/pmem.c
>  create mode 100644 include/linux/virtio_pmem.h
>  create mode 100644 include/uapi/linux/virtio_pmem.h
> 
(...)
> diff --git a/drivers/virtio/pmem.c b/drivers/virtio/pmem.c
> new file mode 100644
> index 000000000000..cc9de9589d56
> --- /dev/null
> +++ b/drivers/virtio/pmem.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * virtio_pmem.c: Virtio pmem Driver
> + *
> + * Discovers persistent memory range information
> + * from host and registers the virtual pmem device
> + * with libnvdimm core.
> + */
> +#include <linux/virtio_pmem.h>
> +#include <../../drivers/nvdimm/nd.h>
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> + /* Initialize virt queue */
> +static int init_vq(struct virtio_pmem *vpmem)

IMHO, you don't gain much by splitting off this function...

> +{
> +	struct virtqueue *vq;
> +
> +	/* single vq */
> +	vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev,
> +				host_ack, "flush_queue");
> +	if (IS_ERR(vq))
> +		return PTR_ERR(vq);

I'm personally not a fan of chained assignments... I think I'd just
drop the 'vq' variable and operate on vpmem->req_vq directly.

> +
> +	spin_lock_init(&vpmem->pmem_lock);
> +	INIT_LIST_HEAD(&vpmem->req_list);
> +
> +	return 0;
> +};
> +
> +static int virtio_pmem_probe(struct virtio_device *vdev)
> +{
> +	int err = 0;
> +	struct resource res;
> +	struct virtio_pmem *vpmem;
> +	struct nvdimm_bus *nvdimm_bus;
> +	struct nd_region_desc ndr_desc = {};
> +	int nid = dev_to_node(&vdev->dev);
> +	struct nd_region *nd_region;
> +
> +	if (!vdev->config->get) {
> +		dev_err(&vdev->dev, "%s failure: config disabled\n",

Maybe s/config disabled/config access disabled/ ? That seems to be the
more common message.

> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem),
> +					GFP_KERNEL);

Here, the vpmem variable makes sense for convenience, but I'm again not
a fan of the chaining :)

> +	if (!vpmem) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	vpmem->vdev = vdev;
> +	err = init_vq(vpmem);
> +	if (err)
> +		goto out_err;
> +
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			start, &vpmem->start);
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			size, &vpmem->size);
> +
> +	res.start = vpmem->start;
> +	res.end   = vpmem->start + vpmem->size-1;
> +	vpmem->nd_desc.provider_name = "virtio-pmem";
> +	vpmem->nd_desc.module = THIS_MODULE;
> +
> +	vpmem->nvdimm_bus = nvdimm_bus = nvdimm_bus_register(&vdev->dev,
> +						&vpmem->nd_desc);

And here :)

> +	if (!nvdimm_bus)
> +		goto out_vq;
> +
> +	dev_set_drvdata(&vdev->dev, nvdimm_bus);
> +
> +	ndr_desc.res = &res;
> +	ndr_desc.numa_node = nid;
> +	ndr_desc.flush = virtio_pmem_flush;
> +	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
> +	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
> +	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
> +	nd_region->provider_data =  dev_to_virtio
> +					(nd_region->dev.parent->parent);

Isn't it clear that this parent chain will always end up at &vdev->dev?
Maybe simply set ->provider_data to vdev directly? (Does it need to
grab a reference count of the device, BTW?)

> +
> +	if (!nd_region)
> +		goto out_nd;

Probably better to do this check before you access nd_region's
members :)

> +
> +	return 0;
> +out_nd:
> +	err = -ENXIO;
> +	nvdimm_bus_unregister(nvdimm_bus);
> +out_vq:
> +	vdev->config->del_vqs(vdev);
> +out_err:
> +	dev_err(&vdev->dev, "failed to register virtio pmem memory\n");
> +	return err;
> +}
> +
> +static void virtio_pmem_remove(struct virtio_device *vdev)
> +{
> +	struct virtio_pmem *vpmem = vdev->priv;
> +	struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev);
> +
> +	nvdimm_bus_unregister(nvdimm_bus);

I haven't followed this around the nvdimm code, but is the nd_region
you created during probe cleaned up automatically, or would you need to
do something here?

> +	vdev->config->del_vqs(vdev);
> +	vdev->config->reset(vdev);
> +	kfree(vpmem);

You allocated vpmem via devm_kzalloc; isn't it freed automatically on
remove?

> +}
> +
> +static struct virtio_driver virtio_pmem_driver = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.probe			= virtio_pmem_probe,
> +	.remove			= virtio_pmem_remove,
> +};
> +
> +module_virtio_driver(virtio_pmem_driver);
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio pmem driver");
> +MODULE_LICENSE("GPL");

Only looked at this from the general virtio driver angle; seems fine
apart from some easy-to-fix issues and some personal style preference
things.
_______________________________________________
Linux-nvdimm mailing list
Linux-nvdimm@lists.01.org
https://lists.01.org/mailman/listinfo/linux-nvdimm

WARNING: multiple messages have this Message-ID (diff)
From: Cornelia Huck <cohuck@redhat.com>
To: Pankaj Gupta <pagupta@redhat.com>
Cc: linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org, kvm@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-acpi@vger.kernel.org,
	qemu-devel@nongnu.org, linux-ext4@vger.kernel.org,
	linux-xfs@vger.kernel.org, dan.j.williams@intel.com,
	zwisler@kernel.org, vishal.l.verma@intel.com,
	dave.jiang@intel.com, mst@redhat.com, jasowang@redhat.com,
	willy@infradead.org, rjw@rjwysocki.net, hch@infradead.org,
	lenb@kernel.org, jack@suse.cz, tytso@mit.edu,
	adilger.kernel@dilger.ca, darrick.wong@oracle.com,
	lcapitulino@redhat.com, kwolf@redhat.com, imammedo@redhat.com,
	jmoyer@redhat.com, nilal@redhat.com, riel@surriel.com,
	stefanha@redhat.com, aarcange@redhat.com, david@redhat.com,
	david@fromorbit.com, xiaoguangrong.eric@gmail.com,
	pbonzini@redhat.com, kilobyte@angband.pl, yuval.shaia@oracle.com
Subject: Re: [PATCH v5 2/5] virtio-pmem: Add virtio pmem driver
Date: Wed, 10 Apr 2019 14:24:26 +0200	[thread overview]
Message-ID: <20190410142426.5bf0d9a4.cohuck@redhat.com> (raw)
In-Reply-To: <20190410040826.24371-3-pagupta@redhat.com>

On Wed, 10 Apr 2019 09:38:22 +0530
Pankaj Gupta <pagupta@redhat.com> wrote:

> This patch adds virtio-pmem driver for KVM guest.
> 
> Guest reads the persistent memory range information from
> Qemu over VIRTIO and registers it on nvdimm_bus. It also
> creates a nd_region object with the persistent memory
> range information so that existing 'nvdimm/pmem' driver
> can reserve this into system memory map. This way
> 'virtio-pmem' driver uses existing functionality of pmem
> driver to register persistent memory compatible for DAX
> capable filesystems.
> 
> This also provides function to perform guest flush over
> VIRTIO from 'pmem' driver when userspace performs flush
> on DAX memory range.
> 
> Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
> ---
>  drivers/nvdimm/virtio_pmem.c     |  88 ++++++++++++++++++++++
>  drivers/virtio/Kconfig           |  10 +++
>  drivers/virtio/Makefile          |   1 +
>  drivers/virtio/pmem.c            | 124 +++++++++++++++++++++++++++++++
>  include/linux/virtio_pmem.h      |  60 +++++++++++++++
>  include/uapi/linux/virtio_ids.h  |   1 +
>  include/uapi/linux/virtio_pmem.h |  10 +++
>  7 files changed, 294 insertions(+)
>  create mode 100644 drivers/nvdimm/virtio_pmem.c
>  create mode 100644 drivers/virtio/pmem.c
>  create mode 100644 include/linux/virtio_pmem.h
>  create mode 100644 include/uapi/linux/virtio_pmem.h
> 
(...)
> diff --git a/drivers/virtio/pmem.c b/drivers/virtio/pmem.c
> new file mode 100644
> index 000000000000..cc9de9589d56
> --- /dev/null
> +++ b/drivers/virtio/pmem.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * virtio_pmem.c: Virtio pmem Driver
> + *
> + * Discovers persistent memory range information
> + * from host and registers the virtual pmem device
> + * with libnvdimm core.
> + */
> +#include <linux/virtio_pmem.h>
> +#include <../../drivers/nvdimm/nd.h>
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> + /* Initialize virt queue */
> +static int init_vq(struct virtio_pmem *vpmem)

IMHO, you don't gain much by splitting off this function...

> +{
> +	struct virtqueue *vq;
> +
> +	/* single vq */
> +	vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev,
> +				host_ack, "flush_queue");
> +	if (IS_ERR(vq))
> +		return PTR_ERR(vq);

I'm personally not a fan of chained assignments... I think I'd just
drop the 'vq' variable and operate on vpmem->req_vq directly.

> +
> +	spin_lock_init(&vpmem->pmem_lock);
> +	INIT_LIST_HEAD(&vpmem->req_list);
> +
> +	return 0;
> +};
> +
> +static int virtio_pmem_probe(struct virtio_device *vdev)
> +{
> +	int err = 0;
> +	struct resource res;
> +	struct virtio_pmem *vpmem;
> +	struct nvdimm_bus *nvdimm_bus;
> +	struct nd_region_desc ndr_desc = {};
> +	int nid = dev_to_node(&vdev->dev);
> +	struct nd_region *nd_region;
> +
> +	if (!vdev->config->get) {
> +		dev_err(&vdev->dev, "%s failure: config disabled\n",

Maybe s/config disabled/config access disabled/ ? That seems to be the
more common message.

> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem),
> +					GFP_KERNEL);

Here, the vpmem variable makes sense for convenience, but I'm again not
a fan of the chaining :)

> +	if (!vpmem) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	vpmem->vdev = vdev;
> +	err = init_vq(vpmem);
> +	if (err)
> +		goto out_err;
> +
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			start, &vpmem->start);
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			size, &vpmem->size);
> +
> +	res.start = vpmem->start;
> +	res.end   = vpmem->start + vpmem->size-1;
> +	vpmem->nd_desc.provider_name = "virtio-pmem";
> +	vpmem->nd_desc.module = THIS_MODULE;
> +
> +	vpmem->nvdimm_bus = nvdimm_bus = nvdimm_bus_register(&vdev->dev,
> +						&vpmem->nd_desc);

And here :)

> +	if (!nvdimm_bus)
> +		goto out_vq;
> +
> +	dev_set_drvdata(&vdev->dev, nvdimm_bus);
> +
> +	ndr_desc.res = &res;
> +	ndr_desc.numa_node = nid;
> +	ndr_desc.flush = virtio_pmem_flush;
> +	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
> +	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
> +	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
> +	nd_region->provider_data =  dev_to_virtio
> +					(nd_region->dev.parent->parent);

Isn't it clear that this parent chain will always end up at &vdev->dev?
Maybe simply set ->provider_data to vdev directly? (Does it need to
grab a reference count of the device, BTW?)

> +
> +	if (!nd_region)
> +		goto out_nd;

Probably better to do this check before you access nd_region's
members :)

> +
> +	return 0;
> +out_nd:
> +	err = -ENXIO;
> +	nvdimm_bus_unregister(nvdimm_bus);
> +out_vq:
> +	vdev->config->del_vqs(vdev);
> +out_err:
> +	dev_err(&vdev->dev, "failed to register virtio pmem memory\n");
> +	return err;
> +}
> +
> +static void virtio_pmem_remove(struct virtio_device *vdev)
> +{
> +	struct virtio_pmem *vpmem = vdev->priv;
> +	struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev);
> +
> +	nvdimm_bus_unregister(nvdimm_bus);

I haven't followed this around the nvdimm code, but is the nd_region
you created during probe cleaned up automatically, or would you need to
do something here?

> +	vdev->config->del_vqs(vdev);
> +	vdev->config->reset(vdev);
> +	kfree(vpmem);

You allocated vpmem via devm_kzalloc; isn't it freed automatically on
remove?

> +}
> +
> +static struct virtio_driver virtio_pmem_driver = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.probe			= virtio_pmem_probe,
> +	.remove			= virtio_pmem_remove,
> +};
> +
> +module_virtio_driver(virtio_pmem_driver);
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio pmem driver");
> +MODULE_LICENSE("GPL");

Only looked at this from the general virtio driver angle; seems fine
apart from some easy-to-fix issues and some personal style preference
things.

WARNING: multiple messages have this Message-ID (diff)
From: Cornelia Huck <cohuck@redhat.com>
To: Pankaj Gupta <pagupta@redhat.com>
Cc: linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org, kvm@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-acpi@vger.kernel.org,
	qemu-devel@nongnu.org, linux-ext4@vger.kernel.org,
	linux-xfs@vger.kernel.org, dan.j.williams@intel.com,
	zwisler@kernel.org, vishal.l.verma@intel.com,
	dave.jiang@intel.com, mst@redhat.com, jasowang@redhat.com,
	willy@infradead.org, rjw@rjwysocki.net, hch@infradead.org,
	lenb@kernel.org, jack@suse.cz, tytso@mit.edu,
	adilger.kernel@dilger.ca, darrick.wong@oracle.com,
	lcapitulino@redhat.com, kwolf@redhat.com, imammedo@redhat.com,
	jmoyer@redhat.com, nilal@redhat.com, riel@surriel.com,
	stefanha@redhat.com, aarcange@redhat.com, david@redhat.com,
	david@fromorbit.com, xiaoguangrong.eric@gmail.com,
	pbonzini@redhat.com, kilobyte@angband.pl, yuval.shaia@oracle.com
Subject: Re: [PATCH v5 2/5] virtio-pmem: Add virtio pmem driver
Date: Wed, 10 Apr 2019 14:24:26 +0200	[thread overview]
Message-ID: <20190410142426.5bf0d9a4.cohuck@redhat.com> (raw)
Message-ID: <20190410122426.-ARPaG25k9bZ0apCY12Flj9s8DTvGqzgsEpY2tubCF0@z> (raw)
In-Reply-To: <20190410040826.24371-3-pagupta@redhat.com>

On Wed, 10 Apr 2019 09:38:22 +0530
Pankaj Gupta <pagupta@redhat.com> wrote:

> This patch adds virtio-pmem driver for KVM guest.
> 
> Guest reads the persistent memory range information from
> Qemu over VIRTIO and registers it on nvdimm_bus. It also
> creates a nd_region object with the persistent memory
> range information so that existing 'nvdimm/pmem' driver
> can reserve this into system memory map. This way
> 'virtio-pmem' driver uses existing functionality of pmem
> driver to register persistent memory compatible for DAX
> capable filesystems.
> 
> This also provides function to perform guest flush over
> VIRTIO from 'pmem' driver when userspace performs flush
> on DAX memory range.
> 
> Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
> ---
>  drivers/nvdimm/virtio_pmem.c     |  88 ++++++++++++++++++++++
>  drivers/virtio/Kconfig           |  10 +++
>  drivers/virtio/Makefile          |   1 +
>  drivers/virtio/pmem.c            | 124 +++++++++++++++++++++++++++++++
>  include/linux/virtio_pmem.h      |  60 +++++++++++++++
>  include/uapi/linux/virtio_ids.h  |   1 +
>  include/uapi/linux/virtio_pmem.h |  10 +++
>  7 files changed, 294 insertions(+)
>  create mode 100644 drivers/nvdimm/virtio_pmem.c
>  create mode 100644 drivers/virtio/pmem.c
>  create mode 100644 include/linux/virtio_pmem.h
>  create mode 100644 include/uapi/linux/virtio_pmem.h
> 
(...)
> diff --git a/drivers/virtio/pmem.c b/drivers/virtio/pmem.c
> new file mode 100644
> index 000000000000..cc9de9589d56
> --- /dev/null
> +++ b/drivers/virtio/pmem.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * virtio_pmem.c: Virtio pmem Driver
> + *
> + * Discovers persistent memory range information
> + * from host and registers the virtual pmem device
> + * with libnvdimm core.
> + */
> +#include <linux/virtio_pmem.h>
> +#include <../../drivers/nvdimm/nd.h>
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> + /* Initialize virt queue */
> +static int init_vq(struct virtio_pmem *vpmem)

IMHO, you don't gain much by splitting off this function...

> +{
> +	struct virtqueue *vq;
> +
> +	/* single vq */
> +	vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev,
> +				host_ack, "flush_queue");
> +	if (IS_ERR(vq))
> +		return PTR_ERR(vq);

I'm personally not a fan of chained assignments... I think I'd just
drop the 'vq' variable and operate on vpmem->req_vq directly.

> +
> +	spin_lock_init(&vpmem->pmem_lock);
> +	INIT_LIST_HEAD(&vpmem->req_list);
> +
> +	return 0;
> +};
> +
> +static int virtio_pmem_probe(struct virtio_device *vdev)
> +{
> +	int err = 0;
> +	struct resource res;
> +	struct virtio_pmem *vpmem;
> +	struct nvdimm_bus *nvdimm_bus;
> +	struct nd_region_desc ndr_desc = {};
> +	int nid = dev_to_node(&vdev->dev);
> +	struct nd_region *nd_region;
> +
> +	if (!vdev->config->get) {
> +		dev_err(&vdev->dev, "%s failure: config disabled\n",

Maybe s/config disabled/config access disabled/ ? That seems to be the
more common message.

> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem),
> +					GFP_KERNEL);

Here, the vpmem variable makes sense for convenience, but I'm again not
a fan of the chaining :)

> +	if (!vpmem) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	vpmem->vdev = vdev;
> +	err = init_vq(vpmem);
> +	if (err)
> +		goto out_err;
> +
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			start, &vpmem->start);
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			size, &vpmem->size);
> +
> +	res.start = vpmem->start;
> +	res.end   = vpmem->start + vpmem->size-1;
> +	vpmem->nd_desc.provider_name = "virtio-pmem";
> +	vpmem->nd_desc.module = THIS_MODULE;
> +
> +	vpmem->nvdimm_bus = nvdimm_bus = nvdimm_bus_register(&vdev->dev,
> +						&vpmem->nd_desc);

And here :)

> +	if (!nvdimm_bus)
> +		goto out_vq;
> +
> +	dev_set_drvdata(&vdev->dev, nvdimm_bus);
> +
> +	ndr_desc.res = &res;
> +	ndr_desc.numa_node = nid;
> +	ndr_desc.flush = virtio_pmem_flush;
> +	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
> +	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
> +	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
> +	nd_region->provider_data =  dev_to_virtio
> +					(nd_region->dev.parent->parent);

Isn't it clear that this parent chain will always end up at &vdev->dev?
Maybe simply set ->provider_data to vdev directly? (Does it need to
grab a reference count of the device, BTW?)

> +
> +	if (!nd_region)
> +		goto out_nd;

Probably better to do this check before you access nd_region's
members :)

> +
> +	return 0;
> +out_nd:
> +	err = -ENXIO;
> +	nvdimm_bus_unregister(nvdimm_bus);
> +out_vq:
> +	vdev->config->del_vqs(vdev);
> +out_err:
> +	dev_err(&vdev->dev, "failed to register virtio pmem memory\n");
> +	return err;
> +}
> +
> +static void virtio_pmem_remove(struct virtio_device *vdev)
> +{
> +	struct virtio_pmem *vpmem = vdev->priv;
> +	struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev);
> +
> +	nvdimm_bus_unregister(nvdimm_bus);

I haven't followed this around the nvdimm code, but is the nd_region
you created during probe cleaned up automatically, or would you need to
do something here?

> +	vdev->config->del_vqs(vdev);
> +	vdev->config->reset(vdev);
> +	kfree(vpmem);

You allocated vpmem via devm_kzalloc; isn't it freed automatically on
remove?

> +}
> +
> +static struct virtio_driver virtio_pmem_driver = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.probe			= virtio_pmem_probe,
> +	.remove			= virtio_pmem_remove,
> +};
> +
> +module_virtio_driver(virtio_pmem_driver);
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio pmem driver");
> +MODULE_LICENSE("GPL");

Only looked at this from the general virtio driver angle; seems fine
apart from some easy-to-fix issues and some personal style preference
things.

WARNING: multiple messages have this Message-ID (diff)
From: Cornelia Huck <cohuck@redhat.com>
To: Pankaj Gupta <pagupta@redhat.com>
Cc: linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org, kvm@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-acpi@vger.kernel.org,
	qemu-devel@nongnu.org, linux-ext4@vger.kernel.org,
	linux-xfs@vger.kernel.org, dan.j.williams@intel.com,
	zwisler@kernel.org, vishal.l.verma@intel.com,
	dave.jiang@intel.com, mst@redhat.com, jasowang@redhat.com,
	willy@infradead.org, rjw@rjwysocki.net, hch@infradead.org,
	lenb@kernel.org, jack@suse.cz, tytso@mit.edu,
	adilger.kernel@dilger.ca, darrick.wong@oracle.com,
	lcapitulino@redhat.com, kwolf@redhat.com, imammedo@redhat.com,
	jmoyer@redhat.com, nilal@redhat.com, riel@surriel.com,
	stefanha@redhat.com, aarcange@redhat.com, david@redhat.com,
	david@fromorbit.com, xiaoguangrong.eric@gmail.com,
	pbonzini@redhat.com, kilobyte@angband.pl, yuval.shaia@oracle.com
Subject: Re: [Qemu-devel] [PATCH v5 2/5] virtio-pmem: Add virtio pmem driver
Date: Wed, 10 Apr 2019 14:24:26 +0200	[thread overview]
Message-ID: <20190410142426.5bf0d9a4.cohuck@redhat.com> (raw)
In-Reply-To: <20190410040826.24371-3-pagupta@redhat.com>

On Wed, 10 Apr 2019 09:38:22 +0530
Pankaj Gupta <pagupta@redhat.com> wrote:

> This patch adds virtio-pmem driver for KVM guest.
> 
> Guest reads the persistent memory range information from
> Qemu over VIRTIO and registers it on nvdimm_bus. It also
> creates a nd_region object with the persistent memory
> range information so that existing 'nvdimm/pmem' driver
> can reserve this into system memory map. This way
> 'virtio-pmem' driver uses existing functionality of pmem
> driver to register persistent memory compatible for DAX
> capable filesystems.
> 
> This also provides function to perform guest flush over
> VIRTIO from 'pmem' driver when userspace performs flush
> on DAX memory range.
> 
> Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
> ---
>  drivers/nvdimm/virtio_pmem.c     |  88 ++++++++++++++++++++++
>  drivers/virtio/Kconfig           |  10 +++
>  drivers/virtio/Makefile          |   1 +
>  drivers/virtio/pmem.c            | 124 +++++++++++++++++++++++++++++++
>  include/linux/virtio_pmem.h      |  60 +++++++++++++++
>  include/uapi/linux/virtio_ids.h  |   1 +
>  include/uapi/linux/virtio_pmem.h |  10 +++
>  7 files changed, 294 insertions(+)
>  create mode 100644 drivers/nvdimm/virtio_pmem.c
>  create mode 100644 drivers/virtio/pmem.c
>  create mode 100644 include/linux/virtio_pmem.h
>  create mode 100644 include/uapi/linux/virtio_pmem.h
> 
(...)
> diff --git a/drivers/virtio/pmem.c b/drivers/virtio/pmem.c
> new file mode 100644
> index 000000000000..cc9de9589d56
> --- /dev/null
> +++ b/drivers/virtio/pmem.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * virtio_pmem.c: Virtio pmem Driver
> + *
> + * Discovers persistent memory range information
> + * from host and registers the virtual pmem device
> + * with libnvdimm core.
> + */
> +#include <linux/virtio_pmem.h>
> +#include <../../drivers/nvdimm/nd.h>
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> + /* Initialize virt queue */
> +static int init_vq(struct virtio_pmem *vpmem)

IMHO, you don't gain much by splitting off this function...

> +{
> +	struct virtqueue *vq;
> +
> +	/* single vq */
> +	vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev,
> +				host_ack, "flush_queue");
> +	if (IS_ERR(vq))
> +		return PTR_ERR(vq);

I'm personally not a fan of chained assignments... I think I'd just
drop the 'vq' variable and operate on vpmem->req_vq directly.

> +
> +	spin_lock_init(&vpmem->pmem_lock);
> +	INIT_LIST_HEAD(&vpmem->req_list);
> +
> +	return 0;
> +};
> +
> +static int virtio_pmem_probe(struct virtio_device *vdev)
> +{
> +	int err = 0;
> +	struct resource res;
> +	struct virtio_pmem *vpmem;
> +	struct nvdimm_bus *nvdimm_bus;
> +	struct nd_region_desc ndr_desc = {};
> +	int nid = dev_to_node(&vdev->dev);
> +	struct nd_region *nd_region;
> +
> +	if (!vdev->config->get) {
> +		dev_err(&vdev->dev, "%s failure: config disabled\n",

Maybe s/config disabled/config access disabled/ ? That seems to be the
more common message.

> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem),
> +					GFP_KERNEL);

Here, the vpmem variable makes sense for convenience, but I'm again not
a fan of the chaining :)

> +	if (!vpmem) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	vpmem->vdev = vdev;
> +	err = init_vq(vpmem);
> +	if (err)
> +		goto out_err;
> +
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			start, &vpmem->start);
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			size, &vpmem->size);
> +
> +	res.start = vpmem->start;
> +	res.end   = vpmem->start + vpmem->size-1;
> +	vpmem->nd_desc.provider_name = "virtio-pmem";
> +	vpmem->nd_desc.module = THIS_MODULE;
> +
> +	vpmem->nvdimm_bus = nvdimm_bus = nvdimm_bus_register(&vdev->dev,
> +						&vpmem->nd_desc);

And here :)

> +	if (!nvdimm_bus)
> +		goto out_vq;
> +
> +	dev_set_drvdata(&vdev->dev, nvdimm_bus);
> +
> +	ndr_desc.res = &res;
> +	ndr_desc.numa_node = nid;
> +	ndr_desc.flush = virtio_pmem_flush;
> +	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
> +	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
> +	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
> +	nd_region->provider_data =  dev_to_virtio
> +					(nd_region->dev.parent->parent);

Isn't it clear that this parent chain will always end up at &vdev->dev?
Maybe simply set ->provider_data to vdev directly? (Does it need to
grab a reference count of the device, BTW?)

> +
> +	if (!nd_region)
> +		goto out_nd;

Probably better to do this check before you access nd_region's
members :)

> +
> +	return 0;
> +out_nd:
> +	err = -ENXIO;
> +	nvdimm_bus_unregister(nvdimm_bus);
> +out_vq:
> +	vdev->config->del_vqs(vdev);
> +out_err:
> +	dev_err(&vdev->dev, "failed to register virtio pmem memory\n");
> +	return err;
> +}
> +
> +static void virtio_pmem_remove(struct virtio_device *vdev)
> +{
> +	struct virtio_pmem *vpmem = vdev->priv;
> +	struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev);
> +
> +	nvdimm_bus_unregister(nvdimm_bus);

I haven't followed this around the nvdimm code, but is the nd_region
you created during probe cleaned up automatically, or would you need to
do something here?

> +	vdev->config->del_vqs(vdev);
> +	vdev->config->reset(vdev);
> +	kfree(vpmem);

You allocated vpmem via devm_kzalloc; isn't it freed automatically on
remove?

> +}
> +
> +static struct virtio_driver virtio_pmem_driver = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.probe			= virtio_pmem_probe,
> +	.remove			= virtio_pmem_remove,
> +};
> +
> +module_virtio_driver(virtio_pmem_driver);
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio pmem driver");
> +MODULE_LICENSE("GPL");

Only looked at this from the general virtio driver angle; seems fine
apart from some easy-to-fix issues and some personal style preference
things.

WARNING: multiple messages have this Message-ID (diff)
From: Cornelia Huck <cohuck@redhat.com>
To: Pankaj Gupta <pagupta@redhat.com>
Cc: jack@suse.cz, kvm@vger.kernel.org, mst@redhat.com,
	jasowang@redhat.com, david@fromorbit.com, qemu-devel@nongnu.org,
	virtualization@lists.linux-foundation.org,
	adilger.kernel@dilger.ca, zwisler@kernel.org,
	aarcange@redhat.com, dave.jiang@intel.com,
	linux-nvdimm@lists.01.org, vishal.l.verma@intel.com,
	david@redhat.com, willy@infradead.org, hch@infradead.org,
	linux-acpi@vger.kernel.org, jmoyer@redhat.com,
	linux-ext4@vger.kernel.org, lenb@kernel.org, kilobyte@angband.pl,
	riel@surriel.com, yuval.shaia@oracle.com, stefanha@redhat.com,
	pbonzini@redhat.com, dan.j.williams@intel.com,
	lcapitulino@redhat.com, kwolf@redhat.com, nilal@redhat.com,
	tytso@mit.edu, xiaoguangrong.eric@gmail.com,
	darrick.wong@oracle.com, rjw@rjwysocki.net,
	linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, imammedo@redhat.com
Subject: Re: [Qemu-devel] [PATCH v5 2/5] virtio-pmem: Add virtio pmem driver
Date: Wed, 10 Apr 2019 14:24:26 +0200	[thread overview]
Message-ID: <20190410142426.5bf0d9a4.cohuck@redhat.com> (raw)
Message-ID: <20190410122426.-kMX24ezSHgc0ABTyhyNS-zCG6lsOYmHrbDKETV7f4Q@z> (raw)
In-Reply-To: <20190410040826.24371-3-pagupta@redhat.com>

On Wed, 10 Apr 2019 09:38:22 +0530
Pankaj Gupta <pagupta@redhat.com> wrote:

> This patch adds virtio-pmem driver for KVM guest.
> 
> Guest reads the persistent memory range information from
> Qemu over VIRTIO and registers it on nvdimm_bus. It also
> creates a nd_region object with the persistent memory
> range information so that existing 'nvdimm/pmem' driver
> can reserve this into system memory map. This way
> 'virtio-pmem' driver uses existing functionality of pmem
> driver to register persistent memory compatible for DAX
> capable filesystems.
> 
> This also provides function to perform guest flush over
> VIRTIO from 'pmem' driver when userspace performs flush
> on DAX memory range.
> 
> Signed-off-by: Pankaj Gupta <pagupta@redhat.com>
> ---
>  drivers/nvdimm/virtio_pmem.c     |  88 ++++++++++++++++++++++
>  drivers/virtio/Kconfig           |  10 +++
>  drivers/virtio/Makefile          |   1 +
>  drivers/virtio/pmem.c            | 124 +++++++++++++++++++++++++++++++
>  include/linux/virtio_pmem.h      |  60 +++++++++++++++
>  include/uapi/linux/virtio_ids.h  |   1 +
>  include/uapi/linux/virtio_pmem.h |  10 +++
>  7 files changed, 294 insertions(+)
>  create mode 100644 drivers/nvdimm/virtio_pmem.c
>  create mode 100644 drivers/virtio/pmem.c
>  create mode 100644 include/linux/virtio_pmem.h
>  create mode 100644 include/uapi/linux/virtio_pmem.h
> 
(...)
> diff --git a/drivers/virtio/pmem.c b/drivers/virtio/pmem.c
> new file mode 100644
> index 000000000000..cc9de9589d56
> --- /dev/null
> +++ b/drivers/virtio/pmem.c
> @@ -0,0 +1,124 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * virtio_pmem.c: Virtio pmem Driver
> + *
> + * Discovers persistent memory range information
> + * from host and registers the virtual pmem device
> + * with libnvdimm core.
> + */
> +#include <linux/virtio_pmem.h>
> +#include <../../drivers/nvdimm/nd.h>
> +
> +static struct virtio_device_id id_table[] = {
> +	{ VIRTIO_ID_PMEM, VIRTIO_DEV_ANY_ID },
> +	{ 0 },
> +};
> +
> + /* Initialize virt queue */
> +static int init_vq(struct virtio_pmem *vpmem)

IMHO, you don't gain much by splitting off this function...

> +{
> +	struct virtqueue *vq;
> +
> +	/* single vq */
> +	vpmem->req_vq = vq = virtio_find_single_vq(vpmem->vdev,
> +				host_ack, "flush_queue");
> +	if (IS_ERR(vq))
> +		return PTR_ERR(vq);

I'm personally not a fan of chained assignments... I think I'd just
drop the 'vq' variable and operate on vpmem->req_vq directly.

> +
> +	spin_lock_init(&vpmem->pmem_lock);
> +	INIT_LIST_HEAD(&vpmem->req_list);
> +
> +	return 0;
> +};
> +
> +static int virtio_pmem_probe(struct virtio_device *vdev)
> +{
> +	int err = 0;
> +	struct resource res;
> +	struct virtio_pmem *vpmem;
> +	struct nvdimm_bus *nvdimm_bus;
> +	struct nd_region_desc ndr_desc = {};
> +	int nid = dev_to_node(&vdev->dev);
> +	struct nd_region *nd_region;
> +
> +	if (!vdev->config->get) {
> +		dev_err(&vdev->dev, "%s failure: config disabled\n",

Maybe s/config disabled/config access disabled/ ? That seems to be the
more common message.

> +			__func__);
> +		return -EINVAL;
> +	}
> +
> +	vdev->priv = vpmem = devm_kzalloc(&vdev->dev, sizeof(*vpmem),
> +					GFP_KERNEL);

Here, the vpmem variable makes sense for convenience, but I'm again not
a fan of the chaining :)

> +	if (!vpmem) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	vpmem->vdev = vdev;
> +	err = init_vq(vpmem);
> +	if (err)
> +		goto out_err;
> +
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			start, &vpmem->start);
> +	virtio_cread(vpmem->vdev, struct virtio_pmem_config,
> +			size, &vpmem->size);
> +
> +	res.start = vpmem->start;
> +	res.end   = vpmem->start + vpmem->size-1;
> +	vpmem->nd_desc.provider_name = "virtio-pmem";
> +	vpmem->nd_desc.module = THIS_MODULE;
> +
> +	vpmem->nvdimm_bus = nvdimm_bus = nvdimm_bus_register(&vdev->dev,
> +						&vpmem->nd_desc);

And here :)

> +	if (!nvdimm_bus)
> +		goto out_vq;
> +
> +	dev_set_drvdata(&vdev->dev, nvdimm_bus);
> +
> +	ndr_desc.res = &res;
> +	ndr_desc.numa_node = nid;
> +	ndr_desc.flush = virtio_pmem_flush;
> +	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
> +	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
> +	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
> +	nd_region->provider_data =  dev_to_virtio
> +					(nd_region->dev.parent->parent);

Isn't it clear that this parent chain will always end up at &vdev->dev?
Maybe simply set ->provider_data to vdev directly? (Does it need to
grab a reference count of the device, BTW?)

> +
> +	if (!nd_region)
> +		goto out_nd;

Probably better to do this check before you access nd_region's
members :)

> +
> +	return 0;
> +out_nd:
> +	err = -ENXIO;
> +	nvdimm_bus_unregister(nvdimm_bus);
> +out_vq:
> +	vdev->config->del_vqs(vdev);
> +out_err:
> +	dev_err(&vdev->dev, "failed to register virtio pmem memory\n");
> +	return err;
> +}
> +
> +static void virtio_pmem_remove(struct virtio_device *vdev)
> +{
> +	struct virtio_pmem *vpmem = vdev->priv;
> +	struct nvdimm_bus *nvdimm_bus = dev_get_drvdata(&vdev->dev);
> +
> +	nvdimm_bus_unregister(nvdimm_bus);

I haven't followed this around the nvdimm code, but is the nd_region
you created during probe cleaned up automatically, or would you need to
do something here?

> +	vdev->config->del_vqs(vdev);
> +	vdev->config->reset(vdev);
> +	kfree(vpmem);

You allocated vpmem via devm_kzalloc; isn't it freed automatically on
remove?

> +}
> +
> +static struct virtio_driver virtio_pmem_driver = {
> +	.driver.name		= KBUILD_MODNAME,
> +	.driver.owner		= THIS_MODULE,
> +	.id_table		= id_table,
> +	.probe			= virtio_pmem_probe,
> +	.remove			= virtio_pmem_remove,
> +};
> +
> +module_virtio_driver(virtio_pmem_driver);
> +MODULE_DEVICE_TABLE(virtio, id_table);
> +MODULE_DESCRIPTION("Virtio pmem driver");
> +MODULE_LICENSE("GPL");

Only looked at this from the general virtio driver angle; seems fine
apart from some easy-to-fix issues and some personal style preference
things.


  parent reply	other threads:[~2019-04-10 12:24 UTC|newest]

Thread overview: 221+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-04-10  4:08 [PATCH v5 0/6] virtio pmem driver Pankaj Gupta
2019-04-10  4:08 ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08 ` [PATCH v5 1/6] libnvdimm: nd_region flush callback support Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08   ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-11 14:51   ` Dan Williams
2019-04-11 14:51   ` Dan Williams
2019-04-11 14:51     ` [Qemu-devel] " Dan Williams
2019-04-11 14:51     ` Dan Williams
2019-04-11 14:51     ` Dan Williams
2019-04-11 14:51     ` Dan Williams
2019-04-11 15:57     ` [Qemu-devel] " Pankaj Gupta
2019-04-11 15:57       ` Pankaj Gupta
2019-04-11 15:57       ` Pankaj Gupta
2019-04-11 15:57       ` Pankaj Gupta
2019-04-11 16:09       ` Dan Williams
2019-04-11 16:09       ` Dan Williams
2019-04-11 16:09         ` Dan Williams
2019-04-11 16:09         ` Dan Williams
2019-04-11 16:09         ` Dan Williams
2019-04-11 16:23         ` Pankaj Gupta
2019-04-11 16:23         ` Pankaj Gupta
2019-04-11 16:23           ` Pankaj Gupta
2019-04-11 16:23           ` Pankaj Gupta
2019-04-11 16:23           ` Pankaj Gupta
2019-04-11 15:57     ` Pankaj Gupta
2019-04-12  8:32     ` Jan Kara
2019-04-12  8:32       ` [Qemu-devel] " Jan Kara
2019-04-12  8:32       ` Jan Kara
2019-04-12  8:32       ` Jan Kara
2019-04-12  8:32       ` Jan Kara
2019-04-12 13:12       ` Jeff Moyer
2019-04-12 13:12       ` Jeff Moyer
2019-04-12 13:12         ` [Qemu-devel] " Jeff Moyer
2019-04-12 13:12         ` Jeff Moyer
2019-04-12 13:12         ` Jeff Moyer
2019-04-12 13:12         ` Jeff Moyer
2019-04-18  6:27         ` [Qemu-devel] " Pankaj Gupta
2019-04-18  6:27           ` Pankaj Gupta
2019-04-18  6:27           ` Pankaj Gupta
2019-04-18  6:27           ` Pankaj Gupta
2019-04-18 16:05         ` Dan Williams
2019-04-18 16:05           ` [Qemu-devel] " Dan Williams
2019-04-18 16:05           ` Dan Williams
2019-04-18 16:05           ` Dan Williams
2019-04-18 16:05           ` Dan Williams
2019-04-18 16:10           ` Jeff Moyer
2019-04-18 16:10             ` [Qemu-devel] " Jeff Moyer
2019-04-18 16:10             ` Jeff Moyer
2019-04-18 16:10             ` Jeff Moyer
2019-04-18 16:10             ` Jeff Moyer
2019-04-18 16:10           ` Jeff Moyer
2019-04-18 16:18           ` Christoph Hellwig
2019-04-18 16:18           ` Christoph Hellwig
2019-04-18 16:18             ` [Qemu-devel] " Christoph Hellwig
2019-04-18 16:18             ` Christoph Hellwig
2019-04-18 16:18             ` Christoph Hellwig
2019-04-18 16:18             ` Christoph Hellwig
2019-04-18 18:14             ` Dan Williams
2019-04-18 18:14             ` Dan Williams
2019-04-18 18:14               ` [Qemu-devel] " Dan Williams
2019-04-18 18:14               ` Dan Williams
2019-04-18 18:14               ` Dan Williams
2019-04-18 18:14               ` Dan Williams
2019-04-22 15:51               ` Jeff Moyer
2019-04-22 15:51               ` Jeff Moyer
2019-04-22 15:51                 ` [Qemu-devel] " Jeff Moyer
2019-04-22 15:51                 ` Jeff Moyer
2019-04-22 15:51                 ` Jeff Moyer
2019-04-22 15:51                 ` Jeff Moyer
2019-04-22 19:44                 ` Dan Williams
2019-04-22 19:44                   ` [Qemu-devel] " Dan Williams
2019-04-22 19:44                   ` Dan Williams
2019-04-22 19:44                   ` Dan Williams
2019-04-22 19:44                   ` Dan Williams
2019-04-22 21:03                   ` Jeff Moyer
2019-04-22 21:03                   ` Jeff Moyer
2019-04-22 21:03                     ` [Qemu-devel] " Jeff Moyer
2019-04-22 21:03                     ` Jeff Moyer
2019-04-22 21:03                     ` Jeff Moyer
2019-04-22 21:03                     ` Jeff Moyer
2019-04-23  4:07                     ` Pankaj Gupta
2019-04-23  4:07                       ` [Qemu-devel] " Pankaj Gupta
2019-04-23  4:07                       ` Pankaj Gupta
2019-04-23  4:07                       ` Pankaj Gupta
2019-04-23  4:07                       ` Pankaj Gupta
2019-04-23  4:07                     ` Pankaj Gupta
2019-04-22 19:44                 ` Dan Williams
2019-04-18 16:05         ` Dan Williams
2019-04-12  8:32     ` Jan Kara
2019-04-10  4:08 ` [PATCH v5 2/5] virtio-pmem: Add virtio pmem driver Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08   ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10 12:24   ` Cornelia Huck
2019-04-10 12:24   ` Cornelia Huck [this message]
2019-04-10 12:24     ` [Qemu-devel] " Cornelia Huck
2019-04-10 12:24     ` Cornelia Huck
2019-04-10 12:24     ` Cornelia Huck
2019-04-10 12:24     ` Cornelia Huck
2019-04-10 14:38     ` Yuval Shaia
2019-04-10 14:38     ` Yuval Shaia
2019-04-10 14:38       ` [Qemu-devel] " Yuval Shaia
2019-04-10 14:38       ` Yuval Shaia
2019-04-10 14:38       ` Yuval Shaia
2019-04-10 14:38       ` Yuval Shaia
2019-04-10 14:38       ` Yuval Shaia
2019-04-10 15:44       ` Pankaj Gupta
2019-04-10 15:44         ` [Qemu-devel] " Pankaj Gupta
2019-04-10 15:44         ` Pankaj Gupta
2019-04-10 15:44         ` Pankaj Gupta
2019-04-10 15:44         ` Pankaj Gupta
2019-04-10 15:44       ` Pankaj Gupta
2019-04-10 15:38     ` Pankaj Gupta
2019-04-10 15:38     ` Pankaj Gupta
2019-04-10 15:38       ` [Qemu-devel] " Pankaj Gupta
2019-04-10 15:38       ` Pankaj Gupta
2019-04-10 15:38       ` Pankaj Gupta
2019-04-10 15:38       ` Pankaj Gupta
2019-04-10 13:12   ` Michael S. Tsirkin
2019-04-10 13:12   ` Michael S. Tsirkin
2019-04-10 13:12     ` [Qemu-devel] " Michael S. Tsirkin
2019-04-10 13:12     ` Michael S. Tsirkin
2019-04-10 13:12     ` Michael S. Tsirkin
2019-04-10 13:12     ` Michael S. Tsirkin
2019-04-10 14:03     ` [Qemu-devel] " Pankaj Gupta
2019-04-10 14:03       ` Pankaj Gupta
2019-04-10 14:03       ` Pankaj Gupta
2019-04-10 14:03       ` Pankaj Gupta
2019-04-10 14:31       ` Cornelia Huck
2019-04-10 14:31       ` Cornelia Huck
2019-04-10 14:31         ` Cornelia Huck
2019-04-10 14:31         ` Cornelia Huck
2019-04-10 14:31         ` Cornelia Huck
2019-04-10 16:46         ` Michael S. Tsirkin
2019-04-10 16:46           ` Michael S. Tsirkin
2019-04-10 16:46           ` Michael S. Tsirkin
2019-04-10 16:46           ` Michael S. Tsirkin
2019-04-10 16:52           ` Cornelia Huck
2019-04-10 16:52           ` Cornelia Huck
2019-04-10 16:52             ` Cornelia Huck
2019-04-10 16:52             ` Cornelia Huck
2019-04-10 16:52             ` Cornelia Huck
2019-04-10 16:46         ` Michael S. Tsirkin
2019-04-10 14:03     ` Pankaj Gupta
2019-04-10 14:41   ` Yuval Shaia
2019-04-10 14:41   ` Yuval Shaia
2019-04-10 14:41     ` [Qemu-devel] " Yuval Shaia
2019-04-10 14:41     ` Yuval Shaia
2019-04-10 14:41     ` Yuval Shaia
2019-04-10 14:41     ` Yuval Shaia
2019-04-10  4:08 ` [PATCH v5 3/6] libnvdimm: add dax_dev sync flag Pankaj Gupta
2019-04-10  4:08   ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  8:28   ` Jan Kara
2019-04-10  8:28     ` [Qemu-devel] " Jan Kara
2019-04-10  8:28     ` Jan Kara
2019-04-10  8:28     ` Jan Kara
2019-04-10  8:28     ` Jan Kara
2019-04-10  8:38     ` Pankaj Gupta
2019-04-10  8:38       ` [Qemu-devel] " Pankaj Gupta
2019-04-10  8:38       ` Pankaj Gupta
2019-04-10  8:38       ` Pankaj Gupta
2019-04-10  8:38       ` Pankaj Gupta
2019-04-10  8:38     ` Pankaj Gupta
2019-04-10  8:28   ` Jan Kara
2019-04-11 14:56   ` Dan Williams
2019-04-11 14:56     ` [Qemu-devel] " Dan Williams
2019-04-11 14:56     ` Dan Williams
2019-04-11 14:56     ` Dan Williams
2019-04-11 14:56     ` Dan Williams
2019-04-11 15:39     ` Pankaj Gupta
2019-04-11 15:39       ` [Qemu-devel] " Pankaj Gupta
2019-04-11 15:39       ` Pankaj Gupta
2019-04-11 15:39       ` Pankaj Gupta
2019-04-11 15:39       ` Pankaj Gupta
2019-04-11 15:39     ` Pankaj Gupta
2019-04-11 14:56   ` Dan Williams
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08 ` [PATCH v5 4/6] dax: check synchronous mapping is supported Pankaj Gupta
2019-04-10  4:08   ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  8:25   ` Jan Kara
2019-04-10  8:25   ` Jan Kara
2019-04-10  8:25     ` [Qemu-devel] " Jan Kara
2019-04-10  8:25     ` Jan Kara
2019-04-10  8:25     ` Jan Kara
2019-04-10  8:25     ` Jan Kara
2019-04-10  8:31     ` Pankaj Gupta
2019-04-10  8:31       ` [Qemu-devel] " Pankaj Gupta
2019-04-10  8:31       ` Pankaj Gupta
2019-04-10  8:31       ` Pankaj Gupta
2019-04-10  8:31       ` Pankaj Gupta
2019-04-10  8:31     ` Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08 ` [PATCH v5 5/6] ext4: disable map_sync for async flush Pankaj Gupta
2019-04-10  4:08   ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
2019-04-10  4:08 ` [PATCH v5 6/6] xfs: " Pankaj Gupta
2019-04-10  4:08   ` [Qemu-devel] " Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08   ` Pankaj Gupta
2019-04-10  4:08 ` Pankaj Gupta
     [not found] ` <20190410040826.24371-1-pagupta-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2019-04-10  8:08   ` [PATCH v5 0/6] virtio pmem driver Arkadiusz Miśkiewicz
2019-04-10  8:08     ` [Qemu-devel] " Arkadiusz Miśkiewicz
2019-04-10  8:08     ` Arkadiusz Miśkiewicz
2019-04-10  8:08 ` Arkadiusz Miśkiewicz

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190410142426.5bf0d9a4.cohuck@redhat.com \
    --to=cohuck@redhat.com \
    --cc=aarcange@redhat.com \
    --cc=adilger.kernel@dilger.ca \
    --cc=darrick.wong@oracle.com \
    --cc=david@fromorbit.com \
    --cc=david@redhat.com \
    --cc=hch@infradead.org \
    --cc=imammedo@redhat.com \
    --cc=jack@suse.cz \
    --cc=jasowang@redhat.com \
    --cc=kilobyte@angband.pl \
    --cc=kvm@vger.kernel.org \
    --cc=kwolf@redhat.com \
    --cc=lcapitulino@redhat.com \
    --cc=lenb@kernel.org \
    --cc=linux-acpi@vger.kernel.org \
    --cc=linux-ext4@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nvdimm@lists.01.org \
    --cc=linux-xfs@vger.kernel.org \
    --cc=mst@redhat.com \
    --cc=nilal@redhat.com \
    --cc=pagupta@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=riel@surriel.com \
    --cc=rjw@rjwysocki.net \
    --cc=stefanha@redhat.com \
    --cc=tytso@mit.edu \
    --cc=virtualization@lists.linux-foundation.org \
    --cc=willy@infradead.org \
    --cc=xiaoguangrong.eric@gmail.com \
    --cc=yuval.shaia@oracle.com \
    --cc=zwisler@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.