From: David Hildenbrand <david@redhat.com>
To: Pankaj Gupta <pagupta@redhat.com>,
linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
qemu-devel@nongnu.org, linux-nvdimm@ml01.01.org
Cc: jack@suse.cz, stefanha@redhat.com, dan.j.williams@intel.com,
riel@surriel.com, nilal@redhat.com, kwolf@redhat.com,
pbonzini@redhat.com, ross.zwisler@intel.com,
xiaoguangrong.eric@gmail.com, hch@infradead.org, mst@redhat.com,
niteshnarayanlal@hotmail.com, lcapitulino@redhat.com,
imammedo@redhat.com, eblake@redhat.com
Subject: Re: [PATCH] qemu: Add virtio pmem device
Date: Thu, 20 Sep 2018 13:21:18 +0200 [thread overview]
Message-ID: <2721c3ee-88d1-a8e9-1f1e-ffc3eef1d1ca@redhat.com> (raw)
In-Reply-To: <20180831133019.27579-5-pagupta@redhat.com>
> @@ -0,0 +1,241 @@
> +/*
> + * Virtio pmem device
> + *
> + * Copyright (C) 2018 Red Hat, Inc.
> + * Copyright (C) 2018 Pankaj Gupta <pagupta@redhat.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2.
> + * See the COPYING file in the top-level directory.
> + *
> + */
> +
> +#include "qemu/osdep.h"
> +#include "qapi/error.h"
> +#include "qemu-common.h"
> +#include "qemu/error-report.h"
> +#include "hw/virtio/virtio-access.h"
> +#include "hw/virtio/virtio-pmem.h"
> +#include "hw/mem/memory-device.h"
> +#include "block/aio.h"
> +#include "block/thread-pool.h"
> +
> +typedef struct VirtIOPMEMresp {
> + int ret;
> +} VirtIOPMEMResp;
> +
> +typedef struct VirtIODeviceRequest {
> + VirtQueueElement elem;
> + int fd;
> + VirtIOPMEM *pmem;
> + VirtIOPMEMResp resp;
> +} VirtIODeviceRequest;
Both the response and the request structs have to go into a Linux header (along
with a header sync patch).
Also, you are using the same request struct for host<->guest handling and for
internal purposes. The fd and pmem pointers definitely don't belong here.
Use a separate struct for internal handling purposes (the one passed to worker_cb).
> +
> +static int worker_cb(void *opaque)
> +{
> + VirtIODeviceRequest *req = opaque;
> + int err = 0;
> +
> + /* flush raw backing image */
> + err = fsync(req->fd);
> + if (err != 0) {
> + err = EIO;
> + }
> + req->resp.ret = err;
> +
> + return 0;
> +}
> +
> +static void done_cb(void *opaque, int ret)
> +{
> + VirtIODeviceRequest *req = opaque;
> + int len = iov_from_buf(req->elem.in_sg, req->elem.in_num, 0,
> + &req->resp, sizeof(VirtIOPMEMResp));
> +
> + /* Callbacks are serialized, so no need to use atomic ops. */
> + virtqueue_push(req->pmem->rq_vq, &req->elem, len);
> + virtio_notify((VirtIODevice *)req->pmem, req->pmem->rq_vq);
> + g_free(req);
> +}
> +
> +static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
> +{
> + VirtIODeviceRequest *req;
> + VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
> + HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev);
> + ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
> +
> + req = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
> + if (!req) {
> + virtio_error(vdev, "virtio-pmem missing request data");
> + return;
> + }
> +
> + if (req->elem.out_num < 1 || req->elem.in_num < 1) {
> + virtio_error(vdev, "virtio-pmem request not proper");
> + g_free(req);
> + return;
> + }
> + req->fd = memory_region_get_fd(&backend->mr);
> + req->pmem = pmem;
> + thread_pool_submit_aio(pool, worker_cb, req, done_cb, req);
> +}
> +
> +static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config)
> +{
> + VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
> + struct virtio_pmem_config *pmemcfg = (struct virtio_pmem_config *) config;
> +
> + virtio_stq_p(vdev, &pmemcfg->start, pmem->start);
> + virtio_stq_p(vdev, &pmemcfg->size, pmem->size);
> +}
> +
> +static uint64_t virtio_pmem_get_features(VirtIODevice *vdev, uint64_t features,
> + Error **errp)
> +{
> + return features;
> +}
> +
> +static void virtio_pmem_realize(DeviceState *dev, Error **errp)
> +{
> + VirtIODevice *vdev = VIRTIO_DEVICE(dev);
> + VirtIOPMEM *pmem = VIRTIO_PMEM(dev);
> + MachineState *ms = MACHINE(qdev_get_machine());
> + uint64_t align;
> + Error *local_err = NULL;
> + MemoryRegion *mr;
> +
> + if (!pmem->memdev) {
> + error_setg(errp, "virtio-pmem memdev not set");
> + return;
> + }
> +
> + mr = host_memory_backend_get_memory(pmem->memdev);
> + align = memory_region_get_alignment(mr);
> + pmem->size = QEMU_ALIGN_DOWN(memory_region_size(mr), align);
> + pmem->start = memory_device_get_free_addr(ms, NULL, align, pmem->size,
> + &local_err);
> + if (local_err) {
> + error_setg(errp, "Can't get free address in mem device");
> + return;
> + }
> + memory_region_init_alias(&pmem->mr, OBJECT(pmem),
> + "virtio_pmem-memory", mr, 0, pmem->size);
> + memory_device_plug_region(ms, &pmem->mr, pmem->start);
> +
> + host_memory_backend_set_mapped(pmem->memdev, true);
> + virtio_init(vdev, TYPE_VIRTIO_PMEM, VIRTIO_ID_PMEM,
> + sizeof(struct virtio_pmem_config));
> + pmem->rq_vq = virtio_add_queue(vdev, 128, virtio_pmem_flush);
> +}
> +
> +static void virtio_mem_check_memdev(Object *obj, const char *name, Object *val,
> + Error **errp)
> +{
> + if (host_memory_backend_is_mapped(MEMORY_BACKEND(val))) {
> + char *path = object_get_canonical_path_component(val);
> + error_setg(errp, "Can't use already busy memdev: %s", path);
> + g_free(path);
> + return;
> + }
> +
> + qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
> +}
> +
> +static const char *virtio_pmem_get_device_id(VirtIOPMEM *vm)
> +{
> + Object *obj = OBJECT(vm);
> + DeviceState *parent_dev;
> +
> + /* always use the ID of the proxy device */
> + if (obj->parent && object_dynamic_cast(obj->parent, TYPE_DEVICE)) {
> + parent_dev = DEVICE(obj->parent);
> + return parent_dev->id;
> + }
> + return NULL;
> +}
> +
> +static void virtio_pmem_md_fill_device_info(const MemoryDeviceState *md,
> + MemoryDeviceInfo *info)
> +{
> + VirtioPMemDeviceInfo *vi = g_new0(VirtioPMemDeviceInfo, 1);
> + VirtIOPMEM *vm = VIRTIO_PMEM(md);
> + const char *id = virtio_pmem_get_device_id(vm);
> +
> + if (id) {
> + vi->has_id = true;
> + vi->id = g_strdup(id);
> + }
> +
> + vi->start = vm->start;
> + vi->size = vm->size;
> + vi->memdev = object_get_canonical_path(OBJECT(vm->memdev));
> +
> + info->u.virtio_pmem.data = vi;
> + info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM;
> +}
> +
> +static uint64_t virtio_pmem_md_get_addr(const MemoryDeviceState *md)
> +{
> + VirtIOPMEM *vm = VIRTIO_PMEM(md);
> +
> + return vm->start;
> +}
> +
> +static uint64_t virtio_pmem_md_get_plugged_size(const MemoryDeviceState *md)
> +{
> + VirtIOPMEM *vm = VIRTIO_PMEM(md);
> +
> + return vm->size;
> +}
> +
> +static uint64_t virtio_pmem_md_get_region_size(const MemoryDeviceState *md)
> +{
> + VirtIOPMEM *vm = VIRTIO_PMEM(md);
> +
> + return vm->size;
> +}
> +
> +static void virtio_pmem_instance_init(Object *obj)
> +{
> + VirtIOPMEM *vm = VIRTIO_PMEM(obj);
> + object_property_add_link(obj, "memdev", TYPE_MEMORY_BACKEND,
> + (Object **)&vm->memdev,
> + (void *) virtio_mem_check_memdev,
> + OBJ_PROP_LINK_STRONG,
> + &error_abort);
> +}
> +
> +
> +static void virtio_pmem_class_init(ObjectClass *klass, void *data)
> +{
> + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
> + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass);
> +
> + vdc->realize = virtio_pmem_realize;
> + vdc->get_config = virtio_pmem_get_config;
> + vdc->get_features = virtio_pmem_get_features;
> +
> + mdc->get_addr = virtio_pmem_md_get_addr;
> + mdc->get_plugged_size = virtio_pmem_md_get_plugged_size;
> + mdc->get_region_size = virtio_pmem_md_get_region_size;
> + mdc->fill_device_info = virtio_pmem_md_fill_device_info;
> +}
> +
> +static TypeInfo virtio_pmem_info = {
> + .name = TYPE_VIRTIO_PMEM,
> + .parent = TYPE_VIRTIO_DEVICE,
> + .class_init = virtio_pmem_class_init,
> + .instance_size = sizeof(VirtIOPMEM),
> + .instance_init = virtio_pmem_instance_init,
> + .interfaces = (InterfaceInfo[]) {
> + { TYPE_MEMORY_DEVICE },
> + { }
> + },
> +};
> +
> +static void virtio_register_types(void)
> +{
> + type_register_static(&virtio_pmem_info);
> +}
> +
> +type_init(virtio_register_types)
> diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
> index 990d6fcbde..28829b6437 100644
> --- a/include/hw/pci/pci.h
> +++ b/include/hw/pci/pci.h
> @@ -85,6 +85,7 @@ extern bool pci_available;
> #define PCI_DEVICE_ID_VIRTIO_RNG 0x1005
> #define PCI_DEVICE_ID_VIRTIO_9P 0x1009
> #define PCI_DEVICE_ID_VIRTIO_VSOCK 0x1012
> +#define PCI_DEVICE_ID_VIRTIO_PMEM 0x1013
>
> #define PCI_VENDOR_ID_REDHAT 0x1b36
> #define PCI_DEVICE_ID_REDHAT_BRIDGE 0x0001
> diff --git a/include/hw/virtio/virtio-pmem.h b/include/hw/virtio/virtio-pmem.h
> new file mode 100644
> index 0000000000..fda3ee691c
> --- /dev/null
> +++ b/include/hw/virtio/virtio-pmem.h
> @@ -0,0 +1,42 @@
> +/*
> + * Virtio pmem Device
> + *
> + * Copyright Red Hat, Inc. 2018
> + * Copyright Pankaj Gupta <pagupta@redhat.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or
> + * (at your option) any later version. See the COPYING file in the
> + * top-level directory.
> + */
> +
> +#ifndef QEMU_VIRTIO_PMEM_H
> +#define QEMU_VIRTIO_PMEM_H
> +
> +#include "hw/virtio/virtio.h"
> +#include "exec/memory.h"
> +#include "sysemu/hostmem.h"
> +#include "standard-headers/linux/virtio_ids.h"
> +#include "hw/boards.h"
> +#include "hw/i386/pc.h"
> +
> +#define TYPE_VIRTIO_PMEM "virtio-pmem"
> +
> +#define VIRTIO_PMEM(obj) \
> + OBJECT_CHECK(VirtIOPMEM, (obj), TYPE_VIRTIO_PMEM)
> +
> +/* VirtIOPMEM device structure */
> +typedef struct VirtIOPMEM {
> + VirtIODevice parent_obj;
> +
> + VirtQueue *rq_vq;
> + uint64_t start;
> + uint64_t size;
> + MemoryRegion mr;
> + HostMemoryBackend *memdev;
> +} VirtIOPMEM;
> +
> +struct virtio_pmem_config {
> + uint64_t start;
> + uint64_t size;
> +};
> +#endif
> diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h
> index 6d5c3b2d4f..346389565a 100644
> --- a/include/standard-headers/linux/virtio_ids.h
> +++ b/include/standard-headers/linux/virtio_ids.h
> @@ -43,5 +43,6 @@
> #define VIRTIO_ID_INPUT 18 /* virtio input */
> #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
> #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
> +#define VIRTIO_ID_PMEM 25 /* virtio pmem */
This should be moved to a Linux header sync patch.
--
Thanks,
David / dhildenb
next prev parent reply other threads:[~2018-09-20 11:21 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-08-31 13:30 [PATCH 0/3] kvm "fake DAX" device Pankaj Gupta
[not found] ` <20180831133019.27579-1-pagupta-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-08-31 13:30 ` [PATCH 1/3] nd: move nd_region to common header Pankaj Gupta
[not found] ` <20180831133019.27579-2-pagupta-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-09-22 0:47 ` Dan Williams
[not found] ` <CAPcyv4jFimkVnVuzza5TCG=KvY88KZnXzH4GNEgUBbTouprzJA-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-09-24 11:40 ` Pankaj Gupta
2018-08-31 13:30 ` [PATCH 2/3] libnvdimm: nd_region flush callback support Pankaj Gupta
2018-09-04 15:29 ` kbuild test robot
[not found] ` <20180904152917.GE17047-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
2018-09-05 8:40 ` Pankaj Gupta
2018-09-22 0:43 ` Dan Williams
2018-09-24 11:07 ` Pankaj Gupta
2018-08-31 13:30 ` [PATCH 3/3] virtio-pmem: Add virtio pmem driver Pankaj Gupta
[not found] ` <20180831133019.27579-4-pagupta-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-09-04 15:17 ` kbuild test robot
2018-09-05 8:34 ` Pankaj Gupta
2018-09-05 12:02 ` kbuild test robot
2018-09-12 16:54 ` Luiz Capitulino
2018-09-13 6:58 ` [Qemu-devel] " Pankaj Gupta
[not found] ` <831225077.12817716.1536821901550.JavaMail.zimbra-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-09-13 12:19 ` Luiz Capitulino
2018-09-14 12:13 ` Pankaj Gupta
2018-09-22 1:08 ` Dan Williams
2018-09-24 9:41 ` Pankaj Gupta
2018-09-27 13:06 ` Pankaj Gupta
2018-09-27 15:55 ` Dan Williams
2018-08-31 13:30 ` [PATCH] qemu: Add virtio pmem device Pankaj Gupta
[not found] ` <20180831133019.27579-5-pagupta-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-09-12 16:57 ` Luiz Capitulino
2018-09-13 7:06 ` Pankaj Gupta
[not found] ` <563893075.12819183.1536822387535.JavaMail.zimbra-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-09-13 12:22 ` Luiz Capitulino
2018-09-20 11:21 ` David Hildenbrand [this message]
[not found] ` <2721c3ee-88d1-a8e9-1f1e-ffc3eef1d1ca-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-09-20 12:03 ` [Qemu-devel] " Pankaj Gupta
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=2721c3ee-88d1-a8e9-1f1e-ffc3eef1d1ca@redhat.com \
--to=david@redhat.com \
--cc=dan.j.williams@intel.com \
--cc=eblake@redhat.com \
--cc=hch@infradead.org \
--cc=imammedo@redhat.com \
--cc=jack@suse.cz \
--cc=kvm@vger.kernel.org \
--cc=kwolf@redhat.com \
--cc=lcapitulino@redhat.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nvdimm@ml01.01.org \
--cc=mst@redhat.com \
--cc=nilal@redhat.com \
--cc=niteshnarayanlal@hotmail.com \
--cc=pagupta@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=riel@surriel.com \
--cc=ross.zwisler@intel.com \
--cc=stefanha@redhat.com \
--cc=xiaoguangrong.eric@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).