From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> To: iommu@lists.linux-foundation.org, kvm@vger.kernel.org, virtualization@lists.linux-foundation.org, virtio-dev@lists.oasis-open.org, kvmarm@lists.cs.columbia.edu Cc: joro@8bytes.org, alex.williamson@redhat.com, mst@redhat.com, jasowang@redhat.com, marc.zyngier@arm.com, robin.murphy@arm.com, will.deacon@arm.com, lorenzo.pieralisi@arm.com, eric.auger@redhat.com, eric.auger.pro@gmail.com, peterx@redhat.com, bharat.bhushan@nxp.com, tnowicki@caviumnetworks.com, jayachandran.nair@cavium.com, kevin.tian@intel.com, jintack@cs.columbia.edu Subject: [PATCH 3/4] iommu/virtio: Add event queue Date: Wed, 14 Feb 2018 14:53:39 +0000 [thread overview] Message-ID: <20180214145340.1223-4-jean-philippe.brucker@arm.com> (raw) In-Reply-To: <20180214145340.1223-1-jean-philippe.brucker@arm.com> The event queue offers a way for the device to report access faults from endpoints. It is implemented on virtqueue #1. Whenever the host needs to signal a fault, it fills one of the buffers offered by the guest and interrupts it. 
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> --- drivers/iommu/virtio-iommu.c | 139 ++++++++++++++++++++++++++++++++++---- include/uapi/linux/virtio_iommu.h | 18 +++++ 2 files changed, 143 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 3ac4b38eaf19..6b96f1b36d5a 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -30,6 +30,12 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +enum viommu_vq_idx { + VIOMMU_REQUEST_VQ = 0, + VIOMMU_EVENT_VQ = 1, + VIOMMU_NUM_VQS = 2, +}; + struct viommu_dev { struct iommu_device iommu; struct device *dev; @@ -37,9 +43,10 @@ struct viommu_dev { struct ida domain_ids; - struct virtqueue *vq; + struct virtqueue *vqs[VIOMMU_NUM_VQS]; /* Serialize anything touching the request queue */ spinlock_t request_lock; + void *evts; /* Device configuration */ struct iommu_domain_geometry geometry; @@ -84,6 +91,15 @@ struct viommu_request { struct list_head list; }; +#define VIOMMU_FAULT_RESV_MASK 0xffffff00 + +struct viommu_event { + union { + u32 head; + struct virtio_iommu_fault fault; + }; +}; + #define to_viommu_domain(domain) \ container_of(domain, struct viommu_domain, domain) @@ -161,12 +177,13 @@ static int viommu_receive_resp(struct viommu_dev *viommu, int nr_sent, unsigned int len; int nr_received = 0; struct viommu_request *req, *pending; + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; pending = list_first_entry_or_null(sent, struct viommu_request, list); if (WARN_ON(!pending)) return 0; - while ((req = virtqueue_get_buf(viommu->vq, &len)) != NULL) { + while ((req = virtqueue_get_buf(vq, &len)) != NULL) { if (req != pending) { dev_warn(viommu->dev, "discarding stale request\n"); continue; @@ -201,6 +218,7 @@ static int _viommu_send_reqs_sync(struct viommu_dev *viommu, * up the CPU in case of a device bug. 
*/ unsigned long timeout_ms = 1000; + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; *nr_sent = 0; @@ -210,15 +228,14 @@ static int _viommu_send_reqs_sync(struct viommu_dev *viommu, sg[0] = &req->top; sg[1] = &req->bottom; - ret = virtqueue_add_sgs(viommu->vq, sg, 1, 1, req, - GFP_ATOMIC); + ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC); if (ret) break; list_add_tail(&req->list, &pending); } - if (i && !virtqueue_kick(viommu->vq)) + if (i && !virtqueue_kick(vq)) return -EPIPE; timeout = ktime_add_ms(ktime_get(), timeout_ms * i); @@ -553,6 +570,70 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) return ret; } +static int viommu_fault_handler(struct viommu_dev *viommu, + struct virtio_iommu_fault *fault) +{ + char *reason_str; + + u8 reason = fault->reason; + u32 flags = le32_to_cpu(fault->flags); + u32 endpoint = le32_to_cpu(fault->endpoint); + u64 address = le64_to_cpu(fault->address); + + switch (reason) { + case VIRTIO_IOMMU_FAULT_R_DOMAIN: + reason_str = "domain"; + break; + case VIRTIO_IOMMU_FAULT_R_MAPPING: + reason_str = "page"; + break; + case VIRTIO_IOMMU_FAULT_R_UNKNOWN: + default: + reason_str = "unknown"; + break; + } + + /* TODO: find EP by ID and report_iommu_fault */ + if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS) + dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", + reason_str, endpoint, address, + flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "", + flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "", + flags & VIRTIO_IOMMU_FAULT_F_EXEC ? 
"X" : ""); + else + dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", + reason_str, endpoint); + + return 0; +} + +static void viommu_event_handler(struct virtqueue *vq) +{ + int ret; + unsigned int len; + struct scatterlist sg[1]; + struct viommu_event *evt; + struct viommu_dev *viommu = vq->vdev->priv; + + while ((evt = virtqueue_get_buf(vq, &len)) != NULL) { + if (len > sizeof(*evt)) { + dev_err(viommu->dev, + "invalid event buffer (len %u != %zu)\n", + len, sizeof(*evt)); + } else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) { + viommu_fault_handler(viommu, &evt->fault); + } + + sg_init_one(sg, evt, sizeof(*evt)); + ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC); + if (ret) + dev_err(viommu->dev, "could not add event buffer\n"); + } + + if (!virtqueue_kick(vq)) + dev_err(viommu->dev, "kick failed\n"); +} + /* IOMMU API */ static bool viommu_capable(enum iommu_cap cap) @@ -934,19 +1015,44 @@ static struct iommu_ops viommu_ops = { .put_resv_regions = viommu_put_resv_regions, }; -static int viommu_init_vq(struct viommu_dev *viommu) +static int viommu_init_vqs(struct viommu_dev *viommu) { struct virtio_device *vdev = dev_to_virtio(viommu->dev); - const char *name = "request"; - void *ret; + const char *names[] = { "request", "event" }; + vq_callback_t *callbacks[] = { + NULL, /* No async requests */ + viommu_event_handler, + }; + + return virtio_find_vqs(vdev, VIOMMU_NUM_VQS, viommu->vqs, callbacks, + names, NULL); +} - ret = virtio_find_single_vq(vdev, NULL, name); - if (IS_ERR(ret)) { - dev_err(viommu->dev, "cannot find VQ\n"); - return PTR_ERR(ret); +static int viommu_fill_evtq(struct viommu_dev *viommu) +{ + int i, ret; + struct scatterlist sg[1]; + struct viommu_event *evts; + struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; + size_t nr_evts = min_t(size_t, PAGE_SIZE / sizeof(struct viommu_event), + viommu->vqs[VIOMMU_EVENT_VQ]->num_free); + + viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, + sizeof(*evts), GFP_KERNEL); + if 
(!evts) + return -ENOMEM; + + for (i = 0; i < nr_evts; i++) { + sg_init_one(sg, &evts[i], sizeof(*evts)); + ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL); + if (ret) + return ret; } - viommu->vq = ret; + if (!virtqueue_kick(vq)) + return -EPIPE; + + dev_info(viommu->dev, "%zu event buffers\n", nr_evts); return 0; } @@ -969,7 +1075,7 @@ static int viommu_probe(struct virtio_device *vdev) viommu->dev = dev; viommu->vdev = vdev; - ret = viommu_init_vq(viommu); + ret = viommu_init_vqs(viommu); if (ret) return ret; @@ -1010,6 +1116,11 @@ static int viommu_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + /* Populate the event queue with buffers */ + ret = viommu_fill_evtq(viommu); + if (ret) + goto err_free_vqs; + ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", virtio_bus_name(vdev)); if (ret) diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index 2335d9ed4676..d6c0224efe61 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -150,4 +150,22 @@ union virtio_iommu_req { struct virtio_iommu_req_probe probe; }; +/* Fault types */ +#define VIRTIO_IOMMU_FAULT_R_UNKNOWN 0 +#define VIRTIO_IOMMU_FAULT_R_DOMAIN 1 +#define VIRTIO_IOMMU_FAULT_R_MAPPING 2 + +#define VIRTIO_IOMMU_FAULT_F_READ (1 << 0) +#define VIRTIO_IOMMU_FAULT_F_WRITE (1 << 1) +#define VIRTIO_IOMMU_FAULT_F_EXEC (1 << 2) +#define VIRTIO_IOMMU_FAULT_F_ADDRESS (1 << 8) + +struct virtio_iommu_fault { + __u8 reason; + __u8 padding[3]; + __le32 flags; + __le32 endpoint; + __le64 address; +} __packed; + #endif -- 2.16.1
WARNING: multiple messages have this Message-ID (diff)
From: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> To: iommu@lists.linux-foundation.org, kvm@vger.kernel.org, virtualization@lists.linux-foundation.org, virtio-dev@lists.oasis-open.org, kvmarm@lists.cs.columbia.edu Cc: joro@8bytes.org, alex.williamson@redhat.com, mst@redhat.com, jasowang@redhat.com, marc.zyngier@arm.com, robin.murphy@arm.com, will.deacon@arm.com, lorenzo.pieralisi@arm.com, eric.auger@redhat.com, eric.auger.pro@gmail.com, peterx@redhat.com, bharat.bhushan@nxp.com, tnowicki@caviumnetworks.com, jayachandran.nair@cavium.com, kevin.tian@intel.com, jintack@cs.columbia.edu Subject: [virtio-dev] [PATCH 3/4] iommu/virtio: Add event queue Date: Wed, 14 Feb 2018 14:53:39 +0000 [thread overview] Message-ID: <20180214145340.1223-4-jean-philippe.brucker@arm.com> (raw) In-Reply-To: <20180214145340.1223-1-jean-philippe.brucker@arm.com> The event queue offers a way for the device to report access faults from endpoints. It is implemented on virtqueue #1. Whenever the host needs to signal a fault, it fills one of the buffers offered by the guest and interrupts it. 
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com> --- drivers/iommu/virtio-iommu.c | 139 ++++++++++++++++++++++++++++++++++---- include/uapi/linux/virtio_iommu.h | 18 +++++ 2 files changed, 143 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 3ac4b38eaf19..6b96f1b36d5a 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -30,6 +30,12 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +enum viommu_vq_idx { + VIOMMU_REQUEST_VQ = 0, + VIOMMU_EVENT_VQ = 1, + VIOMMU_NUM_VQS = 2, +}; + struct viommu_dev { struct iommu_device iommu; struct device *dev; @@ -37,9 +43,10 @@ struct viommu_dev { struct ida domain_ids; - struct virtqueue *vq; + struct virtqueue *vqs[VIOMMU_NUM_VQS]; /* Serialize anything touching the request queue */ spinlock_t request_lock; + void *evts; /* Device configuration */ struct iommu_domain_geometry geometry; @@ -84,6 +91,15 @@ struct viommu_request { struct list_head list; }; +#define VIOMMU_FAULT_RESV_MASK 0xffffff00 + +struct viommu_event { + union { + u32 head; + struct virtio_iommu_fault fault; + }; +}; + #define to_viommu_domain(domain) \ container_of(domain, struct viommu_domain, domain) @@ -161,12 +177,13 @@ static int viommu_receive_resp(struct viommu_dev *viommu, int nr_sent, unsigned int len; int nr_received = 0; struct viommu_request *req, *pending; + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; pending = list_first_entry_or_null(sent, struct viommu_request, list); if (WARN_ON(!pending)) return 0; - while ((req = virtqueue_get_buf(viommu->vq, &len)) != NULL) { + while ((req = virtqueue_get_buf(vq, &len)) != NULL) { if (req != pending) { dev_warn(viommu->dev, "discarding stale request\n"); continue; @@ -201,6 +218,7 @@ static int _viommu_send_reqs_sync(struct viommu_dev *viommu, * up the CPU in case of a device bug. 
*/ unsigned long timeout_ms = 1000; + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; *nr_sent = 0; @@ -210,15 +228,14 @@ static int _viommu_send_reqs_sync(struct viommu_dev *viommu, sg[0] = &req->top; sg[1] = &req->bottom; - ret = virtqueue_add_sgs(viommu->vq, sg, 1, 1, req, - GFP_ATOMIC); + ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC); if (ret) break; list_add_tail(&req->list, &pending); } - if (i && !virtqueue_kick(viommu->vq)) + if (i && !virtqueue_kick(vq)) return -EPIPE; timeout = ktime_add_ms(ktime_get(), timeout_ms * i); @@ -553,6 +570,70 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) return ret; } +static int viommu_fault_handler(struct viommu_dev *viommu, + struct virtio_iommu_fault *fault) +{ + char *reason_str; + + u8 reason = fault->reason; + u32 flags = le32_to_cpu(fault->flags); + u32 endpoint = le32_to_cpu(fault->endpoint); + u64 address = le64_to_cpu(fault->address); + + switch (reason) { + case VIRTIO_IOMMU_FAULT_R_DOMAIN: + reason_str = "domain"; + break; + case VIRTIO_IOMMU_FAULT_R_MAPPING: + reason_str = "page"; + break; + case VIRTIO_IOMMU_FAULT_R_UNKNOWN: + default: + reason_str = "unknown"; + break; + } + + /* TODO: find EP by ID and report_iommu_fault */ + if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS) + dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", + reason_str, endpoint, address, + flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "", + flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "", + flags & VIRTIO_IOMMU_FAULT_F_EXEC ? 
"X" : ""); + else + dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", + reason_str, endpoint); + + return 0; +} + +static void viommu_event_handler(struct virtqueue *vq) +{ + int ret; + unsigned int len; + struct scatterlist sg[1]; + struct viommu_event *evt; + struct viommu_dev *viommu = vq->vdev->priv; + + while ((evt = virtqueue_get_buf(vq, &len)) != NULL) { + if (len > sizeof(*evt)) { + dev_err(viommu->dev, + "invalid event buffer (len %u != %zu)\n", + len, sizeof(*evt)); + } else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) { + viommu_fault_handler(viommu, &evt->fault); + } + + sg_init_one(sg, evt, sizeof(*evt)); + ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC); + if (ret) + dev_err(viommu->dev, "could not add event buffer\n"); + } + + if (!virtqueue_kick(vq)) + dev_err(viommu->dev, "kick failed\n"); +} + /* IOMMU API */ static bool viommu_capable(enum iommu_cap cap) @@ -934,19 +1015,44 @@ static struct iommu_ops viommu_ops = { .put_resv_regions = viommu_put_resv_regions, }; -static int viommu_init_vq(struct viommu_dev *viommu) +static int viommu_init_vqs(struct viommu_dev *viommu) { struct virtio_device *vdev = dev_to_virtio(viommu->dev); - const char *name = "request"; - void *ret; + const char *names[] = { "request", "event" }; + vq_callback_t *callbacks[] = { + NULL, /* No async requests */ + viommu_event_handler, + }; + + return virtio_find_vqs(vdev, VIOMMU_NUM_VQS, viommu->vqs, callbacks, + names, NULL); +} - ret = virtio_find_single_vq(vdev, NULL, name); - if (IS_ERR(ret)) { - dev_err(viommu->dev, "cannot find VQ\n"); - return PTR_ERR(ret); +static int viommu_fill_evtq(struct viommu_dev *viommu) +{ + int i, ret; + struct scatterlist sg[1]; + struct viommu_event *evts; + struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; + size_t nr_evts = min_t(size_t, PAGE_SIZE / sizeof(struct viommu_event), + viommu->vqs[VIOMMU_EVENT_VQ]->num_free); + + viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, + sizeof(*evts), GFP_KERNEL); + if 
(!evts) + return -ENOMEM; + + for (i = 0; i < nr_evts; i++) { + sg_init_one(sg, &evts[i], sizeof(*evts)); + ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL); + if (ret) + return ret; } - viommu->vq = ret; + if (!virtqueue_kick(vq)) + return -EPIPE; + + dev_info(viommu->dev, "%zu event buffers\n", nr_evts); return 0; } @@ -969,7 +1075,7 @@ static int viommu_probe(struct virtio_device *vdev) viommu->dev = dev; viommu->vdev = vdev; - ret = viommu_init_vq(viommu); + ret = viommu_init_vqs(viommu); if (ret) return ret; @@ -1010,6 +1116,11 @@ static int viommu_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + /* Populate the event queue with buffers */ + ret = viommu_fill_evtq(viommu); + if (ret) + goto err_free_vqs; + ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", virtio_bus_name(vdev)); if (ret) diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index 2335d9ed4676..d6c0224efe61 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -150,4 +150,22 @@ union virtio_iommu_req { struct virtio_iommu_req_probe probe; }; +/* Fault types */ +#define VIRTIO_IOMMU_FAULT_R_UNKNOWN 0 +#define VIRTIO_IOMMU_FAULT_R_DOMAIN 1 +#define VIRTIO_IOMMU_FAULT_R_MAPPING 2 + +#define VIRTIO_IOMMU_FAULT_F_READ (1 << 0) +#define VIRTIO_IOMMU_FAULT_F_WRITE (1 << 1) +#define VIRTIO_IOMMU_FAULT_F_EXEC (1 << 2) +#define VIRTIO_IOMMU_FAULT_F_ADDRESS (1 << 8) + +struct virtio_iommu_fault { + __u8 reason; + __u8 padding[3]; + __le32 flags; + __le32 endpoint; + __le64 address; +} __packed; + #endif -- 2.16.1 --------------------------------------------------------------------- To unsubscribe, e-mail: virtio-dev-unsubscribe@lists.oasis-open.org For additional commands, e-mail: virtio-dev-help@lists.oasis-open.org
next prev parent reply other threads:[~2018-02-14 14:54 UTC|newest] Thread overview: 61+ messages / expand[flat|nested] mbox.gz Atom feed top 2018-02-14 14:53 [PATCH 0/4] Add virtio-iommu driver Jean-Philippe Brucker 2018-02-14 14:53 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-14 14:53 ` [PATCH 1/4] iommu: " Jean-Philippe Brucker 2018-02-14 14:53 ` Jean-Philippe Brucker 2018-02-14 14:53 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-21 20:12 ` kbuild test robot 2018-02-21 21:08 ` kbuild test robot [not found] ` <20180214145340.1223-2-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org> 2018-02-19 12:23 ` Tomasz Nowicki 2018-02-20 11:30 ` Jean-Philippe Brucker 2018-02-20 11:30 ` Jean-Philippe Brucker 2018-02-20 11:30 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-21 20:12 ` kbuild test robot [not found] ` <201802220455.lMEb6LLi%fengguang.wu-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org> 2018-02-22 11:04 ` Jean-Philippe Brucker 2018-02-22 11:04 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-27 14:47 ` Michael S. Tsirkin [not found] ` <e5ffc52f-4510-f757-aa83-2a99af3ae06b-5wv7dgnIgG8@public.gmane.org> 2018-02-27 14:47 ` Michael S. Tsirkin 2018-02-27 14:47 ` [virtio-dev] " Michael S. 
Tsirkin 2018-02-21 21:08 ` kbuild test robot 2018-03-21 6:43 ` Tian, Kevin 2018-03-21 6:43 ` [virtio-dev] " Tian, Kevin 2018-03-21 13:14 ` Jean-Philippe Brucker [not found] ` <AADFC41AFE54684AB9EE6CBC0274A5D19108B0FE-0J0gbvR4kThpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org> 2018-03-21 13:14 ` Jean-Philippe Brucker 2018-03-21 13:14 ` [virtio-dev] " Jean-Philippe Brucker 2018-03-21 14:23 ` Robin Murphy 2018-03-22 10:06 ` Tian, Kevin 2018-03-22 10:06 ` [virtio-dev] " Tian, Kevin [not found] ` <AADFC41AFE54684AB9EE6CBC0274A5D19108DC42@SHSMSX101.ccr.corp.intel.com> 2018-03-23 8:27 ` Tian, Kevin [not found] ` <AADFC41AFE54684AB9EE6CBC0274A5D19108DC42-0J0gbvR4kThpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org> 2018-03-23 8:27 ` Tian, Kevin 2018-03-23 8:27 ` [virtio-dev] " Tian, Kevin 2018-04-11 18:35 ` Jean-Philippe Brucker 2018-04-11 18:35 ` [virtio-dev] " Jean-Philippe Brucker 2018-04-11 18:35 ` Jean-Philippe Brucker 2018-03-23 14:48 ` Robin Murphy 2018-04-11 18:33 ` Jean-Philippe Brucker 2018-04-11 18:33 ` [virtio-dev] " Jean-Philippe Brucker 2018-03-21 6:43 ` Tian, Kevin 2018-03-23 14:48 ` Robin Murphy 2018-02-14 14:53 ` [PATCH 2/4] iommu/virtio: Add probe request Jean-Philippe Brucker 2018-02-14 14:53 ` [virtio-dev] " Jean-Philippe Brucker 2018-03-23 15:00 ` Robin Murphy [not found] ` <20180214145340.1223-3-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org> 2018-03-23 15:00 ` Robin Murphy 2018-04-11 18:33 ` Jean-Philippe Brucker 2018-04-11 18:33 ` Jean-Philippe Brucker 2018-04-11 18:33 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-14 14:53 ` Jean-Philippe Brucker 2018-02-14 14:53 ` [PATCH 3/4] iommu/virtio: Add event queue Jean-Philippe Brucker 2018-02-14 14:53 ` Jean-Philippe Brucker [this message] 2018-02-14 14:53 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-22 1:35 ` kbuild test robot [not found] ` <20180214145340.1223-4-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org> 2018-02-22 1:35 ` kbuild test robot 2018-02-14 14:53 ` [PATCH 4/4] vfio: Allow 
type-1 IOMMU instantiation with a virtio-iommu Jean-Philippe Brucker 2018-02-14 14:53 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-14 15:26 ` Alex Williamson [not found] ` <20180214145340.1223-5-jean-philippe.brucker-5wv7dgnIgG8@public.gmane.org> 2018-02-14 15:26 ` Alex Williamson [not found] ` <20180214082639.54556efb-DGNDKt5SQtizQB+pC5nmwQ@public.gmane.org> 2018-02-14 15:35 ` Robin Murphy 2018-02-15 13:53 ` Jean-Philippe Brucker 2018-02-15 13:53 ` Jean-Philippe Brucker 2018-02-15 13:53 ` [virtio-dev] " Jean-Philippe Brucker 2018-02-15 13:53 ` Jean-Philippe Brucker 2018-02-14 15:35 ` Robin Murphy 2018-02-14 14:53 ` Jean-Philippe Brucker
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20180214145340.1223-4-jean-philippe.brucker@arm.com \ --to=jean-philippe.brucker@arm.com \ --cc=alex.williamson@redhat.com \ --cc=bharat.bhushan@nxp.com \ --cc=eric.auger.pro@gmail.com \ --cc=eric.auger@redhat.com \ --cc=iommu@lists.linux-foundation.org \ --cc=jasowang@redhat.com \ --cc=jayachandran.nair@cavium.com \ --cc=jintack@cs.columbia.edu \ --cc=joro@8bytes.org \ --cc=kevin.tian@intel.com \ --cc=kvm@vger.kernel.org \ --cc=kvmarm@lists.cs.columbia.edu \ --cc=lorenzo.pieralisi@arm.com \ --cc=marc.zyngier@arm.com \ --cc=mst@redhat.com \ --cc=peterx@redhat.com \ --cc=robin.murphy@arm.com \ --cc=tnowicki@caviumnetworks.com \ --cc=virtio-dev@lists.oasis-open.org \ --cc=virtualization@lists.linux-foundation.org \ --cc=will.deacon@arm.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.