* [PATCH v12 01/13] migration: Support QLIST migration
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 14:43 ` [PATCH v12 02/13] virtio-iommu: Add skeleton Eric Auger
` (12 subsequent siblings)
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
Support QLIST migration using the same principle as QTAILQ:
94869d5c52 ("migration: migrate QTAILQ").
The VMSTATE_QLIST_V macro has the same proto as VMSTATE_QTAILQ_V.
The change mainly resides in QLIST RAW macros: QLIST_RAW_INSERT_HEAD
and QLIST_RAW_INSERT_AFTER.
Tests also are provided.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v6 -> v7:
- use QLIST_RAW_INSERT_AFTER as suggested by Juan.
v5 - v6:
- by doing more advanced testing with virtio-iommu migration
I noticed this was broken. "prev" field was not set properly.
I improved the tests to manipulate both the next and prev
fields.
- Removed Peter and Juan's R-b
---
include/migration/vmstate.h | 21 +++++
include/qemu/queue.h | 32 +++++++
migration/trace-events | 5 ++
migration/vmstate-types.c | 74 ++++++++++++++++
tests/test-vmstate.c | 170 ++++++++++++++++++++++++++++++++++++
5 files changed, 302 insertions(+)
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 4aef72c426..0dc04fc48e 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -229,6 +229,7 @@ extern const VMStateInfo vmstate_info_tmp;
extern const VMStateInfo vmstate_info_bitmap;
extern const VMStateInfo vmstate_info_qtailq;
extern const VMStateInfo vmstate_info_gtree;
+extern const VMStateInfo vmstate_info_qlist;
#define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0)
/*
@@ -798,6 +799,26 @@ extern const VMStateInfo vmstate_info_gtree;
.offset = offsetof(_state, _field), \
}
+/*
+ * For migrating a QLIST
+ * Target QLIST needs to be properly initialized.
+ * _type: type of QLIST element
+ * _next: name of QLIST_ENTRY entry field in QLIST element
+ * _vmsd: VMSD for QLIST element
+ * size: size of QLIST element
+ * start: offset of QLIST_ENTRY in QLIST element
+ */
+#define VMSTATE_QLIST_V(_field, _state, _version, _vmsd, _type, _next) \
+{ \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .info = &vmstate_info_qlist, \
+ .offset = offsetof(_state, _field), \
+ .start = offsetof(_type, _next), \
+}
+
/* _f : field name
_f_n : num of elements field_name
_n : num of elements
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 4764d93ea3..19425f973f 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -501,4 +501,36 @@ union { \
QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
} while (/*CONSTCOND*/0)
+#define QLIST_RAW_FIRST(head) \
+ field_at_offset(head, 0, void *)
+
+#define QLIST_RAW_NEXT(elm, entry) \
+ field_at_offset(elm, entry, void *)
+
+#define QLIST_RAW_PREVIOUS(elm, entry) \
+ field_at_offset(elm, entry + sizeof(void *), void *)
+
+#define QLIST_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = *QLIST_RAW_FIRST(head); \
+ (elm); \
+ (elm) = *QLIST_RAW_NEXT(elm, entry))
+
+#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do { \
+ *QLIST_RAW_NEXT(prev, entry) = elem; \
+ *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \
+ *QLIST_RAW_NEXT(elem, entry) = NULL; \
+} while (0)
+
+#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do { \
+ void *first = *QLIST_RAW_FIRST(head); \
+ *QLIST_RAW_FIRST(head) = elm; \
+ *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head); \
+ if (first) { \
+ *QLIST_RAW_NEXT(elm, entry) = first; \
+ *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \
+ } else { \
+ *QLIST_RAW_NEXT(elm, entry) = NULL; \
+ } \
+} while (0)
+
#endif /* QEMU_SYS_QUEUE_H */
diff --git a/migration/trace-events b/migration/trace-events
index 6dee7b5389..e0a33cffca 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -76,6 +76,11 @@ get_gtree_end(const char *field_name, const char *key_vmsd_name, const char *val
put_gtree(const char *field_name, const char *key_vmsd_name, const char *val_vmsd_name, uint32_t nnodes) "%s(%s/%s) nnodes=%d"
put_gtree_end(const char *field_name, const char *key_vmsd_name, const char *val_vmsd_name, int ret) "%s(%s/%s) %d"
+get_qlist(const char *field_name, const char *vmsd_name, int version_id) "%s(%s v%d)"
+get_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
+put_qlist(const char *field_name, const char *vmsd_name, int version_id) "%s(%s v%d)"
+put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
+
# qemu-file.c
qemu_file_fclose(void) ""
diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c
index 7236cf92bc..35e784c9d9 100644
--- a/migration/vmstate-types.c
+++ b/migration/vmstate-types.c
@@ -843,3 +843,77 @@ const VMStateInfo vmstate_info_gtree = {
.get = get_gtree,
.put = put_gtree,
};
+
+static int put_qlist(QEMUFile *f, void *pv, size_t unused_size,
+ const VMStateField *field, QJSON *vmdesc)
+{
+ const VMStateDescription *vmsd = field->vmsd;
+ /* offset of the QLIST entry in a QLIST element */
+ size_t entry_offset = field->start;
+ void *elm;
+ int ret;
+
+ trace_put_qlist(field->name, vmsd->name, vmsd->version_id);
+ QLIST_RAW_FOREACH(elm, pv, entry_offset) {
+ qemu_put_byte(f, true);
+ ret = vmstate_save_state(f, vmsd, elm, vmdesc);
+ if (ret) {
+ error_report("%s: failed to save %s (%d)", field->name,
+ vmsd->name, ret);
+ return ret;
+ }
+ }
+ qemu_put_byte(f, false);
+ trace_put_qlist_end(field->name, vmsd->name);
+
+ return 0;
+}
+
+static int get_qlist(QEMUFile *f, void *pv, size_t unused_size,
+ const VMStateField *field)
+{
+ int ret = 0;
+ const VMStateDescription *vmsd = field->vmsd;
+ /* size of a QLIST element */
+ size_t size = field->size;
+ /* offset of the QLIST entry in a QLIST element */
+ size_t entry_offset = field->start;
+ int version_id = field->version_id;
+ void *elm, *prev = NULL;
+
+ trace_get_qlist(field->name, vmsd->name, vmsd->version_id);
+ if (version_id > vmsd->version_id) {
+ error_report("%s %s", vmsd->name, "too new");
+ return -EINVAL;
+ }
+ if (version_id < vmsd->minimum_version_id) {
+ error_report("%s %s", vmsd->name, "too old");
+ return -EINVAL;
+ }
+
+ while (qemu_get_byte(f)) {
+ elm = g_malloc(size);
+ ret = vmstate_load_state(f, vmsd, elm, version_id);
+ if (ret) {
+ error_report("%s: failed to load %s (%d)", field->name,
+ vmsd->name, ret);
+ g_free(elm);
+ return ret;
+ }
+ if (!prev) {
+ QLIST_RAW_INSERT_HEAD(pv, elm, entry_offset);
+ } else {
+ QLIST_RAW_INSERT_AFTER(pv, prev, elm, entry_offset);
+ }
+ prev = elm;
+ }
+ trace_get_qlist_end(field->name, vmsd->name);
+
+ return ret;
+}
+
+const VMStateInfo vmstate_info_qlist = {
+ .name = "qlist",
+ .get = get_qlist,
+ .put = put_qlist,
+};
diff --git a/tests/test-vmstate.c b/tests/test-vmstate.c
index 8f184f3556..49e8a3ef46 100644
--- a/tests/test-vmstate.c
+++ b/tests/test-vmstate.c
@@ -926,6 +926,28 @@ static const VMStateDescription vmstate_domain = {
}
};
+/* test QLIST Migration */
+
+typedef struct TestQListElement {
+ uint32_t id;
+ QLIST_ENTRY(TestQListElement) next;
+} TestQListElement;
+
+typedef struct TestQListContainer {
+ uint32_t id;
+ QLIST_HEAD(, TestQListElement) list;
+} TestQListContainer;
+
+static const VMStateDescription vmstate_qlist_element = {
+ .name = "test/queue list",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(id, TestQListElement),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_iommu = {
.name = "iommu",
.version_id = 1,
@@ -939,6 +961,18 @@ static const VMStateDescription vmstate_iommu = {
}
};
+static const VMStateDescription vmstate_container = {
+ .name = "test/container/qlist",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(id, TestQListContainer),
+ VMSTATE_QLIST_V(list, TestQListContainer, 1, vmstate_qlist_element,
+ TestQListElement, next),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
uint8_t first_domain_dump[] = {
/* id */
0x00, 0x0, 0x0, 0x6,
@@ -1229,6 +1263,140 @@ static void test_gtree_load_iommu(void)
qemu_fclose(fload);
}
+static uint8_t qlist_dump[] = {
+ 0x00, 0x00, 0x00, 0x01, /* container id */
+ 0x1, /* start of a */
+ 0x00, 0x00, 0x00, 0x0a,
+ 0x1, /* start of b */
+ 0x00, 0x00, 0x0b, 0x00,
+ 0x1, /* start of c */
+ 0x00, 0x0c, 0x00, 0x00,
+ 0x1, /* start of d */
+ 0x0d, 0x00, 0x00, 0x00,
+ 0x0, /* end of list */
+ QEMU_VM_EOF, /* just to ensure we won't get EOF reported prematurely */
+};
+
+static TestQListContainer *alloc_container(void)
+{
+ TestQListElement *a = g_malloc(sizeof(TestQListElement));
+ TestQListElement *b = g_malloc(sizeof(TestQListElement));
+ TestQListElement *c = g_malloc(sizeof(TestQListElement));
+ TestQListElement *d = g_malloc(sizeof(TestQListElement));
+ TestQListContainer *container = g_malloc(sizeof(TestQListContainer));
+
+ a->id = 0x0a;
+ b->id = 0x0b00;
+ c->id = 0xc0000;
+ d->id = 0xd000000;
+ container->id = 1;
+
+ QLIST_INIT(&container->list);
+ QLIST_INSERT_HEAD(&container->list, d, next);
+ QLIST_INSERT_HEAD(&container->list, c, next);
+ QLIST_INSERT_HEAD(&container->list, b, next);
+ QLIST_INSERT_HEAD(&container->list, a, next);
+ return container;
+}
+
+static void free_container(TestQListContainer *container)
+{
+ TestQListElement *iter, *tmp;
+
+ QLIST_FOREACH_SAFE(iter, &container->list, next, tmp) {
+ QLIST_REMOVE(iter, next);
+ g_free(iter);
+ }
+ g_free(container);
+}
+
+static void compare_containers(TestQListContainer *c1, TestQListContainer *c2)
+{
+ TestQListElement *first_item_c1, *first_item_c2;
+
+ while (!QLIST_EMPTY(&c1->list)) {
+ first_item_c1 = QLIST_FIRST(&c1->list);
+ first_item_c2 = QLIST_FIRST(&c2->list);
+ assert(first_item_c2);
+ assert(first_item_c1->id == first_item_c2->id);
+ QLIST_REMOVE(first_item_c1, next);
+ QLIST_REMOVE(first_item_c2, next);
+ g_free(first_item_c1);
+ g_free(first_item_c2);
+ }
+ assert(QLIST_EMPTY(&c2->list));
+}
+
+/*
+ * Check the prev & next fields are correct by doing list
+ * manipulations on the container. We will do that for both
+ * the source and the destination containers
+ */
+static void manipulate_container(TestQListContainer *c)
+{
+ TestQListElement *prev, *iter = QLIST_FIRST(&c->list);
+ TestQListElement *elem;
+
+ elem = g_malloc(sizeof(TestQListElement));
+ elem->id = 0x12;
+ QLIST_INSERT_AFTER(iter, elem, next);
+
+ elem = g_malloc(sizeof(TestQListElement));
+ elem->id = 0x13;
+ QLIST_INSERT_HEAD(&c->list, elem, next);
+
+ while (iter) {
+ prev = iter;
+ iter = QLIST_NEXT(iter, next);
+ }
+
+ elem = g_malloc(sizeof(TestQListElement));
+ elem->id = 0x14;
+ QLIST_INSERT_BEFORE(prev, elem, next);
+
+ elem = g_malloc(sizeof(TestQListElement));
+ elem->id = 0x15;
+ QLIST_INSERT_AFTER(prev, elem, next);
+
+ QLIST_REMOVE(prev, next);
+ g_free(prev);
+}
+
+static void test_save_qlist(void)
+{
+ TestQListContainer *container = alloc_container();
+
+ save_vmstate(&vmstate_container, container);
+ compare_vmstate(qlist_dump, sizeof(qlist_dump));
+ free_container(container);
+}
+
+static void test_load_qlist(void)
+{
+ QEMUFile *fsave, *fload;
+ TestQListContainer *orig_container = alloc_container();
+ TestQListContainer *dest_container = g_malloc0(sizeof(TestQListContainer));
+ char eof;
+
+ QLIST_INIT(&dest_container->list);
+
+ fsave = open_test_file(true);
+ qemu_put_buffer(fsave, qlist_dump, sizeof(qlist_dump));
+ g_assert(!qemu_file_get_error(fsave));
+ qemu_fclose(fsave);
+
+ fload = open_test_file(false);
+ vmstate_load_state(fload, &vmstate_container, dest_container, 1);
+ eof = qemu_get_byte(fload);
+ g_assert(!qemu_file_get_error(fload));
+ g_assert_cmpint(eof, ==, QEMU_VM_EOF);
+ manipulate_container(orig_container);
+ manipulate_container(dest_container);
+ compare_containers(orig_container, dest_container);
+ free_container(orig_container);
+ free_container(dest_container);
+}
+
typedef struct TmpTestStruct {
TestStruct *parent;
int64_t diff;
@@ -1353,6 +1521,8 @@ int main(int argc, char **argv)
g_test_add_func("/vmstate/gtree/load/loaddomain", test_gtree_load_domain);
g_test_add_func("/vmstate/gtree/save/saveiommu", test_gtree_save_iommu);
g_test_add_func("/vmstate/gtree/load/loadiommu", test_gtree_load_iommu);
+ g_test_add_func("/vmstate/qlist/save/saveqlist", test_save_qlist);
+ g_test_add_func("/vmstate/qlist/load/loadqlist", test_load_qlist);
g_test_add_func("/vmstate/tmp_struct", test_tmp_struct);
g_test_run();
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [PATCH v12 02/13] virtio-iommu: Add skeleton
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
2020-01-09 14:43 ` [PATCH v12 01/13] migration: Support QLIST migration Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-13 19:53 ` Peter Xu
2020-01-09 14:43 ` [PATCH v12 03/13] virtio-iommu: Decode the command payload Eric Auger
` (11 subsequent siblings)
13 siblings, 1 reply; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch adds the skeleton for the virtio-iommu device.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- remove s_by_bus_num
- drop set_features (rely on default implementation) and
acked_features
v9 -> v10:
- mutex initialized here
- initialize tail
- included hw/qdev-properties.h
- removed g_memdup
- removed s->config.domain_range.start = 0;
v9 -> v10:
- expose VIRTIO_IOMMU_F_MMIO feature
- s/domain_bits/domain_range struct
- change error codes
- enforce unmigratable
- Kconfig
v7 -> v8:
- expose VIRTIO_IOMMU_F_BYPASS and VIRTIO_F_VERSION_1
features
- set_config dummy implementation + tracing
- add trace in get_features
- set the features on realize() and store the acked ones
- remove inclusion of linux/virtio_iommu.h
v6 -> v7:
- removed qapi-event.h include
- add primary_bus and associated property
v4 -> v5:
- use the new v0.5 terminology (domain, endpoint)
- add the event virtqueue
v3 -> v4:
- use page_size_mask instead of page_sizes
- added set_features()
- added some traces (reset, set_status, set_features)
- empty virtio_iommu_set_config() as the driver MUST NOT
write to device configuration fields
- add get_config trace
v2 -> v3:
- rebase on 2.10-rc0, ie. use IOMMUMemoryRegion and remove
iommu_ops.
- advertise VIRTIO_IOMMU_F_MAP_UNMAP feature
- page_sizes set to TARGET_PAGE_SIZE
Conflicts:
hw/virtio/trace-events
---
hw/virtio/Kconfig | 5 +
hw/virtio/Makefile.objs | 1 +
hw/virtio/trace-events | 7 +
hw/virtio/virtio-iommu.c | 265 +++++++++++++++++++++++++++++++
include/hw/virtio/virtio-iommu.h | 60 +++++++
5 files changed, 338 insertions(+)
create mode 100644 hw/virtio/virtio-iommu.c
create mode 100644 include/hw/virtio/virtio-iommu.h
diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig
index f87def27a6..d29525b36f 100644
--- a/hw/virtio/Kconfig
+++ b/hw/virtio/Kconfig
@@ -9,6 +9,11 @@ config VIRTIO_RNG
default y
depends on VIRTIO
+config VIRTIO_IOMMU
+ bool
+ default y
+ depends on VIRTIO
+
config VIRTIO_PCI
bool
default y if PCI_DEVICES
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index de0f5fc39b..2fd9da7410 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -16,6 +16,7 @@ obj-$(call land,$(CONFIG_VIRTIO_CRYPTO),$(CONFIG_VIRTIO_PCI)) += virtio-crypto-p
obj-$(CONFIG_VIRTIO_PMEM) += virtio-pmem.o
common-obj-$(call land,$(CONFIG_VIRTIO_PMEM),$(CONFIG_VIRTIO_PCI)) += virtio-pmem-pci.o
obj-$(call land,$(CONFIG_VHOST_USER_FS),$(CONFIG_VIRTIO_PCI)) += vhost-user-fs-pci.o
+obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock.o
ifeq ($(CONFIG_VIRTIO_PCI),y)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index e28ba48da6..02d93d7f63 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -53,3 +53,10 @@ virtio_mmio_write_offset(uint64_t offset, uint64_t value) "virtio_mmio_write off
virtio_mmio_guest_page(uint64_t size, int shift) "guest page size 0x%" PRIx64 " shift %d"
virtio_mmio_queue_write(uint64_t value, int max_size) "mmio_queue write 0x%" PRIx64 " max %d"
virtio_mmio_setting_irq(int level) "virtio_mmio setting IRQ %d"
+
+# hw/virtio/virtio-iommu.c
+virtio_iommu_device_reset(void) "reset!"
+virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64
+virtio_iommu_device_status(uint8_t status) "driver status = %d"
+virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_range=%d probe_size=0x%x"
+virtio_iommu_set_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_bits=%d probe_size=0x%x"
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
new file mode 100644
index 0000000000..7e55eda67e
--- /dev/null
+++ b/hw/virtio/virtio-iommu.c
@@ -0,0 +1,265 @@
+/*
+ * virtio-iommu device
+ *
+ * Copyright (c) 2017 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/iov.h"
+#include "qemu-common.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/virtio.h"
+#include "sysemu/kvm.h"
+#include "trace.h"
+
+#include "standard-headers/linux/virtio_ids.h"
+
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-access.h"
+#include "hw/virtio/virtio-iommu.h"
+
+/* Max size */
+#define VIOMMU_DEFAULT_QUEUE_SIZE 256
+
+static int virtio_iommu_handle_attach(VirtIOIOMMU *s,
+ struct iovec *iov,
+ unsigned int iov_cnt)
+{
+ return VIRTIO_IOMMU_S_UNSUPP;
+}
+static int virtio_iommu_handle_detach(VirtIOIOMMU *s,
+ struct iovec *iov,
+ unsigned int iov_cnt)
+{
+ return VIRTIO_IOMMU_S_UNSUPP;
+}
+static int virtio_iommu_handle_map(VirtIOIOMMU *s,
+ struct iovec *iov,
+ unsigned int iov_cnt)
+{
+ return VIRTIO_IOMMU_S_UNSUPP;
+}
+static int virtio_iommu_handle_unmap(VirtIOIOMMU *s,
+ struct iovec *iov,
+ unsigned int iov_cnt)
+{
+ return VIRTIO_IOMMU_S_UNSUPP;
+}
+
+static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
+ struct virtio_iommu_req_head head;
+ struct virtio_iommu_req_tail tail = {};
+ VirtQueueElement *elem;
+ unsigned int iov_cnt;
+ struct iovec *iov;
+ size_t sz;
+
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ return;
+ }
+
+ if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
+ iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
+ virtio_error(vdev, "virtio-iommu bad head/tail size");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
+ }
+
+ iov_cnt = elem->out_num;
+ iov = elem->out_sg;
+ sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
+ if (unlikely(sz != sizeof(head))) {
+ tail.status = VIRTIO_IOMMU_S_DEVERR;
+ goto out;
+ }
+ qemu_mutex_lock(&s->mutex);
+ switch (head.type) {
+ case VIRTIO_IOMMU_T_ATTACH:
+ tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
+ break;
+ case VIRTIO_IOMMU_T_DETACH:
+ tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
+ break;
+ case VIRTIO_IOMMU_T_MAP:
+ tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
+ break;
+ case VIRTIO_IOMMU_T_UNMAP:
+ tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
+ break;
+ default:
+ tail.status = VIRTIO_IOMMU_S_UNSUPP;
+ }
+ qemu_mutex_unlock(&s->mutex);
+
+out:
+ sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
+ &tail, sizeof(tail));
+ assert(sz == sizeof(tail));
+
+ virtqueue_push(vq, elem, sizeof(tail));
+ virtio_notify(vdev, vq);
+ g_free(elem);
+ }
+}
+
+static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
+ struct virtio_iommu_config *config = &dev->config;
+
+ trace_virtio_iommu_get_config(config->page_size_mask,
+ config->input_range.start,
+ config->input_range.end,
+ config->domain_range.end,
+ config->probe_size);
+ memcpy(config_data, &dev->config, sizeof(struct virtio_iommu_config));
+}
+
+static void virtio_iommu_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ struct virtio_iommu_config config;
+
+ memcpy(&config, config_data, sizeof(struct virtio_iommu_config));
+ trace_virtio_iommu_set_config(config.page_size_mask,
+ config.input_range.start,
+ config.input_range.end,
+ config.domain_range.end,
+ config.probe_size);
+}
+
+static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
+ Error **errp)
+{
+ VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
+
+ f |= dev->features;
+ trace_virtio_iommu_get_features(f);
+ return f;
+}
+
+/*
+ * Migration is not yet supported: most of the state consists
+ * of balanced binary trees which are not yet ready for getting
+ * migrated
+ */
+static const VMStateDescription vmstate_virtio_iommu_device = {
+ .name = "virtio-iommu-device",
+ .unmigratable = 1,
+};
+
+static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOIOMMU *s = VIRTIO_IOMMU(dev);
+
+ virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU,
+ sizeof(struct virtio_iommu_config));
+
+ s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
+ virtio_iommu_handle_command);
+ s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);
+
+ s->config.page_size_mask = TARGET_PAGE_MASK;
+ s->config.input_range.end = -1UL;
+ s->config.domain_range.end = 32;
+
+ virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
+ virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
+ virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
+ virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
+ virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
+ virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
+ virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
+ virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
+
+ qemu_mutex_init(&s->mutex);
+}
+
+static void virtio_iommu_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+
+ virtio_cleanup(vdev);
+}
+
+static void virtio_iommu_device_reset(VirtIODevice *vdev)
+{
+ trace_virtio_iommu_device_reset();
+}
+
+static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ trace_virtio_iommu_device_status(status);
+}
+
+static void virtio_iommu_instance_init(Object *obj)
+{
+}
+
+static const VMStateDescription vmstate_virtio_iommu = {
+ .name = "virtio-iommu",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static Property virtio_iommu_properties[] = {
+ DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_iommu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ dc->props = virtio_iommu_properties;
+ dc->vmsd = &vmstate_virtio_iommu;
+
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = virtio_iommu_device_realize;
+ vdc->unrealize = virtio_iommu_device_unrealize;
+ vdc->reset = virtio_iommu_device_reset;
+ vdc->get_config = virtio_iommu_get_config;
+ vdc->set_config = virtio_iommu_set_config;
+ vdc->get_features = virtio_iommu_get_features;
+ vdc->set_status = virtio_iommu_set_status;
+ vdc->vmsd = &vmstate_virtio_iommu_device;
+}
+
+static const TypeInfo virtio_iommu_info = {
+ .name = TYPE_VIRTIO_IOMMU,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOIOMMU),
+ .instance_init = virtio_iommu_instance_init,
+ .class_init = virtio_iommu_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_iommu_info);
+}
+
+type_init(virtio_register_types)
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
new file mode 100644
index 0000000000..4ebf4f093e
--- /dev/null
+++ b/include/hw/virtio/virtio-iommu.h
@@ -0,0 +1,60 @@
+/*
+ * virtio-iommu device
+ *
+ * Copyright (c) 2017 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_VIRTIO_IOMMU_H
+#define QEMU_VIRTIO_IOMMU_H
+
+#include "standard-headers/linux/virtio_iommu.h"
+#include "hw/virtio/virtio.h"
+#include "hw/pci/pci.h"
+
+#define TYPE_VIRTIO_IOMMU "virtio-iommu-device"
+#define VIRTIO_IOMMU(obj) \
+ OBJECT_CHECK(VirtIOIOMMU, (obj), TYPE_VIRTIO_IOMMU)
+
+#define IOMMU_PCI_BUS_MAX 256
+#define IOMMU_PCI_DEVFN_MAX 256
+
+typedef struct IOMMUDevice {
+ void *viommu;
+ PCIBus *bus;
+ int devfn;
+ IOMMUMemoryRegion iommu_mr;
+ AddressSpace as;
+} IOMMUDevice;
+
+typedef struct IOMMUPciBus {
+ PCIBus *bus;
+ IOMMUDevice *pbdev[0]; /* Parent array is sparse, so dynamically alloc */
+} IOMMUPciBus;
+
+typedef struct VirtIOIOMMU {
+ VirtIODevice parent_obj;
+ VirtQueue *req_vq;
+ VirtQueue *event_vq;
+ struct virtio_iommu_config config;
+ uint64_t features;
+ GHashTable *as_by_busptr;
+ PCIBus *primary_bus;
+ GTree *domains;
+ QemuMutex mutex;
+ GTree *endpoints;
+} VirtIOIOMMU;
+
+#endif
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 02/13] virtio-iommu: Add skeleton
2020-01-09 14:43 ` [PATCH v12 02/13] virtio-iommu: Add skeleton Eric Auger
@ 2020-01-13 19:53 ` Peter Xu
0 siblings, 0 replies; 39+ messages in thread
From: Peter Xu @ 2020-01-13 19:53 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:08PM +0100, Eric Auger wrote:
> This patchs adds the skeleton for the virtio-iommu device.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* [PATCH v12 03/13] virtio-iommu: Decode the command payload
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
2020-01-09 14:43 ` [PATCH v12 01/13] migration: Support QLIST migration Eric Auger
2020-01-09 14:43 ` [PATCH v12 02/13] virtio-iommu: Add skeleton Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 14:43 ` [PATCH v12 04/13] virtio-iommu: Add the iommu regions Eric Auger
` (10 subsequent siblings)
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch adds the command payload decoding and
introduces the functions that will do the actual
command handling. Those functions are not yet implemented.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
---
v11 -> v12:
- Added Jean and Peter's R-b
v10 -> v11:
- use a macro for handle command functions
v9 -> v10:
- make virtio_iommu_handle_* more compact and
remove get_payload_size
v7 -> v8:
- handle new domain parameter in detach
- remove reserved checks
v5 -> v6:
- change map/unmap semantics (remove size)
v4 -> v5:
- adopt new v0.5 terminology
v3 -> v4:
- no flags field anymore in struct virtio_iommu_req_unmap
- test reserved on attach/detach, change trace proto
- rebase on v2.10.0.
---
hw/virtio/trace-events | 4 +++
hw/virtio/virtio-iommu.c | 76 +++++++++++++++++++++++++++++++++-------
2 files changed, 68 insertions(+), 12 deletions(-)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 02d93d7f63..f7141aa2f6 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -60,3 +60,7 @@ virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx6
virtio_iommu_device_status(uint8_t status) "driver status = %d"
virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_range=%d probe_size=0x%x"
virtio_iommu_set_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_bits=%d probe_size=0x%x"
+virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
+virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
+virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 " phys_start=0x%"PRIx64" flags=%d"
+virtio_iommu_unmap(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 7e55eda67e..9d1b997df7 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -34,31 +34,83 @@
/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
-static int virtio_iommu_handle_attach(VirtIOIOMMU *s,
- struct iovec *iov,
- unsigned int iov_cnt)
+static int virtio_iommu_attach(VirtIOIOMMU *s,
+ struct virtio_iommu_req_attach *req)
{
+ uint32_t domain_id = le32_to_cpu(req->domain);
+ uint32_t ep_id = le32_to_cpu(req->endpoint);
+
+ trace_virtio_iommu_attach(domain_id, ep_id);
+
return VIRTIO_IOMMU_S_UNSUPP;
}
-static int virtio_iommu_handle_detach(VirtIOIOMMU *s,
- struct iovec *iov,
- unsigned int iov_cnt)
+
+static int virtio_iommu_detach(VirtIOIOMMU *s,
+ struct virtio_iommu_req_detach *req)
{
+ uint32_t domain_id = le32_to_cpu(req->domain);
+ uint32_t ep_id = le32_to_cpu(req->endpoint);
+
+ trace_virtio_iommu_detach(domain_id, ep_id);
+
return VIRTIO_IOMMU_S_UNSUPP;
}
-static int virtio_iommu_handle_map(VirtIOIOMMU *s,
- struct iovec *iov,
- unsigned int iov_cnt)
+
+static int virtio_iommu_map(VirtIOIOMMU *s,
+ struct virtio_iommu_req_map *req)
{
+ uint32_t domain_id = le32_to_cpu(req->domain);
+ uint64_t phys_start = le64_to_cpu(req->phys_start);
+ uint64_t virt_start = le64_to_cpu(req->virt_start);
+ uint64_t virt_end = le64_to_cpu(req->virt_end);
+ uint32_t flags = le32_to_cpu(req->flags);
+
+ trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);
+
return VIRTIO_IOMMU_S_UNSUPP;
}
-static int virtio_iommu_handle_unmap(VirtIOIOMMU *s,
- struct iovec *iov,
- unsigned int iov_cnt)
+
+static int virtio_iommu_unmap(VirtIOIOMMU *s,
+ struct virtio_iommu_req_unmap *req)
{
+ uint32_t domain_id = le32_to_cpu(req->domain);
+ uint64_t virt_start = le64_to_cpu(req->virt_start);
+ uint64_t virt_end = le64_to_cpu(req->virt_end);
+
+ trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);
+
return VIRTIO_IOMMU_S_UNSUPP;
}
+static int virtio_iommu_iov_to_req(struct iovec *iov,
+ unsigned int iov_cnt,
+ void *req, size_t req_sz)
+{
+ size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);
+
+ sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
+ if (unlikely(sz != payload_sz)) {
+ return VIRTIO_IOMMU_S_INVAL;
+ }
+ return 0;
+}
+
+#define virtio_iommu_handle_req(__req) \
+static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s, \
+ struct iovec *iov, \
+ unsigned int iov_cnt) \
+{ \
+ struct virtio_iommu_req_ ## __req req; \
+ int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
+ \
+ return ret ? ret : virtio_iommu_ ## __req(s, &req); \
+}
+
+virtio_iommu_handle_req(attach)
+virtio_iommu_handle_req(detach)
+virtio_iommu_handle_req(map)
+virtio_iommu_handle_req(unmap)
+
static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [PATCH v12 04/13] virtio-iommu: Add the iommu regions
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (2 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 03/13] virtio-iommu: Decode the command payload Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-13 19:53 ` Peter Xu
2020-01-13 20:06 ` Peter Xu
2020-01-09 14:43 ` [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers Eric Auger
` (9 subsequent siblings)
13 siblings, 2 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
Implement a callback called on PCI bus enumeration that
initializes for a given device on the bus hierarchy
an IOMMU memory region. The PCI bus hierarchy is stored
locally in IOMMUPciBus and IOMMUDevice objects.
At the time of the enumeration, the bus number may not be
computed yet.
So operations that will need to retrieve the IOMMUdevice
and its IOMMU memory region from the bus number and devfn,
once the bus number is guaranteed to be frozen,
use an array of IOMMUPciBus, lazily populated.
virtio_iommu_mr() is the top helper that allows to retrieve
the IOMMU memory region from the requester ID.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- add the iommu_find_iommu_pcibus() mechanics. Without it,
when attaching the device to a domain we could not check
the device is effectively protected by this IOMMU
v10 -> v11:
- use g_hash_table_new_full for allocating as_by_busptr
v9 -> v10:
- remove pc/virt machine headers
- virtio_iommu_find_add_as: mr_index introduced in that patch
and name properly freed
v6 -> v7:
- use primary_bus
- rebase on new translate proto featuring iommu_idx
v5 -> v6:
- include qapi/error.h
- fix g_hash_table_lookup key in virtio_iommu_find_add_as
v4 -> v5:
- use PCI bus handle as a key
- use get_primary_pci_bus() callback
v3 -> v4:
- add trace_virtio_iommu_init_iommu_mr
v2 -> v3:
- use IOMMUMemoryRegion
- iommu mr name built with BDF
- rename smmu_get_sid into virtio_iommu_get_sid and use PCI_BUILD_BDF
---
hw/virtio/trace-events | 2 +
hw/virtio/virtio-iommu.c | 135 +++++++++++++++++++++++++++++++
include/hw/virtio/virtio-iommu.h | 3 +
3 files changed, 140 insertions(+)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index f7141aa2f6..10a2b120f3 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -64,3 +64,5 @@ virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 " phys_start=0x%"PRIx64" flags=%d"
virtio_iommu_unmap(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
+virtio_iommu_translate(const char *name, uint32_t rid, uint64_t iova, int flag) "mr=%s rid=%d addr=0x%"PRIx64" flag=%d"
+virtio_iommu_init_iommu_mr(char *iommu_mr) "init %s"
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 9d1b997df7..acc939f334 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -23,6 +23,8 @@
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio.h"
#include "sysemu/kvm.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "trace.h"
#include "standard-headers/linux/virtio_ids.h"
@@ -34,6 +36,93 @@
/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
+static inline uint16_t virtio_iommu_get_sid(IOMMUDevice *dev)
+{
+ return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
+}
+
+/**
+ * The bus number is used for lookup when SID based operations occur.
+ * In that case we lazily populate the IOMMUPciBus array from the bus hash
+ * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
+ * numbers may not be always initialized yet.
+ */
+static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
+{
+ IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];
+
+ if (!iommu_pci_bus) {
+ GHashTableIter iter;
+
+ g_hash_table_iter_init(&iter, s->as_by_busptr);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
+ if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
+ s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
+ return iommu_pci_bus;
+ }
+ }
+ }
+ return iommu_pci_bus;
+}
+
+IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid);
+IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
+{
+ uint8_t bus_n, devfn;
+ IOMMUPciBus *iommu_pci_bus;
+ IOMMUDevice *dev;
+
+ bus_n = PCI_BUS_NUM(sid);
+ iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
+ if (iommu_pci_bus) {
+ devfn = sid & 0xFF;
+ dev = iommu_pci_bus->pbdev[devfn];
+ if (dev) {
+ return &dev->iommu_mr;
+ }
+ }
+ return NULL;
+}
+
+static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
+ int devfn)
+{
+ VirtIOIOMMU *s = opaque;
+ IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
+ static uint32_t mr_index;
+ IOMMUDevice *sdev;
+
+ if (!sbus) {
+ sbus = g_malloc0(sizeof(IOMMUPciBus) +
+ sizeof(IOMMUDevice *) * IOMMU_PCI_DEVFN_MAX);
+ sbus->bus = bus;
+ g_hash_table_insert(s->as_by_busptr, bus, sbus);
+ }
+
+ sdev = sbus->pbdev[devfn];
+ if (!sdev) {
+ char *name = g_strdup_printf("%s-%d-%d",
+ TYPE_VIRTIO_IOMMU_MEMORY_REGION,
+ mr_index++, devfn);
+ sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice));
+
+ sdev->viommu = s;
+ sdev->bus = bus;
+ sdev->devfn = devfn;
+
+ trace_virtio_iommu_init_iommu_mr(name);
+
+ memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
+ TYPE_VIRTIO_IOMMU_MEMORY_REGION,
+ OBJECT(s), name,
+ UINT64_MAX);
+ address_space_init(&sdev->as,
+ MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
+ g_free(name);
+ }
+ return &sdev->as;
+}
+
static int virtio_iommu_attach(VirtIOIOMMU *s,
struct virtio_iommu_req_attach *req)
{
@@ -172,6 +261,27 @@ out:
}
}
+static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
+ IOMMUAccessFlags flag,
+ int iommu_idx)
+{
+ IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
+ uint32_t sid;
+
+ IOMMUTLBEntry entry = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = addr,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+
+ sid = virtio_iommu_get_sid(sdev);
+
+ trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
+ return entry;
+}
+
static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
@@ -226,6 +336,8 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU,
sizeof(struct virtio_iommu_config));
+ memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));
+
s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
virtio_iommu_handle_command);
s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);
@@ -244,6 +356,14 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
qemu_mutex_init(&s->mutex);
+
+ s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);
+
+ if (s->primary_bus) {
+ pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s);
+ } else {
+ error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
+ }
}
static void virtio_iommu_device_unrealize(DeviceState *dev, Error **errp)
@@ -301,6 +421,14 @@ static void virtio_iommu_class_init(ObjectClass *klass, void *data)
vdc->vmsd = &vmstate_virtio_iommu_device;
}
+static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = virtio_iommu_translate;
+}
+
static const TypeInfo virtio_iommu_info = {
.name = TYPE_VIRTIO_IOMMU,
.parent = TYPE_VIRTIO_DEVICE,
@@ -309,9 +437,16 @@ static const TypeInfo virtio_iommu_info = {
.class_init = virtio_iommu_class_init,
};
+static const TypeInfo virtio_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
+ .class_init = virtio_iommu_memory_region_class_init,
+};
+
static void virtio_register_types(void)
{
type_register_static(&virtio_iommu_info);
+ type_register_static(&virtio_iommu_memory_region_info);
}
type_init(virtio_register_types)
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
index 4ebf4f093e..c6c33b4af0 100644
--- a/include/hw/virtio/virtio-iommu.h
+++ b/include/hw/virtio/virtio-iommu.h
@@ -28,6 +28,8 @@
#define VIRTIO_IOMMU(obj) \
OBJECT_CHECK(VirtIOIOMMU, (obj), TYPE_VIRTIO_IOMMU)
+#define TYPE_VIRTIO_IOMMU_MEMORY_REGION "virtio-iommu-memory-region"
+
#define IOMMU_PCI_BUS_MAX 256
#define IOMMU_PCI_DEVFN_MAX 256
@@ -51,6 +53,7 @@ typedef struct VirtIOIOMMU {
struct virtio_iommu_config config;
uint64_t features;
GHashTable *as_by_busptr;
+ IOMMUPciBus *iommu_pcibus_by_bus_num[IOMMU_PCI_BUS_MAX];
PCIBus *primary_bus;
GTree *domains;
QemuMutex mutex;
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 04/13] virtio-iommu: Add the iommu regions
2020-01-09 14:43 ` [PATCH v12 04/13] virtio-iommu: Add the iommu regions Eric Auger
@ 2020-01-13 19:53 ` Peter Xu
2020-01-14 8:37 ` Auger Eric
2020-01-13 20:06 ` Peter Xu
1 sibling, 1 reply; 39+ messages in thread
From: Peter Xu @ 2020-01-13 19:53 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:10PM +0100, Eric Auger wrote:
> Implement a callback called on PCI bus enumeration that
> initializes for a given device on the bus hierarchy
> an IOMMU memory region. The PCI bus hierarchy is stored
> locally in IOMMUPciBus and IOMMUDevice objects.
>
> At the time of the enumeration, the bus number may not be
> computed yet.
>
> So operations that will need to retrieve the IOMMUdevice
> and its IOMMU memory region from the bus number and devfn,
> once the bus number is garanteed to be frozen,
> use an array of IOMMUPciBus, lazily populated.
>
> virtio_iommu_mr() is the top helper that allows to retrieve
> the IOMMU memory region from the requester ID.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>
> ---
> v11 -> v12:
> - add the iommu_find_iommu_pcibus() mechanics. Without it,
> when attaching t device to a domain we could not check
> the device is effectively protected by this IOMMU
Sorry I probably lost the context again after read the previous
version... Could you hint me what does this used for?
In all cases, I see that virtio_iommu_mr() is introduced but not used.
Would be good to put it into the patch where it's firstly used.
Thanks,
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 04/13] virtio-iommu: Add the iommu regions
2020-01-13 19:53 ` Peter Xu
@ 2020-01-14 8:37 ` Auger Eric
2020-01-14 18:49 ` Peter Xu
0 siblings, 1 reply; 39+ messages in thread
From: Auger Eric @ 2020-01-14 8:37 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/13/20 8:53 PM, Peter Xu wrote:
> On Thu, Jan 09, 2020 at 03:43:10PM +0100, Eric Auger wrote:
>> Implement a callback called on PCI bus enumeration that
>> initializes for a given device on the bus hierarchy
>> an IOMMU memory region. The PCI bus hierarchy is stored
>> locally in IOMMUPciBus and IOMMUDevice objects.
>>
>> At the time of the enumeration, the bus number may not be
>> computed yet.
>>
>> So operations that will need to retrieve the IOMMUdevice
>> and its IOMMU memory region from the bus number and devfn,
>> once the bus number is garanteed to be frozen,
>> use an array of IOMMUPciBus, lazily populated.
>>
>> virtio_iommu_mr() is the top helper that allows to retrieve
>> the IOMMU memory region from the requester ID.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>
>> ---
>> v11 -> v12:
>> - add the iommu_find_iommu_pcibus() mechanics. Without it,
>> when attaching t device to a domain we could not check
>> the device is effectively protected by this IOMMU
>
> Sorry I probably lost the context again after read the previous
> version... Could you hint me what does this used for?
In v11 Jean pointed out that as_by_bus_num was not used in my series. I
first planned to remove it and then noticed that it could be useful to
test on "attach" whether the RID of the device effectively corresponds
to a device protected by the IOMMU and in the negative, return an error.
In https://patchwork.kernel.org/patch/11258269/#23067995
This is the same mechanics used in intel_iommu/smmu.
>
> In all cases, I see that virtio_iommu_mr() is introduced but not used.
> Would be good to put it into the patch where it's firstly used.
OK fair enough, I will put the helper in the same patch as the user as
you have requested that since the beginning ;-) The resulting patch may
be huge. Just hope nobody will request me to split it back ;-)
Thanks
Eric
>
> Thanks,
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 04/13] virtio-iommu: Add the iommu regions
2020-01-14 8:37 ` Auger Eric
@ 2020-01-14 18:49 ` Peter Xu
0 siblings, 0 replies; 39+ messages in thread
From: Peter Xu @ 2020-01-14 18:49 UTC (permalink / raw)
To: Auger Eric
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Tue, Jan 14, 2020 at 09:37:51AM +0100, Auger Eric wrote:
> > In all cases, I see that virtio_iommu_mr() is introduced but not used.
> > Would be good to put it into the patch where it's firstly used.
> OK fair enough, I will put the helper in the same patch as the user as
> you have requested that since the beginning ;-) The resulting patch may
> be huge. Just hope nobody will request me to split it back ;-)
We can wait for a few more days and see whether someone will have a
different answer before you start to squash things. :)
In all cases, I think we should get rid of the compiler trick at
least.
Thanks,
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 04/13] virtio-iommu: Add the iommu regions
2020-01-09 14:43 ` [PATCH v12 04/13] virtio-iommu: Add the iommu regions Eric Auger
2020-01-13 19:53 ` Peter Xu
@ 2020-01-13 20:06 ` Peter Xu
2020-01-14 8:48 ` Auger Eric
1 sibling, 1 reply; 39+ messages in thread
From: Peter Xu @ 2020-01-13 20:06 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:10PM +0100, Eric Auger wrote:
> +/**
> + * The bus number is used for lookup when SID based operations occur.
> + * In that case we lazily populate the IOMMUPciBus array from the bus hash
> + * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
> + * numbers may not be always initialized yet.
> + */
> +static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
> +{
> + IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];
> +
> + if (!iommu_pci_bus) {
> + GHashTableIter iter;
> +
> + g_hash_table_iter_init(&iter, s->as_by_busptr);
> + while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
> + if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
> + s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
> + return iommu_pci_bus;
> + }
> + }
Btw, we may need to:
return NULL;
here.
> + }
> + return iommu_pci_bus;
> +}
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 04/13] virtio-iommu: Add the iommu regions
2020-01-13 20:06 ` Peter Xu
@ 2020-01-14 8:48 ` Auger Eric
0 siblings, 0 replies; 39+ messages in thread
From: Auger Eric @ 2020-01-14 8:48 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/13/20 9:06 PM, Peter Xu wrote:
> On Thu, Jan 09, 2020 at 03:43:10PM +0100, Eric Auger wrote:
>> +/**
>> + * The bus number is used for lookup when SID based operations occur.
>> + * In that case we lazily populate the IOMMUPciBus array from the bus hash
>> + * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
>> + * numbers may not be always initialized yet.
>> + */
>> +static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
>> +{
>> + IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];
>> +
>> + if (!iommu_pci_bus) {
>> + GHashTableIter iter;
>> +
>> + g_hash_table_iter_init(&iter, s->as_by_busptr);
>> + while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
>> + if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
>> + s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
>> + return iommu_pci_bus;
>> + }
>> + }
>
> Btw, we may need to:
>
> return NULL;
Yes. By the way Yi's patch "intel_iommu: a fix to
vtd_find_as_from_bus_num()" also applies to SMMU code. I will send a patch.
Thanks
Eric
>
> here.
>
>> + }
>> + return iommu_pci_bus;
>> +}
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (3 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 04/13] virtio-iommu: Add the iommu regions Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-13 20:23 ` Peter Xu
2020-01-09 14:43 ` [PATCH v12 06/13] virtio-iommu: Implement attach/detach command Eric Auger
` (8 subsequent siblings)
13 siblings, 1 reply; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch introduces domain and endpoint internal
datatypes. Both are stored in RB trees. The domain
owns a list of endpoints attached to it.
Helpers to get/put end points and domains are introduced.
get() helpers will become static in subsequent patches.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- s/uint/guint (docker-mingw@fedora build test failure)
- s/viommu_/VirtIOIOMMU + CamelCase
- Before creating an endpoint, check the sid is one from a
device protected by the iommu, otherwise returns NULL
- add assert of endpoint put to check the endpoint was detached
from any domain
- destroy and re-allocate the trees on reset
v10 -> v11:
- fixed interval_cmp (<= -> < and >= -> >)
- removed unused viommu field from endpoint
- removed Bharat's R-b
v9 -> v10:
- added Bharat's R-b
v6 -> v7:
- on virtio_iommu_find_add_as the bus number computation may
not be finalized yet so we cannot register the EPs at that time.
Hence, let's remove the get_endpoint and also do not use the
bus number for building the memory region name string (only
used for debug though).
v4 -> v5:
- initialize as->endpoint_list
v3 -> v4:
- new separate patch
---
hw/virtio/trace-events | 4 ++
hw/virtio/virtio-iommu.c | 128 ++++++++++++++++++++++++++++++++++++++-
2 files changed, 130 insertions(+), 2 deletions(-)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 10a2b120f3..15595f8cd7 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -66,3 +66,7 @@ virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uin
virtio_iommu_unmap(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
virtio_iommu_translate(const char *name, uint32_t rid, uint64_t iova, int flag) "mr=%s rid=%d addr=0x%"PRIx64" flag=%d"
virtio_iommu_init_iommu_mr(char *iommu_mr) "init %s"
+virtio_iommu_get_endpoint(uint32_t ep_id) "Alloc endpoint=%d"
+virtio_iommu_put_endpoint(uint32_t ep_id) "Free endpoint=%d"
+virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
+virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index acc939f334..6a03c3d7ae 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -32,10 +32,29 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-iommu.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci.h"
/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
+typedef struct VirtIOIOMMUDomain {
+ uint32_t id;
+ GTree *mappings;
+ QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
+} VirtIOIOMMUDomain;
+
+typedef struct VirtIOIOMMUEndpoint {
+ uint32_t id;
+ VirtIOIOMMUDomain *domain;
+ QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
+} VirtIOIOMMUEndpoint;
+
+typedef struct VirtIOIOMMUInterval {
+ uint64_t low;
+ uint64_t high;
+} VirtIOIOMMUInterval;
+
static inline uint16_t virtio_iommu_get_sid(IOMMUDevice *dev)
{
return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
@@ -65,8 +84,7 @@ static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
return iommu_pci_bus;
}
-IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid);
-IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
+static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
uint8_t bus_n, devfn;
IOMMUPciBus *iommu_pci_bus;
@@ -84,6 +102,88 @@ IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
return NULL;
}
+static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
+{
+ VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
+ VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;
+
+ if (inta->high < intb->low) {
+ return -1;
+ } else if (intb->high < inta->low) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
+{
+ QLIST_REMOVE(ep, next);
+ ep->domain = NULL;
+}
+
+VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id);
+VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id)
+{
+ VirtIOIOMMUEndpoint *ep;
+
+ ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
+ if (ep) {
+ return ep;
+ }
+ if (!virtio_iommu_mr(s, ep_id)) {
+ return NULL;
+ }
+ ep = g_malloc0(sizeof(*ep));
+ ep->id = ep_id;
+ trace_virtio_iommu_get_endpoint(ep_id);
+ g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
+ return ep;
+}
+
+static void virtio_iommu_put_endpoint(gpointer data)
+{
+ VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;
+
+ assert(!ep->domain);
+
+ trace_virtio_iommu_put_endpoint(ep->id);
+ g_free(ep);
+}
+
+VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, uint32_t domain_id);
+VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, uint32_t domain_id)
+{
+ VirtIOIOMMUDomain *domain;
+
+ domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
+ if (domain) {
+ return domain;
+ }
+ domain = g_malloc0(sizeof(*domain));
+ domain->id = domain_id;
+ domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
+ NULL, (GDestroyNotify)g_free,
+ (GDestroyNotify)g_free);
+ g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
+ QLIST_INIT(&domain->endpoint_list);
+ trace_virtio_iommu_get_domain(domain_id);
+ return domain;
+}
+
+static void virtio_iommu_put_domain(gpointer data)
+{
+ VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
+ VirtIOIOMMUEndpoint *iter, *tmp;
+
+ QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
+ virtio_iommu_detach_endpoint_from_domain(iter);
+ }
+ g_tree_destroy(domain->mappings);
+ trace_virtio_iommu_put_domain(domain->id);
+ g_free(domain);
+}
+
static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
int devfn)
{
@@ -328,6 +428,13 @@ static const VMStateDescription vmstate_virtio_iommu_device = {
.unmigratable = 1,
};
+static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
+{
+ guint ua = GPOINTER_TO_UINT(a);
+ guint ub = GPOINTER_TO_UINT(b);
+ return (ua > ub) - (ua < ub);
+}
+
static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -369,13 +476,30 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
static void virtio_iommu_device_unrealize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOIOMMU *s = VIRTIO_IOMMU(dev);
+
+ g_tree_destroy(s->domains);
+ g_tree_destroy(s->endpoints);
virtio_cleanup(vdev);
}
static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
+ VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
+
trace_virtio_iommu_device_reset();
+
+ if (s->domains) {
+ g_tree_destroy(s->domains);
+ }
+ if (s->endpoints) {
+ g_tree_destroy(s->endpoints);
+ }
+ s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
+ NULL, NULL, virtio_iommu_put_domain);
+ s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
+ NULL, NULL, virtio_iommu_put_endpoint);
}
static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-09 14:43 ` [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers Eric Auger
@ 2020-01-13 20:23 ` Peter Xu
2020-01-14 8:51 ` Auger Eric
0 siblings, 1 reply; 39+ messages in thread
From: Peter Xu @ 2020-01-13 20:23 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:11PM +0100, Eric Auger wrote:
[...]
> +VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id);
> +VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id)
Is the extra definition trying to workaround the compiler
warning/error?
I'm not sure whether it's only me who prefer this, but again I'd
really perfer we move the function into the caller patch, add "static"
as needed altogether, even if that patch can be big.
> +{
> + VirtIOIOMMUEndpoint *ep;
> +
> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
> + if (ep) {
> + return ep;
> + }
> + if (!virtio_iommu_mr(s, ep_id)) {
Could I ask when this will trigger?
> + return NULL;
> + }
> + ep = g_malloc0(sizeof(*ep));
> + ep->id = ep_id;
> + trace_virtio_iommu_get_endpoint(ep_id);
> + g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
> + return ep;
> +}
Thanks,
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-13 20:23 ` Peter Xu
@ 2020-01-14 8:51 ` Auger Eric
2020-01-14 18:07 ` Peter Xu
0 siblings, 1 reply; 39+ messages in thread
From: Auger Eric @ 2020-01-14 8:51 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/13/20 9:23 PM, Peter Xu wrote:
> On Thu, Jan 09, 2020 at 03:43:11PM +0100, Eric Auger wrote:
>
> [...]
>
>> +VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id);
>> +VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id)
>
> Is the extra definition trying to workaround the compiler
> warning/error?
yes it does
>
> I'm not sure whether it's only me who prefer this, but again I'd
> really perfer we move the function into the caller patch, add "static"
> as needed altogether, even if that patch can be big.
OK I will do that.
>
>> +{
>> + VirtIOIOMMUEndpoint *ep;
>> +
>> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
>> + if (ep) {
>> + return ep;
>> + }
>> + if (!virtio_iommu_mr(s, ep_id)) {
>
> Could I ask when this will trigger?
This can happen when a device is attached to a domain and its RID does
not correspond to one of the devices protected by the iommu.
Thanks
Eric
>
>> + return NULL;
>> + }
>> + ep = g_malloc0(sizeof(*ep));
>> + ep->id = ep_id;
>> + trace_virtio_iommu_get_endpoint(ep_id);
>> + g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
>> + return ep;
>> +}
>
> Thanks,
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-14 8:51 ` Auger Eric
@ 2020-01-14 18:07 ` Peter Xu
2020-01-15 13:00 ` Auger Eric
2020-01-15 13:15 ` Auger Eric
0 siblings, 2 replies; 39+ messages in thread
From: Peter Xu @ 2020-01-14 18:07 UTC (permalink / raw)
To: Auger Eric
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Tue, Jan 14, 2020 at 09:51:59AM +0100, Auger Eric wrote:
> Hi Peter,
Hi, Eric,
[...]
> >
> >> +{
> >> + VirtIOIOMMUEndpoint *ep;
> >> +
> >> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
> >> + if (ep) {
> >> + return ep;
> >> + }
> >> + if (!virtio_iommu_mr(s, ep_id)) {
> >
> > Could I ask when this will trigger?
>
> This can happen when a device is attached to a domain and its RID does
> not correspond to one of the devices protected by the iommu.
So will it happen only because of a kernel driver bug?
Also, I think the name of "virtio_iommu_mr" is confusing on that it
returned explicitly a MemoryRegion however it's never been used:
(since they're not in the same patch I'm pasting)
static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
uint8_t bus_n, devfn;
IOMMUPciBus *iommu_pci_bus;
IOMMUDevice *dev;
bus_n = PCI_BUS_NUM(sid);
iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
if (iommu_pci_bus) {
devfn = sid & 0xFF;
dev = iommu_pci_bus->pbdev[devfn];
if (dev) {
return &dev->iommu_mr;
}
}
return NULL;
}
Maybe "return !!dev" would be enough, then make the return a boolean?
Then we can rename it to virtio_iommu_has_device().
PS. I think we can also drop IOMMU_PCI_DEVFN_MAX (after all you even
didn't use it here!) and use PCI_DEVFN_MAX, and replace 0xFF.
Thanks,
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-14 18:07 ` Peter Xu
@ 2020-01-15 13:00 ` Auger Eric
2020-01-15 14:59 ` Peter Xu
2020-01-15 13:15 ` Auger Eric
1 sibling, 1 reply; 39+ messages in thread
From: Auger Eric @ 2020-01-15 13:00 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/14/20 7:07 PM, Peter Xu wrote:
> On Tue, Jan 14, 2020 at 09:51:59AM +0100, Auger Eric wrote:
>> Hi Peter,
>
> Hi, Eric,
>
> [...]
>
>>>
>>>> +{
>>>> + VirtIOIOMMUEndpoint *ep;
>>>> +
>>>> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
>>>> + if (ep) {
>>>> + return ep;
>>>> + }
>>>> + if (!virtio_iommu_mr(s, ep_id)) {
>>>
>>> Could I ask when this will trigger?
>>
>> This can happen when a device is attached to a domain and its RID does
>> not correspond to one of the devices protected by the iommu.
>
> So will it happen only because of a kernel driver bug?
Yes, at the moment, because virtio_iommu_mr() only gets called on device
attach to a domain.
The spec says:
"If the endpoint identified by endpoint doesn’t exist, the device MUST
reject the request and set status to VIRTIO_IOMMU_S_NOENT"
>
> Also, I think the name of "virtio_iommu_mr" is confusing on that it
> returned explicitly a MemoryRegion however it's never been used:
I use the same prototype as for smmu_iommu_mr(). Returning the iommu mr
will allow to proceed with further RID based operations like invalidations.
The same logic is used in vtd_context_device_invalidate.
>
> (since they're not in the same patch I'm pasting)
>
> static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
> {
> uint8_t bus_n, devfn;
> IOMMUPciBus *iommu_pci_bus;
> IOMMUDevice *dev;
>
> bus_n = PCI_BUS_NUM(sid);
> iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
> if (iommu_pci_bus) {
> devfn = sid & 0xFF;
> dev = iommu_pci_bus->pbdev[devfn];
> if (dev) {
> return &dev->iommu_mr;
> }
> }
> return NULL;
> }
>
> Maybe "return !!dev" would be enough, then make the return a boolean?
> Then we can rename it to virtio_iommu_has_device().
>
> PS. I think we can also drop IOMMU_PCI_DEVFN_MAX (after all you even
> didn't use it here!) and use PCI_DEVFN_MAX, and replace 0xFF.
well intel iommu and smmu use a similar constant (PCI_DEVFN_MAX,
SMMU_PCI_DEVFN_MAX resp.). I use it in virtio_iommu_find_add_as
Thanks
Eric
>
> Thanks,
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-15 13:00 ` Auger Eric
@ 2020-01-15 14:59 ` Peter Xu
2020-01-15 16:55 ` Auger Eric
0 siblings, 1 reply; 39+ messages in thread
From: Peter Xu @ 2020-01-15 14:59 UTC (permalink / raw)
To: Auger Eric
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Wed, Jan 15, 2020 at 02:00:22PM +0100, Auger Eric wrote:
> Hi Peter,
>
>
> On 1/14/20 7:07 PM, Peter Xu wrote:
> > On Tue, Jan 14, 2020 at 09:51:59AM +0100, Auger Eric wrote:
> >> Hi Peter,
> >
> > Hi, Eric,
> >
> > [...]
> >
> >>>
> >>>> +{
> >>>> + VirtIOIOMMUEndpoint *ep;
> >>>> +
> >>>> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
> >>>> + if (ep) {
> >>>> + return ep;
> >>>> + }
> >>>> + if (!virtio_iommu_mr(s, ep_id)) {
> >>>
> >>> Could I ask when this will trigger?
> >>
> >> This can happen when a device is attached to a domain and its RID does
> >> not correspond to one of the devices protected by the iommu.
>
> >
> > So will it happen only because of a kernel driver bug?
>
> Yes, at the moment, because virtio_iommu_mr() only gets called on device
> attach to a domain.
>
> The spec says:
> "If the endpoint identified by endpoint doesn’t exist, the device MUST
> reject the request and set status to VIRTIO_IOMMU_S_NOENT"
Sure. I just wanted to make sure I didn't miss anything, because I
really can't see when this extra logic can help, say, right now we
only have one vIOMMU at least for VT-d, so all devices will be managed
by that. But yeah if that's explicitly mentioned in the spec, I'd
agree we should follow that.
> >
> > Also, I think the name of "virtio_iommu_mr" is confusing on that it
> > returned explicitly a MemoryRegion however it's never been used:
>
> I use the same prototype as for smmu_iommu_mr(). Returning the iommu mr
> will allow to proceed with further RID based operations like invalidations.
>
> The same logic is used in vtd_context_device_invalidate.
I'm fine with this. Let's keep virtio_iommu_mr() as you prefer.
Another thing I'd like to mention is that, I don't think "the same
logic is used in VT-d" matters much. If we think something is wrong
(even if it's the same in VT-d), why not we fix both? :-)
Thanks,
>
>
> >
> > (since they're not in the same patch I'm pasting)
> >
> > static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
> > {
> > uint8_t bus_n, devfn;
> > IOMMUPciBus *iommu_pci_bus;
> > IOMMUDevice *dev;
> >
> > bus_n = PCI_BUS_NUM(sid);
> > iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
> > if (iommu_pci_bus) {
> > devfn = sid & 0xFF;
> > dev = iommu_pci_bus->pbdev[devfn];
> > if (dev) {
> > return &dev->iommu_mr;
> > }
> > }
> > return NULL;
> > }
> >
> > Maybe "return !!dev" would be enough, then make the return a boolean?
> > Then we can rename it to virtio_iommu_has_device().
> >
> > PS. I think we can also drop IOMMU_PCI_DEVFN_MAX (after all you even
> > didn't use it here!) and use PCI_DEVFN_MAX, and replace 0xFF.
> well intel iommu and smmu use a similar constant (PCI_DEVFN_MAX,
> SMMU_PCI_DEVFN_MAX resp.). I use it in virtio_iommu_find_add_as
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-15 14:59 ` Peter Xu
@ 2020-01-15 16:55 ` Auger Eric
0 siblings, 0 replies; 39+ messages in thread
From: Auger Eric @ 2020-01-15 16:55 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/15/20 3:59 PM, Peter Xu wrote:
> On Wed, Jan 15, 2020 at 02:00:22PM +0100, Auger Eric wrote:
>> Hi Peter,
>>
>>
>> On 1/14/20 7:07 PM, Peter Xu wrote:
>>> On Tue, Jan 14, 2020 at 09:51:59AM +0100, Auger Eric wrote:
>>>> Hi Peter,
>>>
>>> Hi, Eric,
>>>
>>> [...]
>>>
>>>>>
>>>>>> +{
>>>>>> + VirtIOIOMMUEndpoint *ep;
>>>>>> +
>>>>>> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
>>>>>> + if (ep) {
>>>>>> + return ep;
>>>>>> + }
>>>>>> + if (!virtio_iommu_mr(s, ep_id)) {
>>>>>
>>>>> Could I ask when this will trigger?
>>>>
>>>> This can happen when a device is attached to a domain and its RID does
>>>> not correspond to one of the devices protected by the iommu.
>>
>>>
>>> So will it happen only because of a kernel driver bug?
>>
>> Yes, at the moment, because virtio_iommu_mr() only gets called on device
>> attach to a domain.
>>
>> The spec says:
>> "If the endpoint identified by endpoint doesn’t exist, the device MUST
>> reject the request and set status to VIRTIO_IOMMU_S_NOENT"
>
> Sure. I just wanted to make sure I didn't miss anything, because I
> really can't see when this extra logic can help, say, right now we
> only have one vIOMMU at least for VT-d, so all devices will be managed
> by that. But yeah if that's explicitly mentioned in the spec, I'd
> agree we should follow that.
>
>>>
>>> Also, I think the name of "virtio_iommu_mr" is confusing on that it
>>> returned explicitly a MemoryRegion however it's never been used:
>>
>> I use the same prototype as for smmu_iommu_mr(). Returning the iommu mr
>> will allow to proceed with further RID based operations like invalidations.
>>
>> The same logic is used in vtd_context_device_invalidate.
>
> I'm fine with this. Let's keep virtio_iommu_mr() as you prefer.
>
> Another thing I'd like to mention is that, I don't think "the same
> logic is used in VT-d" matters much. If we think something is wrong
> (even if it's the same in VT-d), why not we fix both? :-)
Sure ;-)
Thank you for your time
Eric
>
> Thanks,
>
>>
>>
>>>
>>> (since they're not in the same patch I'm pasting)
>>>
>>> static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
>>> {
>>> uint8_t bus_n, devfn;
>>> IOMMUPciBus *iommu_pci_bus;
>>> IOMMUDevice *dev;
>>>
>>> bus_n = PCI_BUS_NUM(sid);
>>> iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
>>> if (iommu_pci_bus) {
>>> devfn = sid & 0xFF;
>>> dev = iommu_pci_bus->pbdev[devfn];
>>> if (dev) {
>>> return &dev->iommu_mr;
>>> }
>>> }
>>> return NULL;
>>> }
>>>
>>> Maybe "return !!dev" would be enough, then make the return a boolean?
>>> Then we can rename it to virtio_iommu_has_device().
>>>
>>> PS. I think we can also drop IOMMU_PCI_DEVFN_MAX (after all you even
>>> didn't use it here!) and use PCI_DEVFN_MAX, and replace 0xFF.
>> well intel iommu and smmu use a similar constant (PCI_DEVFN_MAX,
>> SMMU_PCI_DEVFN_MAX resp.). I use it in virtio_iommu_find_add_as
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers
2020-01-14 18:07 ` Peter Xu
2020-01-15 13:00 ` Auger Eric
@ 2020-01-15 13:15 ` Auger Eric
1 sibling, 0 replies; 39+ messages in thread
From: Auger Eric @ 2020-01-15 13:15 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/14/20 7:07 PM, Peter Xu wrote:
> On Tue, Jan 14, 2020 at 09:51:59AM +0100, Auger Eric wrote:
>> Hi Peter,
>
> Hi, Eric,
>
> [...]
>
>>>
>>>> +{
>>>> + VirtIOIOMMUEndpoint *ep;
>>>> +
>>>> + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
>>>> + if (ep) {
>>>> + return ep;
>>>> + }
>>>> + if (!virtio_iommu_mr(s, ep_id)) {
>>>
>>> Could I ask when this will trigger?
>>
>> This can happen when a device is attached to a domain and its RID does
>> not correspond to one of the devices protected by the iommu.
>
> So will it happen only because of a kernel driver bug?
>
> Also, I think the name of "virtio_iommu_mr" is confusing on that it
> returned explicitly a MemoryRegion however it's never been used:
>
> (since they're not in the same patch I'm pasting)
>
> static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
> {
> uint8_t bus_n, devfn;
> IOMMUPciBus *iommu_pci_bus;
> IOMMUDevice *dev;
>
> bus_n = PCI_BUS_NUM(sid);
> iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
> if (iommu_pci_bus) {
> devfn = sid & 0xFF;
> dev = iommu_pci_bus->pbdev[devfn];
> if (dev) {
> return &dev->iommu_mr;
> }
> }
> return NULL;
> }
>
> Maybe "return !!dev" would be enough, then make the return a boolean?
> Then we can rename it to virtio_iommu_has_device().
>
> PS. I think we can also drop IOMMU_PCI_DEVFN_MAX (after all you even
> didn't use it here!) and use PCI_DEVFN_MAX, and replace 0xFF.
Oh yes I can use PCI_DEVFN_MAX directly. Sorry.
Eric
>
> Thanks,
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* [PATCH v12 06/13] virtio-iommu: Implement attach/detach command
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (4 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 05/13] virtio-iommu: Endpoint and domains structs and helpers Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 14:43 ` [PATCH v12 07/13] virtio-iommu: Implement map/unmap Eric Auger
` (7 subsequent siblings)
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch implements the endpoint attach/detach to/from
a domain.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- check the device is protected by the iommu on attach
- on detach, check the domain id the device is attached to matches
the one used in the detach command
- fix mapping ref counter and destroy the domain when no end-points
are attached anymore
---
hw/virtio/virtio-iommu.c | 65 +++++++++++++++++++++++++++++++++++-----
1 file changed, 58 insertions(+), 7 deletions(-)
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 6a03c3d7ae..63b2d30f28 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -119,11 +119,12 @@ static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
QLIST_REMOVE(ep, next);
+ g_tree_unref(ep->domain->mappings);
ep->domain = NULL;
}
-VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id);
-VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, uint32_t ep_id)
+static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
+ uint32_t ep_id)
{
VirtIOIOMMUEndpoint *ep;
@@ -151,8 +152,8 @@ static void virtio_iommu_put_endpoint(gpointer data)
g_free(ep);
}
-VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, uint32_t domain_id);
-VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, uint32_t domain_id)
+static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
+ uint32_t domain_id)
{
VirtIOIOMMUDomain *domain;
@@ -179,7 +180,6 @@ static void virtio_iommu_put_domain(gpointer data)
QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
virtio_iommu_detach_endpoint_from_domain(iter);
}
- g_tree_destroy(domain->mappings);
trace_virtio_iommu_put_domain(domain->id);
g_free(domain);
}
@@ -228,10 +228,37 @@ static int virtio_iommu_attach(VirtIOIOMMU *s,
{
uint32_t domain_id = le32_to_cpu(req->domain);
uint32_t ep_id = le32_to_cpu(req->endpoint);
+ VirtIOIOMMUDomain *domain;
+ VirtIOIOMMUEndpoint *ep;
trace_virtio_iommu_attach(domain_id, ep_id);
- return VIRTIO_IOMMU_S_UNSUPP;
+ ep = virtio_iommu_get_endpoint(s, ep_id);
+ if (!ep) {
+ return VIRTIO_IOMMU_S_NOENT;
+ }
+
+ if (ep->domain) {
+ VirtIOIOMMUDomain *previous_domain = ep->domain;
+ /*
+ * the device is already attached to a domain,
+ * detach it first
+ */
+ virtio_iommu_detach_endpoint_from_domain(ep);
+ if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
+ g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
+ }
+ }
+
+ domain = virtio_iommu_get_domain(s, domain_id);
+ if (!QLIST_EMPTY(&domain->endpoint_list)) {
+ g_tree_ref(domain->mappings);
+ }
+ QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);
+
+ ep->domain = domain;
+
+ return VIRTIO_IOMMU_S_OK;
}
static int virtio_iommu_detach(VirtIOIOMMU *s,
@@ -239,10 +266,34 @@ static int virtio_iommu_detach(VirtIOIOMMU *s,
{
uint32_t domain_id = le32_to_cpu(req->domain);
uint32_t ep_id = le32_to_cpu(req->endpoint);
+ VirtIOIOMMUDomain *domain;
+ VirtIOIOMMUEndpoint *ep;
trace_virtio_iommu_detach(domain_id, ep_id);
- return VIRTIO_IOMMU_S_UNSUPP;
+ ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
+ if (!ep) {
+ return VIRTIO_IOMMU_S_NOENT;
+ }
+
+ domain = ep->domain;
+
+ if (!domain || domain->id != domain_id) {
+ return VIRTIO_IOMMU_S_INVAL;
+ }
+
+ virtio_iommu_detach_endpoint_from_domain(ep);
+
+ /*
+ * when the last EP is detached, simply remove the domain from
+ * the domain list and destroy it. Note its mappings were already
+ * freed by the ref count mechanism. Next operation involving
+ * the same domain id will re-create one domain object.
+ */
+ if (QLIST_EMPTY(&domain->endpoint_list)) {
+ g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
+ }
+ return VIRTIO_IOMMU_S_OK;
}
static int virtio_iommu_map(VirtIOIOMMU *s,
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [PATCH v12 07/13] virtio-iommu: Implement map/unmap
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (5 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 06/13] virtio-iommu: Implement attach/detach command Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-14 18:55 ` Peter Xu
2020-01-09 14:43 ` [PATCH v12 08/13] virtio-iommu: Implement translate Eric Auger
` (6 subsequent siblings)
13 siblings, 1 reply; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch implements virtio_iommu_map/unmap.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- check unmanaged flags on map
- fix leak
v10 -> v11:
- revisit the implementation of unmap according to Peter's suggestion
- removed virt_addr and size from viommu_mapping struct
- use g_tree_lookup_extended()
- return VIRTIO_IOMMU_S_RANGE in case a mapping were
to be split on unmap (instead of INVAL)
v5 -> v6:
- use new v0.6 fields
- replace error_report by qemu_log_mask
v3 -> v4:
- implement unmap semantics as specified in v0.4
---
hw/virtio/trace-events | 1 +
hw/virtio/virtio-iommu.c | 69 ++++++++++++++++++++++++++++++++++++++--
2 files changed, 68 insertions(+), 2 deletions(-)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 15595f8cd7..22162d6583 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -64,6 +64,7 @@ virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d"
virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 " phys_start=0x%"PRIx64" flags=%d"
virtio_iommu_unmap(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
+virtio_iommu_unmap_done(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64
virtio_iommu_translate(const char *name, uint32_t rid, uint64_t iova, int flag) "mr=%s rid=%d addr=0x%"PRIx64" flag=%d"
virtio_iommu_init_iommu_mr(char *iommu_mr) "init %s"
virtio_iommu_get_endpoint(uint32_t ep_id) "Alloc endpoint=%d"
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 63b2d30f28..6df6ad98f6 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "qemu/iov.h"
#include "qemu-common.h"
#include "hw/qdev-properties.h"
@@ -55,6 +56,11 @@ typedef struct VirtIOIOMMUInterval {
uint64_t high;
} VirtIOIOMMUInterval;
+typedef struct VirtIOIOMMUMapping {
+ uint64_t phys_addr;
+ uint32_t flags;
+} VirtIOIOMMUMapping;
+
static inline uint16_t virtio_iommu_get_sid(IOMMUDevice *dev)
{
return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
@@ -304,10 +310,39 @@ static int virtio_iommu_map(VirtIOIOMMU *s,
uint64_t virt_start = le64_to_cpu(req->virt_start);
uint64_t virt_end = le64_to_cpu(req->virt_end);
uint32_t flags = le32_to_cpu(req->flags);
+ VirtIOIOMMUDomain *domain;
+ VirtIOIOMMUInterval *interval;
+ VirtIOIOMMUMapping *mapping;
+
+ if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
+ return VIRTIO_IOMMU_S_INVAL;
+ }
+
+ domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
+ if (!domain) {
+ return VIRTIO_IOMMU_S_NOENT;
+ }
+
+ interval = g_malloc0(sizeof(*interval));
+
+ interval->low = virt_start;
+ interval->high = virt_end;
+
+ mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
+ if (mapping) {
+ g_free(interval);
+ return VIRTIO_IOMMU_S_INVAL;
+ }
trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);
- return VIRTIO_IOMMU_S_UNSUPP;
+ mapping = g_malloc0(sizeof(*mapping));
+ mapping->phys_addr = phys_start;
+ mapping->flags = flags;
+
+ g_tree_insert(domain->mappings, interval, mapping);
+
+ return VIRTIO_IOMMU_S_OK;
}
static int virtio_iommu_unmap(VirtIOIOMMU *s,
@@ -316,10 +351,40 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s,
uint32_t domain_id = le32_to_cpu(req->domain);
uint64_t virt_start = le64_to_cpu(req->virt_start);
uint64_t virt_end = le64_to_cpu(req->virt_end);
+ VirtIOIOMMUMapping *iter_val;
+ VirtIOIOMMUInterval interval, *iter_key;
+ VirtIOIOMMUDomain *domain;
+ int ret = VIRTIO_IOMMU_S_OK;
trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);
- return VIRTIO_IOMMU_S_UNSUPP;
+ domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
+ if (!domain) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no domain\n", __func__);
+ return VIRTIO_IOMMU_S_NOENT;
+ }
+ interval.low = virt_start;
+ interval.high = virt_end;
+
+ while (g_tree_lookup_extended(domain->mappings, &interval,
+ (void **)&iter_key, (void**)&iter_val)) {
+ uint64_t current_low = iter_key->low;
+ uint64_t current_high = iter_key->high;
+
+ if (interval.low <= current_low && interval.high >= current_high) {
+ g_tree_remove(domain->mappings, iter_key);
+ trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: domain= %d Unmap [0x%"PRIx64",0x%"PRIx64"] forbidden as "
+ "it would split existing mapping [0x%"PRIx64", 0x%"PRIx64"]\n",
+ __func__, domain_id, interval.low, interval.high,
+ current_low, current_high);
+ ret = VIRTIO_IOMMU_S_RANGE;
+ break;
+ }
+ }
+ return ret;
}
static int virtio_iommu_iov_to_req(struct iovec *iov,
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 07/13] virtio-iommu: Implement map/unmap
2020-01-09 14:43 ` [PATCH v12 07/13] virtio-iommu: Implement map/unmap Eric Auger
@ 2020-01-14 18:55 ` Peter Xu
0 siblings, 0 replies; 39+ messages in thread
From: Peter Xu @ 2020-01-14 18:55 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:13PM +0100, Eric Auger wrote:
> This patch implements virtio_iommu_map/unmap.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* [PATCH v12 08/13] virtio-iommu: Implement translate
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (6 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 07/13] virtio-iommu: Implement map/unmap Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 14:43 ` [PATCH v12 09/13] virtio-iommu: Implement fault reporting Eric Auger
` (5 subsequent siblings)
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch implements the translate callback
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v11 -> v12:
- Added Jean's R-b
- s/qemu_log_mask/error_report_once
v10 -> v11:
- take into account the new value struct and use
g_tree_lookup_extended
- switched to error_report_once
v6 -> v7:
- implemented bypass-mode
v5 -> v6:
- replace error_report by qemu_log_mask
v4 -> v5:
- check the device domain is not NULL
- s/printf/error_report
- set flags to IOMMU_NONE in case of all translation faults
---
hw/virtio/trace-events | 1 +
hw/virtio/virtio-iommu.c | 60 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 22162d6583..095aa8b509 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -71,3 +71,4 @@ virtio_iommu_get_endpoint(uint32_t ep_id) "Alloc endpoint=%d"
virtio_iommu_put_endpoint(uint32_t ep_id) "Free endpoint=%d"
virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
+virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 6df6ad98f6..d192bcb505 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -482,19 +482,77 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
int iommu_idx)
{
IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
+ VirtIOIOMMUInterval interval, *mapping_key;
+ VirtIOIOMMUMapping *mapping_value;
+ VirtIOIOMMU *s = sdev->viommu;
+ VirtIOIOMMUEndpoint *ep;
+ bool bypass_allowed;
uint32_t sid;
+ bool found;
+
+ interval.low = addr;
+ interval.high = addr + 1;
IOMMUTLBEntry entry = {
.target_as = &address_space_memory,
.iova = addr,
.translated_addr = addr,
- .addr_mask = ~(hwaddr)0,
+ .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
.perm = IOMMU_NONE,
};
+ bypass_allowed = virtio_vdev_has_feature(&s->parent_obj,
+ VIRTIO_IOMMU_F_BYPASS);
+
sid = virtio_iommu_get_sid(sdev);
trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
+ qemu_mutex_lock(&s->mutex);
+
+ ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
+ if (!ep) {
+ if (!bypass_allowed) {
+ error_report_once("%s sid=%d is not known!!", __func__, sid);
+ } else {
+ entry.perm = flag;
+ }
+ goto unlock;
+ }
+
+ if (!ep->domain) {
+ if (!bypass_allowed) {
+ error_report_once("%s %02x:%02x.%01x not attached to any domain",
+ __func__, PCI_BUS_NUM(sid),
+ PCI_SLOT(sid), PCI_FUNC(sid));
+ } else {
+ entry.perm = flag;
+ }
+ goto unlock;
+ }
+
+ found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
+ (void **)&mapping_key,
+ (void **)&mapping_value);
+ if (!found) {
+ error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
+ __func__, addr, sid);
+ goto unlock;
+ }
+
+ if (((flag & IOMMU_RO) &&
+ !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ)) ||
+ ((flag & IOMMU_WO) &&
+ !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE))) {
+ error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
+ __func__, addr, flag, mapping_value->flags);
+ goto unlock;
+ }
+ entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
+ entry.perm = flag;
+ trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);
+
+unlock:
+ qemu_mutex_unlock(&s->mutex);
return entry;
}
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [PATCH v12 09/13] virtio-iommu: Implement fault reporting
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (7 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 08/13] virtio-iommu: Implement translate Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-14 19:04 ` Peter Xu
2020-01-09 14:43 ` [PATCH v12 10/13] virtio-iommu-pci: Add virtio iommu pci support Eric Auger
` (4 subsequent siblings)
13 siblings, 1 reply; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
The event queue allows to report asynchronous errors.
The translate function now injects faults when relevant.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- reporting the addr associated with the fault and set the
VIRTIO_IOMMU_FAULT_F_ADDRESS flag.
- added cpu_to_le<n>
v10 -> v11:
- change a virtio_error into an error_report_once
(no buffer available for output faults)
---
hw/virtio/trace-events | 1 +
hw/virtio/virtio-iommu.c | 73 +++++++++++++++++++++++++++++++++++++---
2 files changed, 69 insertions(+), 5 deletions(-)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 095aa8b509..e83500bee9 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -72,3 +72,4 @@ virtio_iommu_put_endpoint(uint32_t ep_id) "Free endpoint=%d"
virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
+virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index d192bcb505..09193970ee 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -477,6 +477,51 @@ out:
}
}
+static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
+ int flags, uint32_t endpoint,
+ uint64_t address)
+{
+ VirtIODevice *vdev = &viommu->parent_obj;
+ VirtQueue *vq = viommu->event_vq;
+ struct virtio_iommu_fault fault;
+ VirtQueueElement *elem;
+ size_t sz;
+
+ memset(&fault, 0, sizeof(fault));
+ fault.reason = reason;
+ fault.flags = cpu_to_le32(flags);
+ fault.endpoint = cpu_to_le32(endpoint);
+ fault.address = cpu_to_le64(address);
+
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+
+ if (!elem) {
+ error_report_once(
+ "no buffer available in event queue to report event");
+ return;
+ }
+
+ if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
+ virtio_error(vdev, "error buffer of wrong size");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ continue;
+ }
+ break;
+ }
+ /* we have a buffer to fill in */
+ sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
+ &fault, sizeof(fault));
+ assert(sz == sizeof(fault));
+
+ trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
+ virtqueue_push(vq, elem, sz);
+ virtio_notify(vdev, vq);
+ g_free(elem);
+
+}
+
static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
IOMMUAccessFlags flag,
int iommu_idx)
@@ -485,9 +530,10 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
VirtIOIOMMUInterval interval, *mapping_key;
VirtIOIOMMUMapping *mapping_value;
VirtIOIOMMU *s = sdev->viommu;
+ bool read_fault, write_fault;
VirtIOIOMMUEndpoint *ep;
+ uint32_t sid, flags;
bool bypass_allowed;
- uint32_t sid;
bool found;
interval.low = addr;
@@ -513,6 +559,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
if (!ep) {
if (!bypass_allowed) {
error_report_once("%s sid=%d is not known!!", __func__, sid);
+ virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
+ VIRTIO_IOMMU_FAULT_F_ADDRESS,
+ sid, addr);
} else {
entry.perm = flag;
}
@@ -524,6 +573,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
error_report_once("%s %02x:%02x.%01x not attached to any domain",
__func__, PCI_BUS_NUM(sid),
PCI_SLOT(sid), PCI_FUNC(sid));
+ virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
+ VIRTIO_IOMMU_FAULT_F_ADDRESS,
+ sid, addr);
} else {
entry.perm = flag;
}
@@ -536,15 +588,26 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
if (!found) {
error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
__func__, addr, sid);
+ virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
+ VIRTIO_IOMMU_FAULT_F_ADDRESS,
+ sid, addr);
goto unlock;
}
- if (((flag & IOMMU_RO) &&
- !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ)) ||
- ((flag & IOMMU_WO) &&
- !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE))) {
+ read_fault = (flag & IOMMU_RO) &&
+ !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
+ write_fault = (flag & IOMMU_WO) &&
+ !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);
+
+ flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
+ flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
+ if (flags) {
error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
__func__, addr, flag, mapping_value->flags);
+ flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
+ virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
+ flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
+ sid, addr);
goto unlock;
}
entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 09/13] virtio-iommu: Implement fault reporting
2020-01-09 14:43 ` [PATCH v12 09/13] virtio-iommu: Implement fault reporting Eric Auger
@ 2020-01-14 19:04 ` Peter Xu
2020-01-15 13:12 ` Auger Eric
0 siblings, 1 reply; 39+ messages in thread
From: Peter Xu @ 2020-01-14 19:04 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:15PM +0100, Eric Auger wrote:
> The event queue allows to report asynchronous errors.
> The translate function now injects faults when relevant.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>
> ---
>
> v11 -> v12:
> - reporting the addr associated with the fault and set the
> VIRTIO_IOMMU_FAULT_F_ADDRESS flag.
> - added cpu_to_le<n>
>
> v10 -> v11:
> - change a virtio_error into an error_report_once
> (no buffer available for output faults)
> ---
> hw/virtio/trace-events | 1 +
> hw/virtio/virtio-iommu.c | 73 +++++++++++++++++++++++++++++++++++++---
> 2 files changed, 69 insertions(+), 5 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 095aa8b509..e83500bee9 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -72,3 +72,4 @@ virtio_iommu_put_endpoint(uint32_t ep_id) "Free endpoint=%d"
> virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
> virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
> virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
> +virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
> diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
> index d192bcb505..09193970ee 100644
> --- a/hw/virtio/virtio-iommu.c
> +++ b/hw/virtio/virtio-iommu.c
> @@ -477,6 +477,51 @@ out:
> }
> }
>
> +static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
> + int flags, uint32_t endpoint,
> + uint64_t address)
> +{
> + VirtIODevice *vdev = &viommu->parent_obj;
> + VirtQueue *vq = viommu->event_vq;
> + struct virtio_iommu_fault fault;
> + VirtQueueElement *elem;
> + size_t sz;
> +
> + memset(&fault, 0, sizeof(fault));
> + fault.reason = reason;
> + fault.flags = cpu_to_le32(flags);
> + fault.endpoint = cpu_to_le32(endpoint);
> + fault.address = cpu_to_le64(address);
> +
> + for (;;) {
> + elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
> +
> + if (!elem) {
> + error_report_once(
> + "no buffer available in event queue to report event");
(Should this also be a guest issue? IIRC you are still using a
mixture of both qemu_log_mask and error_report()... I'll stop
commenting on this, assuming that you prefer both ways to be used...)
> + return;
> + }
> +
> + if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
> + virtio_error(vdev, "error buffer of wrong size");
> + virtqueue_detach_element(vq, elem, 0);
> + g_free(elem);
> + continue;
If virtio_error(), should we stop rather than continue?
> + }
> + break;
> + }
> + /* we have a buffer to fill in */
> + sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
> + &fault, sizeof(fault));
> + assert(sz == sizeof(fault));
> +
> + trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
> + virtqueue_push(vq, elem, sz);
> + virtio_notify(vdev, vq);
> + g_free(elem);
> +
> +}
> +
> static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
> IOMMUAccessFlags flag,
> int iommu_idx)
> @@ -485,9 +530,10 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
> VirtIOIOMMUInterval interval, *mapping_key;
> VirtIOIOMMUMapping *mapping_value;
> VirtIOIOMMU *s = sdev->viommu;
> + bool read_fault, write_fault;
> VirtIOIOMMUEndpoint *ep;
> + uint32_t sid, flags;
> bool bypass_allowed;
> - uint32_t sid;
> bool found;
>
> interval.low = addr;
> @@ -513,6 +559,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
> if (!ep) {
> if (!bypass_allowed) {
> error_report_once("%s sid=%d is not known!!", __func__, sid);
> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
> + VIRTIO_IOMMU_FAULT_F_ADDRESS,
> + sid, addr);
> } else {
> entry.perm = flag;
> }
> @@ -524,6 +573,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
> error_report_once("%s %02x:%02x.%01x not attached to any domain",
> __func__, PCI_BUS_NUM(sid),
> PCI_SLOT(sid), PCI_FUNC(sid));
> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
> + VIRTIO_IOMMU_FAULT_F_ADDRESS,
> + sid, addr);
> } else {
> entry.perm = flag;
> }
> @@ -536,15 +588,26 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
> if (!found) {
> error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
> __func__, addr, sid);
> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
> + VIRTIO_IOMMU_FAULT_F_ADDRESS,
> + sid, addr);
> goto unlock;
> }
>
> - if (((flag & IOMMU_RO) &&
> - !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ)) ||
> - ((flag & IOMMU_WO) &&
> - !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE))) {
> + read_fault = (flag & IOMMU_RO) &&
> + !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
> + write_fault = (flag & IOMMU_WO) &&
> + !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);
> +
> + flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
> + flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
> + if (flags) {
> error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
> __func__, addr, flag, mapping_value->flags);
> + flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
> + flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
> + sid, addr);
> goto unlock;
> }
> entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
> --
> 2.20.1
>
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 09/13] virtio-iommu: Implement fault reporting
2020-01-14 19:04 ` Peter Xu
@ 2020-01-15 13:12 ` Auger Eric
2020-01-15 15:04 ` Peter Xu
0 siblings, 1 reply; 39+ messages in thread
From: Auger Eric @ 2020-01-15 13:12 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
Hi Peter,
On 1/14/20 8:04 PM, Peter Xu wrote:
> On Thu, Jan 09, 2020 at 03:43:15PM +0100, Eric Auger wrote:
>> The event queue allows to report asynchronous errors.
>> The translate function now injects faults when relevant.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>
>> ---
>>
>> v11 -> v12:
>> - reporting the addr associated with the fault and set the
>> VIRTIO_IOMMU_FAULT_F_ADDRESS flag.
>> - added cpu_to_le<n>
>>
>> v10 -> v11:
>> - change a virtio_error into an error_report_once
>> (no buffer available for output faults)
>> ---
>> hw/virtio/trace-events | 1 +
>> hw/virtio/virtio-iommu.c | 73 +++++++++++++++++++++++++++++++++++++---
>> 2 files changed, 69 insertions(+), 5 deletions(-)
>>
>> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
>> index 095aa8b509..e83500bee9 100644
>> --- a/hw/virtio/trace-events
>> +++ b/hw/virtio/trace-events
>> @@ -72,3 +72,4 @@ virtio_iommu_put_endpoint(uint32_t ep_id) "Free endpoint=%d"
>> virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
>> virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
>> virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
>> +virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
>> diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
>> index d192bcb505..09193970ee 100644
>> --- a/hw/virtio/virtio-iommu.c
>> +++ b/hw/virtio/virtio-iommu.c
>> @@ -477,6 +477,51 @@ out:
>> }
>> }
>>
>> +static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
>> + int flags, uint32_t endpoint,
>> + uint64_t address)
>> +{
>> + VirtIODevice *vdev = &viommu->parent_obj;
>> + VirtQueue *vq = viommu->event_vq;
>> + struct virtio_iommu_fault fault;
>> + VirtQueueElement *elem;
>> + size_t sz;
>> +
>> + memset(&fault, 0, sizeof(fault));
>> + fault.reason = reason;
>> + fault.flags = cpu_to_le32(flags);
>> + fault.endpoint = cpu_to_le32(endpoint);
>> + fault.address = cpu_to_le64(address);
>> +
>> + for (;;) {
>> + elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
>> +
>> + if (!elem) {
>> + error_report_once(
>> + "no buffer available in event queue to report event");
>
> (Should this also be a guest issue? IIRC you are still using a
> mixture of both qemu_log_mask and error_report()... I'll stop
> commenting on this, assuming that you prefer both ways to be used...)
I've just removed the qemu_log_mask in virtio_iommu_unmap(). So now you
should not find any qemu_log_mask anymore. Sorry for the oversight.
>
>> + return;
>> + }
>> +
>> + if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
>> + virtio_error(vdev, "error buffer of wrong size");
>> + virtqueue_detach_element(vq, elem, 0);
>> + g_free(elem);
>> + continue;
>
> If virtio_error(), should we stop rather than continue?
My understanding is the buffer just popped had a wrong size so it is not
usable. We skip it and we try to use another one if any. Does it make sense?
Thanks
Eric
>
>> + }
>> + break;
>> + }
>> + /* we have a buffer to fill in */
>> + sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
>> + &fault, sizeof(fault));
>> + assert(sz == sizeof(fault));
>> +
>> + trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
>> + virtqueue_push(vq, elem, sz);
>> + virtio_notify(vdev, vq);
>> + g_free(elem);
>> +
>> +}
>> +
>> static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
>> IOMMUAccessFlags flag,
>> int iommu_idx)
>> @@ -485,9 +530,10 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
>> VirtIOIOMMUInterval interval, *mapping_key;
>> VirtIOIOMMUMapping *mapping_value;
>> VirtIOIOMMU *s = sdev->viommu;
>> + bool read_fault, write_fault;
>> VirtIOIOMMUEndpoint *ep;
>> + uint32_t sid, flags;
>> bool bypass_allowed;
>> - uint32_t sid;
>> bool found;
>>
>> interval.low = addr;
>> @@ -513,6 +559,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
>> if (!ep) {
>> if (!bypass_allowed) {
>> error_report_once("%s sid=%d is not known!!", __func__, sid);
>> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
>> + VIRTIO_IOMMU_FAULT_F_ADDRESS,
>> + sid, addr);
>> } else {
>> entry.perm = flag;
>> }
>> @@ -524,6 +573,9 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
>> error_report_once("%s %02x:%02x.%01x not attached to any domain",
>> __func__, PCI_BUS_NUM(sid),
>> PCI_SLOT(sid), PCI_FUNC(sid));
>> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
>> + VIRTIO_IOMMU_FAULT_F_ADDRESS,
>> + sid, addr);
>> } else {
>> entry.perm = flag;
>> }
>> @@ -536,15 +588,26 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
>> if (!found) {
>> error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
>> __func__, addr, sid);
>> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
>> + VIRTIO_IOMMU_FAULT_F_ADDRESS,
>> + sid, addr);
>> goto unlock;
>> }
>>
>> - if (((flag & IOMMU_RO) &&
>> - !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ)) ||
>> - ((flag & IOMMU_WO) &&
>> - !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE))) {
>> + read_fault = (flag & IOMMU_RO) &&
>> + !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
>> + write_fault = (flag & IOMMU_WO) &&
>> + !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);
>> +
>> + flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
>> + flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
>> + if (flags) {
>> error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
>> __func__, addr, flag, mapping_value->flags);
>> + flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
>> + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
>> + flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
>> + sid, addr);
>> goto unlock;
>> }
>> entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
>> --
>> 2.20.1
>>
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 09/13] virtio-iommu: Implement fault reporting
2020-01-15 13:12 ` Auger Eric
@ 2020-01-15 15:04 ` Peter Xu
2020-01-15 16:36 ` Auger Eric
0 siblings, 1 reply; 39+ messages in thread
From: Peter Xu @ 2020-01-15 15:04 UTC (permalink / raw)
To: Auger Eric
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Wed, Jan 15, 2020 at 02:12:20PM +0100, Auger Eric wrote:
> >> +static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
> >> + int flags, uint32_t endpoint,
> >> + uint64_t address)
> >> +{
[...]
> >> + if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
> >> + virtio_error(vdev, "error buffer of wrong size");
> >> + virtqueue_detach_element(vq, elem, 0);
> >> + g_free(elem);
> >> + continue;
> >
> > If virtio_error(), should we stop rather than continue?
> My understanding is the buffer just popped had a wrong size so it is not
> usable. We skip it and we try to use another one if any. Does it make sense?
I'm not very familiar to virtio, but I see that virtio_error marks
vdev->broken to true. If with that iiuc the next virtqueue_pop() will
fail directly (see the first call to virtio_device_disabled in
virtqueue_pop). Then I don't see why retry any more...
Thanks,
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 09/13] virtio-iommu: Implement fault reporting
2020-01-15 15:04 ` Peter Xu
@ 2020-01-15 16:36 ` Auger Eric
0 siblings, 0 replies; 39+ messages in thread
From: Auger Eric @ 2020-01-15 16:36 UTC (permalink / raw)
To: Peter Xu
Cc: peter.maydell, kevin.tian, tnowicki, quintela, mst, qemu-devel,
dgilbert, bharatb.linux, qemu-arm, jean-philippe, eric.auger.pro
Hi Peter,
On 1/15/20 4:04 PM, Peter Xu wrote:
> On Wed, Jan 15, 2020 at 02:12:20PM +0100, Auger Eric wrote:
>>>> +static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
>>>> + int flags, uint32_t endpoint,
>>>> + uint64_t address)
>>>> +{
>
> [...]
>
>>>> + if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
>>>> + virtio_error(vdev, "error buffer of wrong size");
>>>> + virtqueue_detach_element(vq, elem, 0);
>>>> + g_free(elem);
>>>> + continue;
>>>
>>> If virtio_error(), should we stop rather than continue?
>> My understanding is the buffer just popped had a wrong size so it is not
>> usable. We skip it and we try to use another one if any. Does it make sense?
>
> I'm not very familiar to virtio, but I see that virtio_error marks
> vdev->broken to true. If with that iiuc the next virtqueue_pop() will
> fail directly (see the first call to virtio_device_disabled in
> virtqueue_pop). Then I don't see why retry any more...
You're right. I will fix it.
Thanks
Eric
>
> Thanks,
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* [PATCH v12 10/13] virtio-iommu-pci: Add virtio iommu pci support
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (8 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 09/13] virtio-iommu: Implement fault reporting Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 14:43 ` [PATCH v12 11/13] hw/arm/virt: Add the virtio-iommu device tree mappings Eric Auger
` (3 subsequent siblings)
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This patch adds virtio-iommu-pci, which is the pci proxy for
the virtio-iommu device.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v11 -> v12:
- added Jean's R-b
- remove the array of intervals. Will be introduced later?
v10 -> v11:
- add the reserved_regions array property
v9 -> v10:
- include "hw/qdev-properties.h" header
v8 -> v9:
- add the msi-bypass property
- create virtio-iommu-pci.c
---
hw/virtio/Makefile.objs | 1 +
hw/virtio/virtio-iommu-pci.c | 88 ++++++++++++++++++++++++++++++++
include/hw/pci/pci.h | 1 +
include/hw/virtio/virtio-iommu.h | 1 +
qdev-monitor.c | 1 +
5 files changed, 92 insertions(+)
create mode 100644 hw/virtio/virtio-iommu-pci.c
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index 2fd9da7410..4e4d39a0a4 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -29,6 +29,7 @@ obj-$(CONFIG_VIRTIO_INPUT_HOST) += virtio-input-host-pci.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio-input-pci.o
obj-$(CONFIG_VIRTIO_RNG) += virtio-rng-pci.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio-balloon-pci.o
+obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu-pci.o
obj-$(CONFIG_VIRTIO_9P) += virtio-9p-pci.o
obj-$(CONFIG_VIRTIO_SCSI) += virtio-scsi-pci.o
obj-$(CONFIG_VIRTIO_BLK) += virtio-blk-pci.o
diff --git a/hw/virtio/virtio-iommu-pci.c b/hw/virtio/virtio-iommu-pci.c
new file mode 100644
index 0000000000..4cfae1f9df
--- /dev/null
+++ b/hw/virtio/virtio-iommu-pci.c
@@ -0,0 +1,88 @@
+/*
+ * Virtio IOMMU PCI Bindings
+ *
+ * Copyright (c) 2019 Red Hat, Inc.
+ * Written by Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+
+#include "virtio-pci.h"
+#include "hw/virtio/virtio-iommu.h"
+#include "hw/qdev-properties.h"
+
+typedef struct VirtIOIOMMUPCI VirtIOIOMMUPCI;
+
+/*
+ * virtio-iommu-pci: This extends VirtioPCIProxy.
+ *
+ */
+#define VIRTIO_IOMMU_PCI(obj) \
+ OBJECT_CHECK(VirtIOIOMMUPCI, (obj), TYPE_VIRTIO_IOMMU_PCI)
+
+struct VirtIOIOMMUPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOIOMMU vdev;
+};
+
+static Property virtio_iommu_pci_properties[] = {
+ DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOIOMMUPCI *dev = VIRTIO_IOMMU_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_link(OBJECT(dev),
+ OBJECT(pci_get_bus(&vpci_dev->pci_dev)),
+ "primary-bus", errp);
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_iommu_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = virtio_iommu_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = virtio_iommu_pci_properties;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_IOMMU;
+ pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
+ pcidev_k->class_id = PCI_CLASS_OTHERS;
+}
+
+static void virtio_iommu_pci_instance_init(Object *obj)
+{
+ VirtIOIOMMUPCI *dev = VIRTIO_IOMMU_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_IOMMU);
+}
+
+static const VirtioPCIDeviceTypeInfo virtio_iommu_pci_info = {
+ .base_name = TYPE_VIRTIO_IOMMU_PCI,
+ .generic_name = "virtio-iommu-pci",
+ .transitional_name = "virtio-iommu-pci-transitional",
+ .non_transitional_name = "virtio-iommu-pci-non-transitional",
+ .instance_size = sizeof(VirtIOIOMMUPCI),
+ .instance_init = virtio_iommu_pci_instance_init,
+ .class_init = virtio_iommu_pci_class_init,
+};
+
+static void virtio_iommu_pci_register(void)
+{
+ virtio_pci_types_register(&virtio_iommu_pci_info);
+}
+
+type_init(virtio_iommu_pci_register)
+
+
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index 2acd8321af..cfedf5a995 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -86,6 +86,7 @@ extern bool pci_available;
#define PCI_DEVICE_ID_VIRTIO_9P 0x1009
#define PCI_DEVICE_ID_VIRTIO_VSOCK 0x1012
#define PCI_DEVICE_ID_VIRTIO_PMEM 0x1013
+#define PCI_DEVICE_ID_VIRTIO_IOMMU 0x1014
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_BRIDGE 0x0001
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
index c6c33b4af0..ce695bf827 100644
--- a/include/hw/virtio/virtio-iommu.h
+++ b/include/hw/virtio/virtio-iommu.h
@@ -25,6 +25,7 @@
#include "hw/pci/pci.h"
#define TYPE_VIRTIO_IOMMU "virtio-iommu-device"
+#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-device-base"
#define VIRTIO_IOMMU(obj) \
OBJECT_CHECK(VirtIOIOMMU, (obj), TYPE_VIRTIO_IOMMU)
diff --git a/qdev-monitor.c b/qdev-monitor.c
index 3465a1e2d0..97f4022b51 100644
--- a/qdev-monitor.c
+++ b/qdev-monitor.c
@@ -66,6 +66,7 @@ static const QDevAlias qdev_alias_table[] = {
{ "virtio-input-host-ccw", "virtio-input-host", QEMU_ARCH_S390X },
{ "virtio-input-host-pci", "virtio-input-host",
QEMU_ARCH_ALL & ~QEMU_ARCH_S390X },
+ { "virtio-iommu-pci", "virtio-iommu", QEMU_ARCH_ALL & ~QEMU_ARCH_S390X },
{ "virtio-keyboard-ccw", "virtio-keyboard", QEMU_ARCH_S390X },
{ "virtio-keyboard-pci", "virtio-keyboard",
QEMU_ARCH_ALL & ~QEMU_ARCH_S390X },
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [PATCH v12 11/13] hw/arm/virt: Add the virtio-iommu device tree mappings
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (9 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 10/13] virtio-iommu-pci: Add virtio iommu pci support Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 14:43 ` [PATCH v12 12/13] virtio-iommu: Support migration Eric Auger
` (2 subsequent siblings)
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
Adds the "virtio,pci-iommu" node in the host bridge node and
the RID mapping, excluding the IOMMU RID.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v11 -> v12:
- Added Jean's R-b
v10 -> v11:
- remove msi_bypass
v8 -> v9:
- disable msi-bypass property
- addition of the subnode is handled in the hotplug handler
and IOMMU RID is not imposed anymore
v6 -> v7:
- align to the smmu instantiation code
v4 -> v5:
- VirtMachineClass no_iommu added in this patch
- Use object_resolve_path_type
---
hw/arm/virt.c | 54 ++++++++++++++++++++++++++++++++++++-------
include/hw/arm/virt.h | 2 ++
2 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 39ab5f47e0..689e5a3bd5 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -32,6 +32,7 @@
#include "qemu-common.h"
#include "qemu/units.h"
#include "qemu/option.h"
+#include "monitor/qdev.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/boards.h"
@@ -54,6 +55,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci-host/gpex.h"
+#include "hw/virtio/virtio-pci.h"
#include "hw/arm/sysbus-fdt.h"
#include "hw/platform-bus.h"
#include "hw/qdev-properties.h"
@@ -71,6 +73,7 @@
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/acpi/generic_event_device.h"
+#include "hw/virtio/virtio-iommu.h"
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
@@ -1180,6 +1183,30 @@ static void create_smmu(const VirtMachineState *vms,
g_free(node);
}
+static void create_virtio_iommu(VirtMachineState *vms, Error **errp)
+{
+ const char compat[] = "virtio,pci-iommu";
+ uint16_t bdf = vms->virtio_iommu_bdf;
+ char *node;
+
+ vms->iommu_phandle = qemu_fdt_alloc_phandle(vms->fdt);
+
+ node = g_strdup_printf("%s/virtio_iommu@%d", vms->pciehb_nodename, bdf);
+ qemu_fdt_add_subnode(vms->fdt, node);
+ qemu_fdt_setprop(vms->fdt, node, "compatible", compat, sizeof(compat));
+ qemu_fdt_setprop_sized_cells(vms->fdt, node, "reg",
+ 1, bdf << 8, 1, 0, 1, 0,
+ 1, 0, 1, 0);
+
+ qemu_fdt_setprop_cell(vms->fdt, node, "#iommu-cells", 1);
+ qemu_fdt_setprop_cell(vms->fdt, node, "phandle", vms->iommu_phandle);
+ g_free(node);
+
+ qemu_fdt_setprop_cells(vms->fdt, vms->pciehb_nodename, "iommu-map",
+ 0x0, vms->iommu_phandle, 0x0, bdf,
+ bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf);
+}
+
static void create_pcie(VirtMachineState *vms)
{
hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
@@ -1258,7 +1285,7 @@ static void create_pcie(VirtMachineState *vms)
}
}
- nodename = g_strdup_printf("/pcie@%" PRIx64, base);
+ nodename = vms->pciehb_nodename = g_strdup_printf("/pcie@%" PRIx64, base);
qemu_fdt_add_subnode(vms->fdt, nodename);
qemu_fdt_setprop_string(vms->fdt, nodename,
"compatible", "pci-host-ecam-generic");
@@ -1301,13 +1328,16 @@ static void create_pcie(VirtMachineState *vms)
if (vms->iommu) {
vms->iommu_phandle = qemu_fdt_alloc_phandle(vms->fdt);
- create_smmu(vms, pci->bus);
-
- qemu_fdt_setprop_cells(vms->fdt, nodename, "iommu-map",
- 0x0, vms->iommu_phandle, 0x0, 0x10000);
+ switch (vms->iommu) {
+ case VIRT_IOMMU_SMMUV3:
+ create_smmu(vms, pci->bus);
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "iommu-map",
+ 0x0, vms->iommu_phandle, 0x0, 0x10000);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
-
- g_free(nodename);
}
static void create_platform_bus(VirtMachineState *vms)
@@ -1971,6 +2001,13 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_memory_plug(hotplug_dev, dev, errp);
}
+ if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
+ PCIDevice *pdev = PCI_DEVICE(dev);
+
+ vms->iommu = VIRT_IOMMU_VIRTIO;
+ vms->virtio_iommu_bdf = pci_get_bdf(pdev);
+ create_virtio_iommu(vms, errp);
+ }
}
static void virt_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
@@ -1984,7 +2021,8 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
DeviceState *dev)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE) ||
- (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM))) {
+ (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) ||
+ (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI))) {
return HOTPLUG_HANDLER(machine);
}
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 38f0c33c77..165035fa8f 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -124,8 +124,10 @@ typedef struct {
bool virt;
int32_t gic_version;
VirtIOMMUType iommu;
+ uint16_t virtio_iommu_bdf;
struct arm_boot_info bootinfo;
MemMapEntry *memmap;
+ char *pciehb_nodename;
const int *irqmap;
int smp_cpus;
void *fdt;
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [PATCH v12 12/13] virtio-iommu: Support migration
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (10 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 11/13] hw/arm/virt: Add the virtio-iommu device tree mappings Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-14 19:07 ` Peter Xu
2020-02-10 12:33 ` Juan Quintela
2020-01-09 14:43 ` [PATCH v12 13/13] tests: Add virtio-iommu test Eric Auger
2020-01-09 15:12 ` [PATCH v12 00/13] VIRTIO-IOMMU device no-reply
13 siblings, 2 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
Add Migration support. We rely on recently added gtree and qlist
migration. We only migrate the domain gtree. The endpoint gtree
is re-constructed in a post-load operation.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
v11 -> v12:
- do not migrate the endpoint gtree but reconstruct it from the
domain gtree (Peter's suggestion)
- add MIG_PRI_IOMMU
---
hw/virtio/virtio-iommu.c | 109 +++++++++++++++++++++++++++++++++++----
1 file changed, 99 insertions(+), 10 deletions(-)
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 09193970ee..b0fde3af6f 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -655,16 +655,6 @@ static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
return f;
}
-/*
- * Migration is not yet supported: most of the state consists
- * of balanced binary trees which are not yet ready for getting
- * migrated
- */
-static const VMStateDescription vmstate_virtio_iommu_device = {
- .name = "virtio-iommu-device",
- .unmigratable = 1,
-};
-
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
guint ua = GPOINTER_TO_UINT(a);
@@ -748,9 +738,108 @@ static void virtio_iommu_instance_init(Object *obj)
{
}
+#define VMSTATE_INTERVAL \
+{ \
+ .name = "interval", \
+ .version_id = 1, \
+ .minimum_version_id = 1, \
+ .fields = (VMStateField[]) { \
+ VMSTATE_UINT64(low, VirtIOIOMMUInterval), \
+ VMSTATE_UINT64(high, VirtIOIOMMUInterval), \
+ VMSTATE_END_OF_LIST() \
+ } \
+}
+
+#define VMSTATE_MAPPING \
+{ \
+ .name = "mapping", \
+ .version_id = 1, \
+ .minimum_version_id = 1, \
+ .fields = (VMStateField[]) { \
+ VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
+ VMSTATE_UINT32(flags, VirtIOIOMMUMapping), \
+ VMSTATE_END_OF_LIST() \
+ }, \
+}
+
+static const VMStateDescription vmstate_interval_mapping[2] = {
+ VMSTATE_MAPPING, /* value */
+ VMSTATE_INTERVAL /* key */
+};
+
+static int domain_preload(void *opaque)
+{
+ VirtIOIOMMUDomain *domain = opaque;
+
+ domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
+ NULL, g_free, g_free);
+ return 0;
+}
+
+static const VMStateDescription vmstate_endpoint = {
+ .name = "endpoint",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_domain = {
+ .name = "domain",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_load = domain_preload,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(id, VirtIOIOMMUDomain),
+ VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
+ vmstate_interval_mapping,
+ VirtIOIOMMUInterval, VirtIOIOMMUMapping),
+ VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
+ vmstate_endpoint, VirtIOIOMMUEndpoint, next),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static gboolean reconstruct_endpoints(gpointer key, gpointer value,
+ gpointer data)
+{
+ VirtIOIOMMU *s = (VirtIOIOMMU *)data;
+ VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
+ VirtIOIOMMUEndpoint *iter;
+
+ QLIST_FOREACH(iter, &d->endpoint_list, next) {
+ iter->domain = d;
+ g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
+ }
+ return false; /* continue the domain traversal */
+}
+
+static int iommu_post_load(void *opaque, int version_id)
+{
+ VirtIOIOMMU *s = opaque;
+
+ g_tree_foreach(s->domains, reconstruct_endpoints, s);
+ return 0;
+}
+
+static const VMStateDescription vmstate_virtio_iommu_device = {
+ .name = "virtio-iommu-device",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .post_load = iommu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 1,
+ &vmstate_domain, VirtIOIOMMUDomain),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static const VMStateDescription vmstate_virtio_iommu = {
.name = "virtio-iommu",
.minimum_version_id = 1,
+ .priority = MIG_PRI_IOMMU,
.version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_VIRTIO_DEVICE,
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 12/13] virtio-iommu: Support migration
2020-01-09 14:43 ` [PATCH v12 12/13] virtio-iommu: Support migration Eric Auger
@ 2020-01-14 19:07 ` Peter Xu
2020-02-10 12:33 ` Juan Quintela
1 sibling, 0 replies; 39+ messages in thread
From: Peter Xu @ 2020-01-14 19:07 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, dgilbert, bharatb.linux, qemu-arm, mst,
eric.auger.pro
On Thu, Jan 09, 2020 at 03:43:18PM +0100, Eric Auger wrote:
> Add Migration support. We rely on recently added gtree and qlist
> migration. We only migrate the domain gtree. The endpoint gtree
> is re-constructed in a post-load operation.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
--
Peter Xu
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 12/13] virtio-iommu: Support migration
2020-01-09 14:43 ` [PATCH v12 12/13] virtio-iommu: Support migration Eric Auger
2020-01-14 19:07 ` Peter Xu
@ 2020-02-10 12:33 ` Juan Quintela
2020-02-10 13:09 ` Auger Eric
1 sibling, 1 reply; 39+ messages in thread
From: Juan Quintela @ 2020-02-10 12:33 UTC (permalink / raw)
To: Eric Auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, mst,
qemu-devel, peterx, dgilbert, bharatb.linux, qemu-arm,
eric.auger.pro
Eric Auger <eric.auger@redhat.com> wrote:
> Add Migration support. We rely on recently added gtree and qlist
> migration. We only migrate the domain gtree. The endpoint gtree
> is re-constructed in a post-load operation.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
And yes, this is as confusing as it can be:
a tree of mappings (i.e. key,value)
a list of endpoints
I will propose this as most complex structure migrated ever.
Later, Juan.
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 12/13] virtio-iommu: Support migration
2020-02-10 12:33 ` Juan Quintela
@ 2020-02-10 13:09 ` Auger Eric
2020-02-10 13:36 ` Auger Eric
0 siblings, 1 reply; 39+ messages in thread
From: Auger Eric @ 2020-02-10 13:09 UTC (permalink / raw)
To: quintela
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, mst,
qemu-devel, peterx, dgilbert, bharatb.linux, qemu-arm,
eric.auger.pro
Hi Juan,
On 2/10/20 1:33 PM, Juan Quintela wrote:
> Eric Auger <eric.auger@redhat.com> wrote:
>> Add Migration support. We rely on recently added gtree and qlist
>> migration. We only migrate the domain gtree. The endpoint gtree
>> is re-constructed in a post-load operation.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>
> Reviewed-by: Juan Quintela <quintela@redhat.com>
>
> And yes, this is as confusing as it can be:
> a tree of mappings (i.e. key,value)
> a list of endpoints
>
> I will propose this as most complex structure migrated ever.
:-) Thank you Juan, Dave, and Peter for your support
Best Regards
Eric
>
> Later, Juan.
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 12/13] virtio-iommu: Support migration
2020-02-10 13:09 ` Auger Eric
@ 2020-02-10 13:36 ` Auger Eric
2020-02-10 16:44 ` Juan Quintela
0 siblings, 1 reply; 39+ messages in thread
From: Auger Eric @ 2020-02-10 13:36 UTC (permalink / raw)
To: quintela
Cc: peter.maydell, kevin.tian, tnowicki, mst, qemu-devel, peterx,
dgilbert, bharatb.linux, qemu-arm, jean-philippe, eric.auger.pro
Hi Juan,
On 2/10/20 2:09 PM, Auger Eric wrote:
> Hi Juan,
>
> On 2/10/20 1:33 PM, Juan Quintela wrote:
>> Eric Auger <eric.auger@redhat.com> wrote:
>>> Add Migration support. We rely on recently added gtree and qlist
>>> migration. We only migrate the domain gtree. The endpoint gtree
>>> is re-constructed in a post-load operation.
>>>
>>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>
>> Reviewed-by: Juan Quintela <quintela@redhat.com>
>>
>> And yes, this is as confusing as it can be:
>> a tree of mappings (i.e. key,value)
>> a list of endpoints
>>
>> I will propose this as most complex structure migrated ever.
> :-) Thank you Juan, Dave, and Peter for your support
I did not notice this at the first glimpse but could you send the R-b on
the v15. I have been running like a hamster on my wheel during last week.
Code has not changed though :-).
Thanks
Eric
>
> Best Regards
>
> Eric
>>
>> Later, Juan.
>>
>
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 12/13] virtio-iommu: Support migration
2020-02-10 13:36 ` Auger Eric
@ 2020-02-10 16:44 ` Juan Quintela
0 siblings, 0 replies; 39+ messages in thread
From: Juan Quintela @ 2020-02-10 16:44 UTC (permalink / raw)
To: Auger Eric
Cc: peter.maydell, kevin.tian, tnowicki, mst, qemu-devel, peterx,
dgilbert, bharatb.linux, qemu-arm, jean-philippe, eric.auger.pro
Auger Eric <eric.auger@redhat.com> wrote:
> Hi Juan,
>
> On 2/10/20 2:09 PM, Auger Eric wrote:
>> Hi Juan,
>>
>> On 2/10/20 1:33 PM, Juan Quintela wrote:
>>> Eric Auger <eric.auger@redhat.com> wrote:
>>>> Add Migration support. We rely on recently added gtree and qlist
>>>> migration. We only migrate the domain gtree. The endpoint gtree
>>>> is re-constructed in a post-load operation.
>>>>
>>>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>>
>>> Reviewed-by: Juan Quintela <quintela@redhat.com>
>>>
>>> And yes, this is as confusing as it can be:
>>> a tree of mappings (i.e. key,value)
>>> a list of endpoints
>>>
>>> I will propose this as most complex structure migrated ever.
>> :-) Thank you Juan, Dave, and Peter for your support
>
> I did not notice this at the first glimpse but could you send the R-b on
> the v15. I have been running like a hamster on my wheel during last week.
>
> Code has not changed though :-).
Sure.
Later, Juan.
^ permalink raw reply [flat|nested] 39+ messages in thread
* [PATCH v12 13/13] tests: Add virtio-iommu test
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (11 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 12/13] virtio-iommu: Support migration Eric Auger
@ 2020-01-09 14:43 ` Eric Auger
2020-01-09 15:12 ` [PATCH v12 00/13] VIRTIO-IOMMU device no-reply
13 siblings, 0 replies; 39+ messages in thread
From: Eric Auger @ 2020-01-09 14:43 UTC (permalink / raw)
To: eric.auger.pro, eric.auger, qemu-devel, qemu-arm, peter.maydell,
jean-philippe, dgilbert, quintela, mst, peterx
Cc: kevin.tian, bharatb.linux, tnowicki
This adds the framework to test the virtio-iommu-pci device
and tests exercising the attach/detach, map/unmap API.
To run the tests:
make tests/qos-test
QTEST_QEMU_BINARY=x86_64-softmmu/qemu-system-x86_64 tests/qos-test V=1
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
tests/Makefile.include | 2 +
tests/libqos/virtio-iommu.c | 177 +++++++++++++++++++++
tests/libqos/virtio-iommu.h | 45 ++++++
tests/virtio-iommu-test.c | 308 ++++++++++++++++++++++++++++++++++++
4 files changed, 532 insertions(+)
create mode 100644 tests/libqos/virtio-iommu.c
create mode 100644 tests/libqos/virtio-iommu.h
create mode 100644 tests/virtio-iommu-test.c
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 49e3b0d319..049e06269d 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -751,6 +751,7 @@ qos-test-obj-y += tests/libqos/virtio-net.o
qos-test-obj-y += tests/libqos/virtio-pci.o
qos-test-obj-y += tests/libqos/virtio-pci-modern.o
qos-test-obj-y += tests/libqos/virtio-rng.o
+qos-test-obj-y += tests/libqos/virtio-iommu.o
qos-test-obj-y += tests/libqos/virtio-scsi.o
qos-test-obj-y += tests/libqos/virtio-serial.o
@@ -790,6 +791,7 @@ qos-test-obj-$(CONFIG_VIRTFS) += tests/virtio-9p-test.o
qos-test-obj-y += tests/virtio-blk-test.o
qos-test-obj-y += tests/virtio-net-test.o
qos-test-obj-y += tests/virtio-rng-test.o
+qos-test-obj-y += tests/virtio-iommu-test.o
qos-test-obj-y += tests/virtio-scsi-test.o
qos-test-obj-y += tests/virtio-serial-test.o
qos-test-obj-y += tests/vmxnet3-test.o
diff --git a/tests/libqos/virtio-iommu.c b/tests/libqos/virtio-iommu.c
new file mode 100644
index 0000000000..b4e9ea44fb
--- /dev/null
+++ b/tests/libqos/virtio-iommu.c
@@ -0,0 +1,177 @@
+/*
+ * libqos driver framework
+ *
+ * Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest.h"
+#include "qemu/module.h"
+#include "libqos/qgraph.h"
+#include "libqos/virtio-iommu.h"
+#include "hw/virtio/virtio-iommu.h"
+
+static QGuestAllocator *alloc;
+
+/* virtio-iommu-device */
+static void *qvirtio_iommu_get_driver(QVirtioIOMMU *v_iommu,
+ const char *interface)
+{
+ if (!g_strcmp0(interface, "virtio-iommu")) {
+ return v_iommu;
+ }
+ if (!g_strcmp0(interface, "virtio")) {
+ return v_iommu->vdev;
+ }
+
+ fprintf(stderr, "%s not present in virtio-iommu-device\n", interface);
+ g_assert_not_reached();
+}
+
+static void *qvirtio_iommu_device_get_driver(void *object,
+ const char *interface)
+{
+ QVirtioIOMMUDevice *v_iommu = object;
+ return qvirtio_iommu_get_driver(&v_iommu->iommu, interface);
+}
+
+static void virtio_iommu_cleanup(QVirtioIOMMU *interface)
+{
+ qvirtqueue_cleanup(interface->vdev->bus, interface->vq, alloc);
+}
+
+static void virtio_iommu_setup(QVirtioIOMMU *interface)
+{
+ QVirtioDevice *vdev = interface->vdev;
+ uint64_t features;
+
+ features = qvirtio_get_features(vdev);
+ features &= ~(QVIRTIO_F_BAD_FEATURE |
+ (1ull << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ull << VIRTIO_RING_F_EVENT_IDX) |
+ (1ull << VIRTIO_IOMMU_F_BYPASS));
+ qvirtio_set_features(vdev, features);
+ interface->vq = qvirtqueue_setup(interface->vdev, alloc, 0);
+ qvirtio_set_driver_ok(interface->vdev);
+}
+
+static void qvirtio_iommu_device_destructor(QOSGraphObject *obj)
+{
+ QVirtioIOMMUDevice *v_iommu = (QVirtioIOMMUDevice *) obj;
+ QVirtioIOMMU *iommu = &v_iommu->iommu;
+
+ virtio_iommu_cleanup(iommu);
+}
+
+static void qvirtio_iommu_device_start_hw(QOSGraphObject *obj)
+{
+ QVirtioIOMMUDevice *v_iommu = (QVirtioIOMMUDevice *) obj;
+ QVirtioIOMMU *iommu = &v_iommu->iommu;
+
+ virtio_iommu_setup(iommu);
+}
+
+static void *virtio_iommu_device_create(void *virtio_dev,
+ QGuestAllocator *t_alloc,
+ void *addr)
+{
+ QVirtioIOMMUDevice *virtio_rdevice = g_new0(QVirtioIOMMUDevice, 1);
+ QVirtioIOMMU *interface = &virtio_rdevice->iommu;
+
+ interface->vdev = virtio_dev;
+ alloc = t_alloc;
+
+ virtio_rdevice->obj.get_driver = qvirtio_iommu_device_get_driver;
+ virtio_rdevice->obj.destructor = qvirtio_iommu_device_destructor;
+ virtio_rdevice->obj.start_hw = qvirtio_iommu_device_start_hw;
+
+ return &virtio_rdevice->obj;
+}
+
+/* virtio-iommu-pci */
+static void *qvirtio_iommu_pci_get_driver(void *object, const char *interface)
+{
+ QVirtioIOMMUPCI *v_iommu = object;
+ if (!g_strcmp0(interface, "pci-device")) {
+ return v_iommu->pci_vdev.pdev;
+ }
+ return qvirtio_iommu_get_driver(&v_iommu->iommu, interface);
+}
+
+static void qvirtio_iommu_pci_destructor(QOSGraphObject *obj)
+{
+ QVirtioIOMMUPCI *iommu_pci = (QVirtioIOMMUPCI *) obj;
+ QVirtioIOMMU *interface = &iommu_pci->iommu;
+ QOSGraphObject *pci_vobj = &iommu_pci->pci_vdev.obj;
+
+ virtio_iommu_cleanup(interface);
+ qvirtio_pci_destructor(pci_vobj);
+}
+
+static void qvirtio_iommu_pci_start_hw(QOSGraphObject *obj)
+{
+ QVirtioIOMMUPCI *iommu_pci = (QVirtioIOMMUPCI *) obj;
+ QVirtioIOMMU *interface = &iommu_pci->iommu;
+ QOSGraphObject *pci_vobj = &iommu_pci->pci_vdev.obj;
+
+ qvirtio_pci_start_hw(pci_vobj);
+ virtio_iommu_setup(interface);
+}
+
+
+static void *virtio_iommu_pci_create(void *pci_bus, QGuestAllocator *t_alloc,
+ void *addr)
+{
+ QVirtioIOMMUPCI *virtio_rpci = g_new0(QVirtioIOMMUPCI, 1);
+ QVirtioIOMMU *interface = &virtio_rpci->iommu;
+ QOSGraphObject *obj = &virtio_rpci->pci_vdev.obj;
+
+ virtio_pci_init(&virtio_rpci->pci_vdev, pci_bus, addr);
+ interface->vdev = &virtio_rpci->pci_vdev.vdev;
+ alloc = t_alloc;
+
+ obj->get_driver = qvirtio_iommu_pci_get_driver;
+ obj->start_hw = qvirtio_iommu_pci_start_hw;
+ obj->destructor = qvirtio_iommu_pci_destructor;
+
+ return obj;
+}
+
+static void virtio_iommu_register_nodes(void)
+{
+ QPCIAddress addr = {
+ .devfn = QPCI_DEVFN(4, 0),
+ };
+
+ QOSGraphEdgeOptions opts = {
+ .extra_device_opts = "addr=04.0",
+ };
+
+ /* virtio-iommu-device */
+ qos_node_create_driver("virtio-iommu-device", virtio_iommu_device_create);
+ qos_node_consumes("virtio-iommu-device", "virtio-bus", NULL);
+ qos_node_produces("virtio-iommu-device", "virtio");
+ qos_node_produces("virtio-iommu-device", "virtio-iommu");
+
+ /* virtio-iommu-pci */
+ add_qpci_address(&opts, &addr);
+ qos_node_create_driver("virtio-iommu-pci", virtio_iommu_pci_create);
+ qos_node_consumes("virtio-iommu-pci", "pci-bus", &opts);
+ qos_node_produces("virtio-iommu-pci", "pci-device");
+ qos_node_produces("virtio-iommu-pci", "virtio");
+ qos_node_produces("virtio-iommu-pci", "virtio-iommu");
+}
+
+libqos_init(virtio_iommu_register_nodes);
diff --git a/tests/libqos/virtio-iommu.h b/tests/libqos/virtio-iommu.h
new file mode 100644
index 0000000000..6970b45a01
--- /dev/null
+++ b/tests/libqos/virtio-iommu.h
@@ -0,0 +1,45 @@
+/*
+ * libqos driver framework
+ *
+ * Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#ifndef TESTS_LIBQOS_VIRTIO_IOMMU_H
+#define TESTS_LIBQOS_VIRTIO_IOMMU_H
+
+#include "libqos/qgraph.h"
+#include "libqos/virtio.h"
+#include "libqos/virtio-pci.h"
+
+typedef struct QVirtioIOMMU QVirtioIOMMU;
+typedef struct QVirtioIOMMUPCI QVirtioIOMMUPCI;
+typedef struct QVirtioIOMMUDevice QVirtioIOMMUDevice;
+
+struct QVirtioIOMMU {
+ QVirtioDevice *vdev;
+ QVirtQueue *vq;
+};
+
+struct QVirtioIOMMUPCI {
+ QVirtioPCIDevice pci_vdev;
+ QVirtioIOMMU iommu;
+};
+
+struct QVirtioIOMMUDevice {
+ QOSGraphObject obj;
+ QVirtioIOMMU iommu;
+};
+
+#endif
diff --git a/tests/virtio-iommu-test.c b/tests/virtio-iommu-test.c
new file mode 100644
index 0000000000..ccbf7da986
--- /dev/null
+++ b/tests/virtio-iommu-test.c
@@ -0,0 +1,308 @@
+/*
+ * QTest testcase for VirtIO IOMMU
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "libqtest-single.h"
+#include "qemu/module.h"
+#include "libqos/qgraph.h"
+#include "libqos/virtio-iommu.h"
+#include "hw/virtio/virtio-iommu.h"
+
+#define PCI_SLOT_HP 0x06
+#define QVIRTIO_IOMMU_TIMEOUT_US (30 * 1000 * 1000)
+
+static QGuestAllocator *alloc;
+
+static void iommu_hotplug(void *obj, void *data, QGuestAllocator *alloc)
+{
+ QVirtioPCIDevice *dev = obj;
+ QTestState *qts = dev->pdev->bus->qts;
+
+ qtest_qmp_device_add(qts, "virtio-iommu-pci", "iommu1",
+ "{'addr': %s}", stringify(PCI_SLOT_HP));
+
+}
+
+static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+ QVirtioIOMMU *v_iommu = obj;
+ QVirtioDevice *dev = v_iommu->vdev;
+ uint64_t input_range_start = qvirtio_config_readq(dev, 8);
+ uint64_t input_range_end = qvirtio_config_readq(dev, 16);
+ uint32_t domain_range_start = qvirtio_config_readl(dev, 24);
+ uint32_t domain_range_end = qvirtio_config_readl(dev, 28);
+ uint32_t probe_size = qvirtio_config_readl(dev, 32);
+
+ g_assert_cmpint(input_range_start, ==, 0);
+ g_assert_cmphex(input_range_end, ==, 0xFFFFFFFFFFFFFFFF);
+ g_assert_cmpint(domain_range_start, ==, 0);
+ g_assert_cmpint(domain_range_end, ==, 32);
+ g_assert_cmphex(probe_size, ==, 0x200);
+}
+
+/**
+ * send_attach_detach - Send an attach/detach command to the device
+ * @type: VIRTIO_IOMMU_T_ATTACH/VIRTIO_IOMMU_T_DETACH
+ * @domain: domain the end point is attached to
+ * @ep: end-point
+ */
+static int send_attach_detach(QTestState *qts, QVirtioIOMMU *v_iommu,
+ uint8_t type, uint32_t domain, uint32_t ep)
+{
+ QVirtioDevice *dev = v_iommu->vdev;
+ QVirtQueue *vq = v_iommu->vq;
+ uint64_t ro_addr, wr_addr;
+ uint32_t free_head;
+ struct virtio_iommu_req_attach req; /* same layout as detach */
+ size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
+ size_t wr_size = sizeof(struct virtio_iommu_req_tail);
+ char buffer[64];
+ int ret;
+
+ req.head.type = type;
+ req.domain = domain;
+ req.endpoint = ep;
+
+ ro_addr = guest_alloc(alloc, ro_size);
+ wr_addr = guest_alloc(alloc, wr_size);
+
+ qtest_memwrite(qts, ro_addr, &req, ro_size);
+ free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
+ qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
+ qvirtqueue_kick(qts, dev, vq, free_head);
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_IOMMU_TIMEOUT_US);
+ qtest_memread(qts, wr_addr, buffer, wr_size);
+ ret = ((struct virtio_iommu_req_tail *)buffer)->status;
+ guest_free(alloc, ro_addr);
+ guest_free(alloc, wr_addr);
+ return ret;
+}
+
+/**
+ * send_map - Send a map command to the device
+ * @domain: domain the new binding is attached to
+ * @virt_start: iova start
+ * @virt_end: iova end
+ * @phys_start: base physical address
+ * @flags: mapping flags
+ */
+static int send_map(QTestState *qts, QVirtioIOMMU *v_iommu,
+ uint32_t domain, uint64_t virt_start, uint64_t virt_end,
+ uint64_t phys_start, uint32_t flags)
+{
+ QVirtioDevice *dev = v_iommu->vdev;
+ QVirtQueue *vq = v_iommu->vq;
+ uint64_t ro_addr, wr_addr;
+ uint32_t free_head;
+ struct virtio_iommu_req_map req;
+ size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
+ size_t wr_size = sizeof(struct virtio_iommu_req_tail);
+ char buffer[64];
+ int ret;
+
+ req.head.type = VIRTIO_IOMMU_T_MAP;
+ req.domain = domain;
+ req.virt_start = virt_start;
+ req.virt_end = virt_end;
+ req.phys_start = phys_start;
+ req.flags = flags;
+
+ ro_addr = guest_alloc(alloc, ro_size);
+ wr_addr = guest_alloc(alloc, wr_size);
+
+ qtest_memwrite(qts, ro_addr, &req, ro_size);
+ free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
+ qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
+ qvirtqueue_kick(qts, dev, vq, free_head);
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_IOMMU_TIMEOUT_US);
+ memread(wr_addr, buffer, wr_size);
+ ret = ((struct virtio_iommu_req_tail *)buffer)->status;
+ guest_free(alloc, ro_addr);
+ guest_free(alloc, wr_addr);
+ return ret;
+}
+
+/**
+ * send_unmap - Send an unmap command to the device
+ * @domain: domain the new binding is attached to
+ * @virt_start: iova start
+ * @virt_end: iova end
+ */
+static int send_unmap(QTestState *qts, QVirtioIOMMU *v_iommu,
+ uint32_t domain, uint64_t virt_start, uint64_t virt_end)
+{
+ QVirtioDevice *dev = v_iommu->vdev;
+ QVirtQueue *vq = v_iommu->vq;
+ uint64_t ro_addr, wr_addr;
+ uint32_t free_head;
+ struct virtio_iommu_req_unmap req;
+ size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
+ size_t wr_size = sizeof(struct virtio_iommu_req_tail);
+ char buffer[64];
+ int ret;
+
+ req.head.type = VIRTIO_IOMMU_T_UNMAP;
+ req.domain = domain;
+ req.virt_start = virt_start;
+ req.virt_end = virt_end;
+
+ ro_addr = guest_alloc(alloc, ro_size);
+ wr_addr = guest_alloc(alloc, wr_size);
+
+ qtest_memwrite(qts, ro_addr, &req, ro_size);
+ free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
+ qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
+ qvirtqueue_kick(qts, dev, vq, free_head);
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_IOMMU_TIMEOUT_US);
+ memread(wr_addr, buffer, wr_size);
+ ret = ((struct virtio_iommu_req_tail *)buffer)->status;
+ guest_free(alloc, ro_addr);
+ guest_free(alloc, wr_addr);
+ return ret;
+}
+
+/* Test unmap scenarios documented in the spec v0.12 */
+static void test_attach_detach(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+ QVirtioIOMMU *v_iommu = obj;
+ QTestState *qts = global_qtest;
+ int ret;
+
+ alloc = t_alloc;
+
+ /* type, domain, ep */
+
+ /* attach ep0 to domain 0 */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 0);
+ g_assert_cmpint(ret, ==, 0);
+
+ /* attach a non existing device (1) */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 1);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+ /* detach a non existing device (1) */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 1);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+ /* move ep0 from domain 0 to domain 1 */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
+ g_assert_cmpint(ret, ==, 0);
+
+ /* detach ep0 from domain 0 (ep0 now belongs to domain 1, so this fails) */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 0);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);
+
+ /* detach ep0 from domain 1 */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
+ g_assert_cmpint(ret, ==, 0);
+
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
+ g_assert_cmpint(ret, ==, 0);
+ ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000,
+ VIRTIO_IOMMU_MAP_F_READ);
+ g_assert_cmpint(ret, ==, 0);
+ ret = send_map(qts, v_iommu, 1, 0x2000, 0x2FFF, 0xb1000,
+ VIRTIO_IOMMU_MAP_F_READ);
+ g_assert_cmpint(ret, ==, 0);
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
+ g_assert_cmpint(ret, ==, 0);
+}
+
+static void test_map_unmap(void *obj, void *data, QGuestAllocator *t_alloc)
+{
+ QVirtioIOMMU *v_iommu = obj;
+ QTestState *qts = global_qtest;
+ int ret;
+
+ alloc = t_alloc;
+
+ /* attach ep0 to domain 1 */
+ ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
+ g_assert_cmpint(ret, ==, 0);
+
+ ret = send_map(qts, v_iommu, 0, 0, 0xFFF, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+ /* domain, virt start, virt end, phys start, flags */
+ ret = send_map(qts, v_iommu, 1, 0, 0xFFF, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
+ g_assert_cmpint(ret, ==, 0);
+
+ ret = send_unmap(qts, v_iommu, 4, 0x10, 0xFFF);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);
+
+ ret = send_unmap(qts, v_iommu, 1, 0x10, 0xFFF);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE);
+
+ ret = send_unmap(qts, v_iommu, 1, 0, 0x1000);
+ g_assert_cmpint(ret, ==, 0); /* unmap everything */
+
+ /* Spec example sequence */
+
+ /* 1 */
+ ret = send_unmap(qts, v_iommu, 1, 0, 4);
+ g_assert_cmpint(ret, ==, 0); /* doesn't unmap anything */
+
+ /* 2 */
+ send_map(qts, v_iommu, 1, 0, 9, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 9);
+ g_assert_cmpint(ret, ==, 0); /* unmaps [0,9] */
+
+ /* 3 */
+ send_map(qts, v_iommu, 1, 0, 4, 0xb1000, VIRTIO_IOMMU_MAP_F_READ);
+ send_map(qts, v_iommu, 1, 5, 9, 0xb2000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 9);
+ g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [5,9] */
+
+ /* 4 */
+ send_map(qts, v_iommu, 1, 0, 9, 0xc1000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 4);
+ g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE); /* doesn't unmap anything */
+
+ ret = send_unmap(qts, v_iommu, 1, 0, 10);
+ g_assert_cmpint(ret, ==, 0);
+
+ /* 5 */
+ send_map(qts, v_iommu, 1, 0, 4, 0xd1000, VIRTIO_IOMMU_MAP_F_READ);
+ send_map(qts, v_iommu, 1, 5, 9, 0xd2000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 4);
+ g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */
+
+ ret = send_unmap(qts, v_iommu, 1, 5, 9);
+ g_assert_cmpint(ret, ==, 0);
+
+ /* 6 */
+ send_map(qts, v_iommu, 1, 0, 4, 0xe2000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 9);
+ g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */
+
+ /* 7 */
+ send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
+ send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 14);
+ g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [10,14] */
+
+ send_unmap(qts, v_iommu, 1, 0, 100);
+ send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
+ send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
+ ret = send_unmap(qts, v_iommu, 1, 0, 4);
+ g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [10,14] */
+}
+
+static void register_virtio_iommu_test(void)
+{
+ qos_add_test("hotplug", "virtio-iommu-pci", iommu_hotplug, NULL);
+ qos_add_test("config", "virtio-iommu", pci_config, NULL);
+ qos_add_test("attach_detach", "virtio-iommu", test_attach_detach, NULL);
+ qos_add_test("map_unmap", "virtio-iommu", test_map_unmap, NULL);
+}
+
+libqos_init(register_virtio_iommu_test);
--
2.20.1
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [PATCH v12 00/13] VIRTIO-IOMMU device
2020-01-09 14:43 [PATCH v12 00/13] VIRTIO-IOMMU device Eric Auger
` (12 preceding siblings ...)
2020-01-09 14:43 ` [PATCH v12 13/13] tests: Add virtio-iommu test Eric Auger
@ 2020-01-09 15:12 ` no-reply
2020-01-09 16:07 ` Auger Eric
13 siblings, 1 reply; 39+ messages in thread
From: no-reply @ 2020-01-09 15:12 UTC (permalink / raw)
To: eric.auger
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
qemu-devel, peterx, dgilbert, eric.auger, bharatb.linux,
qemu-arm, mst, eric.auger.pro
Patchew URL: https://patchew.org/QEMU/20200109144319.15912-1-eric.auger@redhat.com/
Hi,
This series failed the docker-quick@centos7 build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.
=== TEST SCRIPT BEGIN ===
#!/bin/bash
make docker-image-centos7 V=1 NETWORK=1
time make docker-test-quick@centos7 SHOW_ENV=1 J=14 NETWORK=1
=== TEST SCRIPT END ===
TEST check-qtest-x86_64: tests/test-hmp
TEST check-qtest-x86_64: tests/qos-test
**
ERROR:/tmp/qemu-test/src/tests/virtio-iommu-test.c:46:pci_config: assertion failed (probe_size == 0x200): (0x00000000 == 0x00000200)
ERROR - Bail out! ERROR:/tmp/qemu-test/src/tests/virtio-iommu-test.c:46:pci_config: assertion failed (probe_size == 0x200): (0x00000000 == 0x00000200)
make: *** [check-qtest-x86_64] Error 1
make: *** Waiting for unfinished jobs....
TEST check-qtest-aarch64: tests/test-hmp
TEST iotest-qcow2: 220
---
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['sudo', '-n', 'docker', 'run', '--label', 'com.qemu.instance.uuid=de2154161ccf4468af5ebd35cbcbdc03', '-u', '1003', '--security-opt', 'seccomp=unconfined', '--rm', '-e', 'TARGET_LIST=', '-e', 'EXTRA_CONFIGURE_OPTS=', '-e', 'V=', '-e', 'J=14', '-e', 'DEBUG=', '-e', 'SHOW_ENV=1', '-e', 'CCACHE_DIR=/var/tmp/ccache', '-v', '/home/patchew2/.cache/qemu-docker-ccache:/var/tmp/ccache:z', '-v', '/var/tmp/patchew-tester-tmp-_ap6ulch/src/docker-src.2020-01-09-10.01.05.32472:/var/tmp/qemu:z,ro', 'qemu:centos7', '/var/tmp/qemu/run', 'test-quick']' returned non-zero exit status 2.
filter=--filter=label=com.qemu.instance.uuid=de2154161ccf4468af5ebd35cbcbdc03
make[1]: *** [docker-run] Error 1
make[1]: Leaving directory `/var/tmp/patchew-tester-tmp-_ap6ulch/src'
make: *** [docker-run-test-quick@centos7] Error 2
real 11m50.898s
user 0m8.023s
The full log is available at
http://patchew.org/logs/20200109144319.15912-1-eric.auger@redhat.com/testing.docker-quick@centos7/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [PATCH v12 00/13] VIRTIO-IOMMU device
2020-01-09 15:12 ` [PATCH v12 00/13] VIRTIO-IOMMU device no-reply
@ 2020-01-09 16:07 ` Auger Eric
0 siblings, 0 replies; 39+ messages in thread
From: Auger Eric @ 2020-01-09 16:07 UTC (permalink / raw)
To: qemu-devel
Cc: peter.maydell, kevin.tian, tnowicki, jean-philippe, quintela,
dgilbert, peterx, bharatb.linux, qemu-arm, mst, eric.auger.pro
Hi,
On 1/9/20 4:12 PM, no-reply@patchew.org wrote:
> Patchew URL: https://patchew.org/QEMU/20200109144319.15912-1-eric.auger@redhat.com/
>
>
>
> Hi,
>
> This series failed the docker-quick@centos7 build test. Please find the testing commands and
> their output below. If you have Docker installed, you can probably reproduce it
> locally.
>
> === TEST SCRIPT BEGIN ===
> #!/bin/bash
> make docker-image-centos7 V=1 NETWORK=1
> time make docker-test-quick@centos7 SHOW_ENV=1 J=14 NETWORK=1
> === TEST SCRIPT END ===
>
> TEST check-qtest-x86_64: tests/test-hmp
> TEST check-qtest-x86_64: tests/qos-test
> **
> ERROR:/tmp/qemu-test/src/tests/virtio-iommu-test.c:46:pci_config: assertion failed (probe_size == 0x200): (0x00000000 == 0x00000200)
> ERROR - Bail out! ERROR:/tmp/qemu-test/src/tests/virtio-iommu-test.c:46:pci_config: assertion failed (probe_size == 0x200): (0x00000000 == 0x00000200)
OK sorry that's because I eventually removed "virtio-iommu: Implement
probe request" patch from the sent series. I will remove that probe_size
field test on next round.
Thanks
Eric
> make: *** [check-qtest-x86_64] Error 1
> make: *** Waiting for unfinished jobs....
> TEST check-qtest-aarch64: tests/test-hmp
> TEST iotest-qcow2: 220
> ---
> raise CalledProcessError(retcode, cmd)
> subprocess.CalledProcessError: Command '['sudo', '-n', 'docker', 'run', '--label', 'com.qemu.instance.uuid=de2154161ccf4468af5ebd35cbcbdc03', '-u', '1003', '--security-opt', 'seccomp=unconfined', '--rm', '-e', 'TARGET_LIST=', '-e', 'EXTRA_CONFIGURE_OPTS=', '-e', 'V=', '-e', 'J=14', '-e', 'DEBUG=', '-e', 'SHOW_ENV=1', '-e', 'CCACHE_DIR=/var/tmp/ccache', '-v', '/home/patchew2/.cache/qemu-docker-ccache:/var/tmp/ccache:z', '-v', '/var/tmp/patchew-tester-tmp-_ap6ulch/src/docker-src.2020-01-09-10.01.05.32472:/var/tmp/qemu:z,ro', 'qemu:centos7', '/var/tmp/qemu/run', 'test-quick']' returned non-zero exit status 2.
> filter=--filter=label=com.qemu.instance.uuid=de2154161ccf4468af5ebd35cbcbdc03
> make[1]: *** [docker-run] Error 1
> make[1]: Leaving directory `/var/tmp/patchew-tester-tmp-_ap6ulch/src'
> make: *** [docker-run-test-quick@centos7] Error 2
>
> real 11m50.898s
> user 0m8.023s
>
>
> The full log is available at
> http://patchew.org/logs/20200109144319.15912-1-eric.auger@redhat.com/testing.docker-quick@centos7/?type=message.
> ---
> Email generated automatically by Patchew [https://patchew.org/].
> Please send your feedback to patchew-devel@redhat.com
>
^ permalink raw reply [flat|nested] 39+ messages in thread