From: Andi Kleen <ak@linux.intel.com>
To: mst@redhat.com
Cc: jasowang@redhat.com, virtualization@lists.linux-foundation.org,
	hch@lst.de, m.szyprowski@samsung.com, robin.murphy@arm.com,
	iommu@lists.linux-foundation.org, x86@kernel.org,
	sathyanarayanan.kuppuswamy@linux.intel.com, jpoimboe@redhat.com,
	linux-kernel@vger.kernel.org, Andi Kleen <ak@linux.intel.com>
Subject: [PATCH v1 2/8] virtio: Add boundary checks to virtio ring
Date: Wed,  2 Jun 2021 17:41:27 -0700	[thread overview]
Message-ID: <20210603004133.4079390-3-ak@linux.intel.com> (raw)
In-Reply-To: <20210603004133.4079390-1-ak@linux.intel.com>

In protected guest mode we don't trust the host.

This means we need to make sure the host cannot subvert us through
virtio communication. In general it can corrupt our virtio data
and cause a DoS, but it should not be able to access any data
that is not explicitly under IO.

Also add boundary checks so that the free list (which is accessible
to the host) cannot point outside the virtio ring. Note it could
still contain loops or similar, but these should only cause a DoS,
not memory corruption or a leak.
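
For illustration, the consumption loop with the added check boils
down to roughly the following (a simplified sketch; the real code is
in the diff below and uses the inside_split_ring() helper):

	/* vq->free_head and desc[i].next live in memory the host can
	 * write, so each value must be bounds-checked before it is
	 * used as an array index.
	 */
	i = vq->free_head;
	for (n = 0; n < total_sg; n++) {
		if (i >= vq->split.vring.num)	/* possibly corrupted */
			return -EIO;		/* abort the request */
		/* ... map and fill desc[i] ... */
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}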

When we detect an out-of-bounds descriptor, trigger an IO error.
We also use a WARN() (in case it was a software bug instead of
an attack). This implies that a malicious host can flood
the guest kernel log, but that's only a DoS and acceptable
in the threat model.
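
The bounds check and the WARN() are combined in one small helper
(shown here as added by the diff below); callers translate a failure
into -EIO via the common unmap_release error path:

	static inline bool inside_split_ring(struct vring_virtqueue *vq,
					     unsigned index)
	{
		/* WARN() returns true when it fires, so negate it to
		 * get "index is valid".
		 */
		return !WARN(index >= vq->split.vring.num,
			     "desc index %u out of bounds (%u)\n",
			     index, vq->split.vring.num);
	}

	...
	io_err = -EIO;
	if (!inside_split_ring(vq, i))
		goto unmap_release;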

This patch only hardens the initial consumption of the free list;
the freeing comes later.

Any of these errors can cause DMA memory leaks, but there is nothing
we can do about that, and it would be just a DoS.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
 drivers/virtio/virtio_ring.c | 46 ++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f35629fa47b1..d37ff5a0ff58 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -413,6 +413,15 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
 	return desc;
 }
 
+/* assumes no indirect mode */
+static inline bool inside_split_ring(struct vring_virtqueue *vq,
+				     unsigned index)
+{
+	return !WARN(index >= vq->split.vring.num,
+		    "desc index %u out of bounds (%u)\n",
+		    index, vq->split.vring.num);
+}
+
 static inline int virtqueue_add_split(struct virtqueue *_vq,
 				      struct scatterlist *sgs[],
 				      unsigned int total_sg,
@@ -428,6 +437,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	unsigned int i, n, avail, descs_used, prev, err_idx;
 	int head;
 	bool indirect;
+	int io_err;
 
 	START_USE(vq);
 
@@ -481,7 +491,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
 	for (n = 0; n < out_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+			dma_addr_t addr;
+
+			io_err = -EIO;
+			if (!inside_split_ring(vq, i))
+				goto unmap_release;
+			io_err = -ENOMEM;
+			addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
 			if (vring_mapping_error(vq, addr))
 				goto unmap_release;
 
@@ -494,7 +510,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	}
 	for (; n < (out_sgs + in_sgs); n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+			dma_addr_t addr;
+
+			io_err = -EIO;
+			if (!inside_split_ring(vq, i))
+				goto unmap_release;
+			io_err = -ENOMEM;
+			addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
 			if (vring_mapping_error(vq, addr))
 				goto unmap_release;
 
@@ -513,6 +535,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 		dma_addr_t addr = vring_map_single(
 			vq, desc, total_sg * sizeof(struct vring_desc),
 			DMA_TO_DEVICE);
+		io_err = -ENOMEM;
 		if (vring_mapping_error(vq, addr))
 			goto unmap_release;
 
@@ -528,6 +551,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	/* We're using some buffers from the free list. */
 	vq->vq.num_free -= descs_used;
 
+	io_err = -EIO;
+	if (!inside_split_ring(vq, head))
+		goto unmap_release;
+
 	/* Update free pointer */
 	if (indirect)
 		vq->free_head = virtio16_to_cpu(_vq->vdev,
@@ -545,6 +572,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	/* Put entry in available array (but don't update avail->idx until they
 	 * do sync). */
 	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
+
+	if (avail >= vq->split.vring.num)
+		goto unmap_release;
+
 	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
 
 	/* Descriptors and available array need to be set before we expose the
@@ -576,6 +607,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	for (n = 0; n < total_sg; n++) {
 		if (i == err_idx)
 			break;
+		if (!inside_split_ring(vq, i))
+			break;
 		vring_unmap_one_split(vq, &desc[i]);
 		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
 	}
@@ -584,7 +617,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 		kfree(desc);
 
 	END_USE(vq);
-	return -ENOMEM;
+	return io_err;
 }
 
 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
@@ -1146,7 +1179,12 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 	c = 0;
 	for (n = 0; n < out_sgs + in_sgs; n++) {
 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
+			dma_addr_t addr;
+
+			if (curr >= vq->packed.vring.num)
+				goto unmap_release;
+
+			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
 			if (vring_mapping_error(vq, addr))
 				goto unmap_release;
-- 
2.25.4


Thread overview:
2021-06-03  0:41 Virtio hardening for TDX Andi Kleen
2021-06-03  0:41 ` [PATCH v1 1/8] virtio: Force only split mode with protected guest Andi Kleen
2021-06-03  1:36   ` Jason Wang
2021-06-03  1:48     ` Andi Kleen
2021-06-03  2:32       ` Jason Wang
2021-06-03  2:56         ` Andi Kleen
2021-06-03  3:02           ` Jason Wang
2021-06-03 13:55             ` Andi Kleen
2021-06-04  2:29               ` Jason Wang
2021-06-03 17:33   ` Andy Lutomirski
2021-06-03 18:00     ` Andi Kleen
2021-06-03 19:31       ` Andy Lutomirski
2021-06-03 19:53         ` Andi Kleen
2021-06-03 22:17           ` Andy Lutomirski
2021-06-03 23:32             ` Andi Kleen
2021-06-04  1:46               ` Andy Lutomirski
2021-06-04  1:54                 ` Andi Kleen
2021-06-04  1:22         ` Jason Wang
2021-06-04  1:29       ` Jason Wang
2021-06-04  2:20     ` Jason Wang
2021-06-03  0:41 ` [PATCH v1 2/8] virtio: Add boundary checks to virtio ring Andi Kleen [this message]
2021-06-03  2:14   ` Jason Wang
2021-06-03  2:18     ` Andi Kleen
2021-06-03  2:36       ` Jason Wang
2021-06-03  0:41 ` [PATCH v1 3/8] virtio: Harden split buffer detachment Andi Kleen
2021-06-03  2:29   ` Jason Wang
2021-06-03  0:41 ` [PATCH v1 4/8] x86/tdx: Add arch_has_restricted_memory_access for TDX Andi Kleen
2021-06-03  4:02   ` Kuppuswamy, Sathyanarayanan
2021-06-03  0:41 ` [PATCH v1 5/8] dma: Use size for swiotlb boundary checks Andi Kleen
2021-06-03  1:48   ` Konrad Rzeszutek Wilk
2021-06-03  2:03     ` Andi Kleen
2021-06-03  9:09   ` Robin Murphy
2021-06-03  0:41 ` [PATCH v1 6/8] dma: Add return value to dma_unmap_page Andi Kleen
2021-06-03  9:08   ` Robin Murphy
2021-06-03 12:36     ` Andi Kleen
2021-06-03  0:41 ` [PATCH v1 7/8] virtio: Abort IO when descriptor points outside forced swiotlb Andi Kleen
2021-06-03  0:41 ` [PATCH v1 8/8] virtio: Error out on endless free lists Andi Kleen
2021-06-03  1:34 ` Virtio hardening for TDX Jason Wang
2021-06-03  1:56   ` Andi Kleen
