* [PATCH] KVM: Resize kvm_io_bus_range array dynamically
@ 2012-02-29  5:24 Amos Kong
  2012-02-29  5:50 ` [PATCH v2] KVM: Resize kvm_io_range " Amos Kong
                   ` (2 more replies)
  0 siblings, 3 replies; 28+ messages in thread
From: Amos Kong @ 2012-02-29  5:24 UTC (permalink / raw)
  To: jasowang, alex.williamson, mtosatti, avi, kvm

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it contains 32 slots and
each slot contains 8 functions, so the maximum number of supported
PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
mmio zones is 100, and each zone takes an iobus device.

This patch allows the kvm_io_bus_range array to be resized dynamically.

Signed-off-by: Amos Kong <akong@redhat.com>
CC: Alex Williamson <alex.williamson@redhat.com>
---
 include/linux/kvm_host.h |    3 +--
 virt/kvm/kvm_main.c      |   24 +++++++++++++++---------
 2 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 355e445..0e6d9d2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -69,8 +69,7 @@ struct kvm_io_range {
 
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4431ad..a6b9445 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2491,10 +2488,12 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
-		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (new_bus)
+		memcpy(new_bus, bus, sizeof(struct kvm_io_bus) +
+		       (bus->dev_count * sizeof(struct kvm_io_range)));
 	if (!new_bus)
 		return -ENOMEM;
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
@@ -2514,16 +2513,23 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 	bus = kvm->buses[bus_idx];
 
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
 	if (!new_bus)
 		return -ENOMEM;
 
 	r = -ENOENT;
 	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+		if (i == bus->dev_count - 1) {
+			/* dev is the last item of bus->range array,
+			   and new_bus->range doesn't have this item. */
+			r = 0;
+			new_bus->dev_count--;
+			break;
+		} else if (new_bus->range[i].dev == dev) {
 			r = 0;
 			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
+			new_bus->range[i] = bus->range[new_bus->dev_count];
 			sort(new_bus->range, new_bus->dev_count,
 			     sizeof(struct kvm_io_range),
 			     kvm_io_bus_sort_cmp, NULL);



* [PATCH v2] KVM: Resize kvm_io_range array dynamically
  2012-02-29  5:24 [PATCH] KVM: Resize kvm_io_bus_range array dynamically Amos Kong
@ 2012-02-29  5:50 ` Amos Kong
  2012-02-29 13:30 ` [PATCH v3] " Amos Kong
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
  2 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-02-29  5:50 UTC (permalink / raw)
  To: kvm, jasowang, mtosatti, alex.williamson, avi, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it contains 32 slots and
each slot contains 8 functions, so the maximum number of supported
PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
mmio zones is 100, and each zone takes an iobus device.

This patch allows the kvm_io_range array to be resized dynamically.

Changes from v1:
- fix typo: kvm_io_bus_range -> kvm_io_range

Signed-off-by: Amos Kong <akong@redhat.com>
CC: Alex Williamson <alex.williamson@redhat.com>
---
 include/linux/kvm_host.h |    3 +--
 virt/kvm/kvm_main.c      |   24 +++++++++++++++---------
 2 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 355e445..0e6d9d2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -69,8 +69,7 @@ struct kvm_io_range {
 
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4431ad..a6b9445 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2491,10 +2488,12 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
-		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (new_bus)
+		memcpy(new_bus, bus, sizeof(struct kvm_io_bus) +
+		       (bus->dev_count * sizeof(struct kvm_io_range)));
 	if (!new_bus)
 		return -ENOMEM;
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
@@ -2514,16 +2513,23 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 	bus = kvm->buses[bus_idx];
 
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
 	if (!new_bus)
 		return -ENOMEM;
 
 	r = -ENOENT;
 	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+		if (i == bus->dev_count - 1) {
+			/* dev is the last item of bus->range array,
+			   and new_bus->range doesn't have this item. */
+			r = 0;
+			new_bus->dev_count--;
+			break;
+		} else if (new_bus->range[i].dev == dev) {
 			r = 0;
 			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
+			new_bus->range[i] = bus->range[new_bus->dev_count];
 			sort(new_bus->range, new_bus->dev_count,
 			     sizeof(struct kvm_io_range),
 			     kvm_io_bus_sort_cmp, NULL);



* [PATCH v3] KVM: Resize kvm_io_range array dynamically
  2012-02-29  5:24 [PATCH] KVM: Resize kvm_io_bus_range array dynamically Amos Kong
  2012-02-29  5:50 ` [PATCH v2] KVM: Resize kvm_io_range " Amos Kong
@ 2012-02-29 13:30 ` Amos Kong
  2012-02-29 14:19   ` Jan Kiszka
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
  2 siblings, 1 reply; 28+ messages in thread
From: Amos Kong @ 2012-02-29 13:30 UTC (permalink / raw)
  To: kvm, jasowang, mtosatti, alex.williamson, avi, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it contains 32 slots and
each slot contains 8 functions, so the maximum number of supported
PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
mmio zones is 100, and each zone takes an iobus device; 300 io_bus
devices are not enough.

This patch allows the kvm_io_range array to be resized dynamically.

Changes from v1:
- fix typo: kvm_io_bus_range -> kvm_io_range

Changes from v2:
- unregister device only when it exists

Signed-off-by: Amos Kong <akong@redhat.com>
Reviewed-by: Jason Wang <jasowang@redhat.com>
CC: Alex Williamson <alex.williamson@redhat.com>
---
 include/linux/kvm_host.h |    3 +--
 virt/kvm/kvm_main.c      |   41 +++++++++++++++++++++--------------------
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 355e445..0e6d9d2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -69,8 +69,7 @@ struct kvm_io_range {
 
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4431ad..1275979 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2491,10 +2488,12 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
-		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (new_bus)
+		memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+		       sizeof(struct kvm_io_range)));
 	if (!new_bus)
 		return -ENOMEM;
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
@@ -2513,26 +2512,28 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
-
 	r = -ENOENT;
-	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+	for (i = 0; i < bus->dev_count; i++)
+		if (bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
-			sort(new_bus->range, new_bus->dev_count,
-			     sizeof(struct kvm_io_range),
-			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
-	if (r) {
-		kfree(new_bus);
+	if (r)
 		return r;
+
+	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+
+	new_bus->dev_count--;
+	/* copy last entry of bus->range to deleted entry spot if
+	   deleted entry isn't the last entry of bus->range */
+	if (i != bus->dev_count - 1) {
+		new_bus->range[i] = bus->range[bus->dev_count - 1];
+		sort(new_bus->range, new_bus->dev_count,
+		     sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL);
 	}
 
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);



* Re: [PATCH v3] KVM: Resize kvm_io_range array dynamically
  2012-02-29 13:30 ` [PATCH v3] " Amos Kong
@ 2012-02-29 14:19   ` Jan Kiszka
  2012-02-29 15:22     ` Amos Kong
  0 siblings, 1 reply; 28+ messages in thread
From: Jan Kiszka @ 2012-02-29 14:19 UTC (permalink / raw)
  To: Amos Kong; +Cc: kvm, jasowang, mtosatti, alex.williamson, avi, levinsasha928

On 2012-02-29 14:30, Amos Kong wrote:
> kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
> coalesced_mmio.
> 
> Currently Qemu only emulates one PCI bus; it contains 32 slots and
> each slot contains 8 functions, so the maximum number of supported
> PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
> mmio zones is 100, and each zone takes an iobus device; 300 io_bus
> devices are not enough.
>
> This patch allows the kvm_io_range array to be resized dynamically.

Is there any limit, or can userspace allocate arbitrary amounts of
kernel memory this way?

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux


* Re: [PATCH v3] KVM: Resize kvm_io_range array dynamically
  2012-02-29 14:19   ` Jan Kiszka
@ 2012-02-29 15:22     ` Amos Kong
  2012-02-29 15:29       ` Jan Kiszka
  0 siblings, 1 reply; 28+ messages in thread
From: Amos Kong @ 2012-02-29 15:22 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: kvm, jasowang, mtosatti, alex williamson, avi, levinsasha928

----- Original Message -----
> On 2012-02-29 14:30, Amos Kong wrote:
> > kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
> > coalesced_mmio.
> > 
> > Currently Qemu only emulates one PCI bus; it contains 32 slots and
> > each slot contains 8 functions, so the maximum number of supported
> > PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
> > mmio zones is 100, and each zone takes an iobus device; 300 io_bus
> > devices are not enough.
> >
> > This patch allows the kvm_io_range array to be resized dynamically.
> 
> Is there any limit, or can userspace allocate arbitrary amounts of
> kernel memory this way?

Hi Jan,

There is a fixed array in linux-2.6/include/linux/kvm_host.h,
we can only register 300 devices.

struct kvm_io_range {
        gpa_t addr;               
        int len;
        struct kvm_io_device *dev;
};

struct kvm_io_bus {
        int                   dev_count;
#define NR_IOBUS_DEVS 300
        struct kvm_io_range range[NR_IOBUS_DEVS];
};
                                  ^^^^^^^^^^^^^^
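
(Illustration: the patch replaces this fixed array with a C99 flexible
array member, so the array is sized at allocation time instead of being
fixed at NR_IOBUS_DEVS. A minimal userspace sketch of that layout; the
struct and field names below are stand-ins, not the kernel definitions.)

#include <stdio.h>
#include <stdlib.h>

struct io_range {
        unsigned long addr;
        int len;
};

struct io_bus {
        int dev_count;
        struct io_range range[];   /* flexible array member */
};

int main(void)
{
        int n = 4;
        /* allocate the header plus exactly n entries */
        struct io_bus *bus = calloc(1, sizeof(*bus) + n * sizeof(struct io_range));

        if (!bus)
                return 1;
        bus->dev_count = n;
        for (int i = 0; i < n; i++)
                bus->range[i] = (struct io_range){ .addr = 0x1000UL * i, .len = 4 };
        printf("%zu bytes hold %d ranges\n",
               sizeof(*bus) + n * sizeof(struct io_range), bus->dev_count);
        free(bus);
        return 0;
}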

Amos.


* Re: [PATCH v3] KVM: Resize kvm_io_range array dynamically
  2012-02-29 15:22     ` Amos Kong
@ 2012-02-29 15:29       ` Jan Kiszka
  2012-02-29 16:34         ` Amos Kong
  0 siblings, 1 reply; 28+ messages in thread
From: Jan Kiszka @ 2012-02-29 15:29 UTC (permalink / raw)
  To: Amos Kong; +Cc: kvm, jasowang, mtosatti, alex williamson, avi, levinsasha928

On 2012-02-29 16:22, Amos Kong wrote:
> ----- Original Message -----
>> On 2012-02-29 14:30, Amos Kong wrote:
>>> kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
>>> coalesced_mmio.
>>>
>>> Currently Qemu only emulates one PCI bus; it contains 32 slots and
>>> each slot contains 8 functions, so the maximum number of supported
>>> PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
>>> mmio zones is 100, and each zone takes an iobus device; 300 io_bus
>>> devices are not enough.
>>>
>>> This patch allows the kvm_io_range array to be resized dynamically.
>>
>> Is there any limit, or can userspace allocate arbitrary amounts of
>> kernel memory this way?
> 
> Hi Jan,
> 
> There is a fixed array in linux-2.6/include/linux/kvm_host.h,
> we can only register 300 devices.
> 
> struct kvm_io_range {
>         gpa_t addr;               
>         int len;
>         struct kvm_io_device *dev;
> };
> 
> struct kvm_io_bus {
>         int                   dev_count;
> #define NR_IOBUS_DEVS 300
>         struct kvm_io_range range[NR_IOBUS_DEVS];
> };
>                                   ^^^^^^^^^^^^^^

Right. But doesn't your patch remove precisely this limit? So what
limits userspace now? To register 300 million devices...?

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux


* Re: [PATCH v3] KVM: Resize kvm_io_range array dynamically
  2012-02-29 15:29       ` Jan Kiszka
@ 2012-02-29 16:34         ` Amos Kong
  2012-03-01  5:19           ` Amos Kong
  0 siblings, 1 reply; 28+ messages in thread
From: Amos Kong @ 2012-02-29 16:34 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: kvm, jasowang, mtosatti, alex williamson, avi, levinsasha928

----- Original Message -----
> On 2012-02-29 16:22, Amos Kong wrote:
> > ----- Original Message -----
> >> On 2012-02-29 14:30, Amos Kong wrote:
> >>> kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
> >>> coalesced_mmio.
> >>>
> >>> Currently Qemu only emulates one PCI bus; it contains 32 slots and
> >>> each slot contains 8 functions, so the maximum number of supported
> >>> PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
> >>> mmio zones is 100, and each zone takes an iobus device; 300 io_bus
> >>> devices are not enough.
> >>>
> >>> This patch allows the kvm_io_range array to be resized dynamically.
> >>
> >> Is there any limit, or can userspace allocate arbitrary amounts of
> >> kernel memory this way?
> > 
> > Hi Jan,
> > 
> > There is a fixed array in linux-2.6/include/linux/kvm_host.h,
> > we can only register 300 devices.
> > 
> > struct kvm_io_range {
> >         gpa_t addr;
> >         int len;
> >         struct kvm_io_device *dev;
> > };
> > 
> > struct kvm_io_bus {
> >         int                   dev_count;
> > #define NR_IOBUS_DEVS 300
> >         struct kvm_io_range range[NR_IOBUS_DEVS];
> > };
> >                                   ^^^^^^^^^^^^^^
> 
> Right. But doesn't your patch remove precisely this limit? So what
> limits userspace now? To register 300 million devices...?

Hi Jan,

It seems we need to keep the limit in kvm_host.h

#define NR_IOBUS_DEVS 600

/* Currently Qemu only emulates one PCI bus; it contains 32 slots,
and each slot contains 8 functions. Only 29 slots can be used to
add multiple-function devices, so the maximum number of supported
PCI devices is 29 * 8 = 232. Each virtio-blk device needs 1 iobus
device, and each virtio-net(vhost) device requires 2 such devices
to service notifications (ioevent) from its rx/tx queues.
The maximum number of coalesced mmio zones is 100, and each zone
takes an iobus device. ioevent, pit and ioapic take a few more
iobus devices.

So we can set the max limit to 600. */
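
Roughly, the budget behind that 600 figure, assuming the worst case
where every usable PCI function is a virtio-net(vhost=on) device:

  29 slots * 8 functions       = 232 PCI devices
  232 devices * 2 iobus each   = 464 iobus devices (ioeventfds)
  coalesced mmio zones (max)   = 100
  pit, pic, ioapic, misc       =   a handful
  ---------------------------------------------
  total                        ~ 570, so 600 leaves some headroom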

----- check limit when register dev ----

virt/kvm/kvm_main.c:

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev)
{
        struct kvm_io_bus *new_bus, *bus;

        bus = kvm->buses[bus_idx];
        if (bus->dev_count > NR_IOBUS_DEVS - 1)   // can only register 600 devices
                return -ENOSPC;

Amos.


* Re: [PATCH v3] KVM: Resize kvm_io_range array dynamically
  2012-02-29 16:34         ` Amos Kong
@ 2012-03-01  5:19           ` Amos Kong
  0 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-01  5:19 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: kvm, jasowang, mtosatti, alex williamson, avi, levinsasha928

On 01/03/12 00:34, Amos Kong wrote:
> ----- Original Message -----
>> On 2012-02-29 16:22, Amos Kong wrote:
>>> ----- Original Message -----
>>>> On 2012-02-29 14:30, Amos Kong wrote:
>>>>> kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
>>>>> coalesced_mmio.
>>>>>
>>>>> Currently Qemu only emulates one PCI bus; it contains 32 slots and
>>>>> each slot contains 8 functions, so the maximum number of supported
>>>>> PCI devices is 1 * 32 * 8 = 256. The maximum number of coalesced
>>>>> mmio zones is 100, and each zone takes an iobus device; 300 io_bus
>>>>> devices are not enough.
>>>>>
>>>>> This patch allows the kvm_io_range array to be resized dynamically.
>>>>
>>>> Is there any limit, or can userspace allocate arbitrary amounts of
>>>> kernel memory this way?
>>>
>>> Hi Jan,
>>>
>>> There is a fixed array in linux-2.6/include/linux/kvm_host.h,
>>> we can only register 300 devices.
>>>
>>> struct kvm_io_range {
>>>          gpa_t addr;
>>>          int len;
>>>          struct kvm_io_device *dev;
>>> };
>>>
>>> struct kvm_io_bus {
>>>          int                   dev_count;
>>> #define NR_IOBUS_DEVS 300
>>>          struct kvm_io_range range[NR_IOBUS_DEVS];
>>> };
>>>                                    ^^^^^^^^^^^^^^
>>
>> Right. But doesn't your patch remove precisely this limit? So what
>> limits userspace now? To register 300 million devices...?
>
> Hi Jan,
>
> It seems we need to keep the limit in kvm_host.h
>
> #define NR_IOBUS_DEVS 600
>
> /* Currently Qemu only emulates one PCI bus; it contains 32 slots,
> and each slot contains 8 functions. Only 29 slots can be used to
> add multiple-function devices, so the maximum number of supported
> PCI devices is 29 * 8 = 232. Each virtio-blk device needs 1 iobus
> device, and each virtio-net(vhost) device requires 2 such devices
> to service notifications (ioevent) from its rx/tx queues.
> The maximum number of coalesced mmio zones is 100, and each zone
> takes an iobus device. ioevent, pit and ioapic take a few more
> iobus devices.
>
> So we can set the max limit to 600. */

One virtio-net(vhost=on) takes two iobus devices,
and it needs three IRQs for MSI/MSI-X.
I started a guest with 232 virtio-net(vhost=on) devices;
guest IRQs 24 to 191 were used for virtio-config/input/output,
and only 56 virtio-nics had MSI-X enabled.
Those 56 virtio-net(vhost=on) devices registered 56 * 2 = 112 iobus devices.

It's safe to set the limit to 300, right?


> ----- check limit when register dev ----
>
> virt/kvm/kvm_main.c:
>
> /* Caller must hold slots_lock. */
> int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
>                              int len, struct kvm_io_device *dev)
> {
>          struct kvm_io_bus *new_bus, *bus;
>
>          bus = kvm->buses[bus_idx];
>          if (bus->dev_count > NR_IOBUS_DEVS - 1)   // can only register 600 devices
>                  return -ENOSPC;
>
> Amos.

-- 
			Amos.


* [PATCH v4] KVM: Resize kvm_io_range array dynamically
  2012-02-29  5:24 [PATCH] KVM: Resize kvm_io_bus_range array dynamically Amos Kong
  2012-02-29  5:50 ` [PATCH v2] KVM: Resize kvm_io_range " Amos Kong
  2012-02-29 13:30 ` [PATCH v3] " Amos Kong
@ 2012-03-01  7:01 ` Amos Kong
  2012-03-01 10:14   ` Sasha Levin
                     ` (6 more replies)
  2 siblings, 7 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-01  7:01 UTC (permalink / raw)
  To: kvm, jasowang, mtosatti, alex.williamson, avi, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it contains 32 slots and
each slot contains 8 functions, so the maximum number of supported
PCI devices is 1 * 32 * 8 = 256. One virtio-blk takes one iobus
device, and one virtio-net(vhost=on) takes two iobus devices.
The maximum number of coalesced mmio zones is 100, and each zone
takes an iobus device. So 300 io_bus devices are not enough.

This patch allows the kvm_io_range array to be resized dynamically.
Set an upper bound for kvm_io_range to limit userspace.
1000 is a very large limit and does not bloat the typical user.

Changes from v1:
- fix typo: kvm_io_bus_range -> kvm_io_range

Changes from v2:
- unregister device only when it exists

Changes from v3:
- set upper bounds to limit userspace

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   41 ++++++++++++++++++++++-------------------
 2 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 355e445..24ee2db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,10 +67,11 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 1000
+
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4431ad..1baed68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2491,10 +2488,14 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
+	if (bus->dev_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (new_bus)
+		memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+		       sizeof(struct kvm_io_range)));
 	if (!new_bus)
 		return -ENOMEM;
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
@@ -2513,26 +2514,28 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
-
 	r = -ENOENT;
-	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+	for (i = 0; i < bus->dev_count; i++)
+		if (bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
-			sort(new_bus->range, new_bus->dev_count,
-			     sizeof(struct kvm_io_range),
-			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
-	if (r) {
-		kfree(new_bus);
+	if (r)
 		return r;
+
+	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+
+	new_bus->dev_count--;
+	/* copy last entry of bus->range to deleted entry spot if
+	   deleted entry isn't the last entry of bus->range */
+	if (i != bus->dev_count - 1) {
+		new_bus->range[i] = bus->range[bus->dev_count - 1];
+		sort(new_bus->range, new_bus->dev_count,
+		     sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL);
 	}
 
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);



* Re: [PATCH v4] KVM: Resize kvm_io_range array dynamically
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
@ 2012-03-01 10:14   ` Sasha Levin
  2012-03-01 15:33     ` Alex Williamson
  2012-03-07 10:57   ` Avi Kivity
                     ` (5 subsequent siblings)
  6 siblings, 1 reply; 28+ messages in thread
From: Sasha Levin @ 2012-03-01 10:14 UTC (permalink / raw)
  To: Amos Kong; +Cc: kvm, jasowang, mtosatti, alex.williamson, avi

On Thu, Mar 1, 2012 at 9:01 AM, Amos Kong <akong@redhat.com> wrote:
> This patch allows the kvm_io_range array to be resized dynamically.
> Set an upper bound for kvm_io_range to limit userspace.
> 1000 is a very large limit and does not bloat the typical user.

What's the reason for making everything dynamic? Memory savings there
aren't that significant.

If you want to make it more efficient just define:
static struct kvm_io_bus io_bus[2];

somewhere in kvm_main.c, and just switch between them when you need to
do insertion and removal of devices. You get the benefit of zero slub
usage, no allocations in any of the paths, and much simpler logic.


* Re: [PATCH v4] KVM: Resize kvm_io_range array dynamically
  2012-03-01 10:14   ` Sasha Levin
@ 2012-03-01 15:33     ` Alex Williamson
  0 siblings, 0 replies; 28+ messages in thread
From: Alex Williamson @ 2012-03-01 15:33 UTC (permalink / raw)
  To: Sasha Levin; +Cc: Amos Kong, kvm, jasowang, mtosatti, avi

On Thu, 2012-03-01 at 12:14 +0200, Sasha Levin wrote:
> On Thu, Mar 1, 2012 at 9:01 AM, Amos Kong <akong@redhat.com> wrote:
> > This patch allows the kvm_io_range array to be resized dynamically.
> > Set an upper bound for kvm_io_range to limit userspace.
> > 1000 is a very large limit and does not bloat the typical user.
> 
> Whats the reason for making everything dynamic? Memory savings there
> aren't that significant.

We're currently looking at about 14k for these arrays with 300 entries
since we have two of them.  If we're going to double it or triple it to
handle the maximum use case, why impose that on the typical VM?  It may
not be multiple megabytes, but I wouldn't say it's insignificant either.
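
Roughly, assuming a 64-bit build where struct kvm_io_range pads out
to 24 bytes (8-byte gpa_t, 4-byte len plus padding, 8-byte dev pointer):

  300 entries * 24 bytes  =  7200 bytes per bus
  2 buses (MMIO and PIO)  ~ 14400 bytes, i.e. about 14k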

> 
> If you want to make it more efficient just define:
> static struct kvm_io_bus io_bus[2];
> 
> somewhere in kvm_main.c, and just switch between them when you need to
> do insertion and removal of devices. You get the benefit of zero slub
> usage, no allocations in any of the paths, and much simpler logic.

It's updated via rcu.  The change Amos is proposing is fairly trivial;
allocate the necessary size and memcpy instead of memdup.  Maybe it can
be optimized further, but this seems like a step in the right direction
to handle worst case use and, if anything, benefit the typical user too.
Thanks,

Alex



* Re: [PATCH v4] KVM: Resize kvm_io_range array dynamically
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
  2012-03-01 10:14   ` Sasha Levin
@ 2012-03-07 10:57   ` Avi Kivity
  2012-03-07 12:51     ` Amos Kong
  2012-03-07 13:16   ` [PATCH v5 1/2] KVM: resize " Amos Kong
                     ` (4 subsequent siblings)
  6 siblings, 1 reply; 28+ messages in thread
From: Avi Kivity @ 2012-03-07 10:57 UTC (permalink / raw)
  To: Amos Kong; +Cc: kvm, jasowang, mtosatti, alex.williamson, levinsasha928

On 03/01/2012 09:01 AM, Amos Kong wrote:
> kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
> coalesced_mmio.
>
> Currently Qemu only emulates one PCI bus; it contains 32 slots and
> each slot contains 8 functions, so the maximum number of supported
> PCI devices is 1 * 32 * 8 = 256. One virtio-blk takes one iobus
> device, and one virtio-net(vhost=on) takes two iobus devices.
> The maximum number of coalesced mmio zones is 100, and each zone
> takes an iobus device. So 300 io_bus devices are not enough.
>
> This patch allows the kvm_io_range array to be resized dynamically.
> Set an upper bound for kvm_io_range to limit userspace.
> 1000 is a very large limit and does not bloat the typical user.
>

Please separate the change to 1000 devs to a new patch.

>  
> +#define NR_IOBUS_DEVS 1000
> +
>  struct kvm_io_bus {
>  	int                   dev_count;
> -#define NR_IOBUS_DEVS 300
> -	struct kvm_io_range range[NR_IOBUS_DEVS];
> +	struct kvm_io_range range[];
>  };
>  
>  enum kvm_bus {
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index e4431ad..1baed68 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
>  int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
>  			  gpa_t addr, int len)
>  {
> -	if (bus->dev_count == NR_IOBUS_DEVS)
> -		return -ENOSPC;
> -
>  	bus->range[bus->dev_count++] = (struct kvm_io_range) {
>  		.addr = addr,
>  		.len = len,
> @@ -2491,10 +2488,14 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
>  	struct kvm_io_bus *new_bus, *bus;
>  
>  	bus = kvm->buses[bus_idx];
> -	if (bus->dev_count > NR_IOBUS_DEVS-1)
> +	if (bus->dev_count > NR_IOBUS_DEVS - 1)
>  		return -ENOSPC;
>  
> -	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
> +	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
> +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
> +	if (new_bus)
> +		memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
> +		       sizeof(struct kvm_io_range)));

This will be cleaner if you move the memcmp() after the check just below.

>  	if (!new_bus)
>  		return -ENOMEM;
>  	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
> @@ -2513,26 +2514,28 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
>  	struct kvm_io_bus *new_bus, *bus;
>  
>  	bus = kvm->buses[bus_idx];
> -
> -	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
> -	if (!new_bus)
> -		return -ENOMEM;
> -
>  	r = -ENOENT;
> -	for (i = 0; i < new_bus->dev_count; i++)
> -		if (new_bus->range[i].dev == dev) {
> +	for (i = 0; i < bus->dev_count; i++)
> +		if (bus->range[i].dev == dev) {
>  			r = 0;
> -			new_bus->dev_count--;
> -			new_bus->range[i] = new_bus->range[new_bus->dev_count];
> -			sort(new_bus->range, new_bus->dev_count,
> -			     sizeof(struct kvm_io_range),
> -			     kvm_io_bus_sort_cmp, NULL);
>  			break;
>  		}
>  
> -	if (r) {
> -		kfree(new_bus);
> +	if (r)
>  		return r;
> +
> +	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
> +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
> +	if (!new_bus)
> +		return -ENOMEM;
> +
> +	new_bus->dev_count--;
> +	/* copy last entry of bus->range to deleted entry spot if
> +	   deleted entry isn't the last entry of bus->range */
> +	if (i != bus->dev_count - 1) {

The check is unneeded - if they compare equal, the copy is a no-op.

> +		new_bus->range[i] = bus->range[bus->dev_count - 1];

> +		sort(new_bus->range, new_bus->dev_count,
> +		     sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL);
>  	}
>  
>  	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
>

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH v4] KVM: Resize kvm_io_range array dynamically
  2012-03-07 10:57   ` Avi Kivity
@ 2012-03-07 12:51     ` Amos Kong
  2012-03-07 14:12       ` Avi Kivity
  0 siblings, 1 reply; 28+ messages in thread
From: Amos Kong @ 2012-03-07 12:51 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, jasowang, mtosatti, alex williamson, levinsasha928

----- Original Message -----
> On 03/01/2012 09:01 AM, Amos Kong wrote:
> > kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
> > coalesced_mmio.
> >
> > Currently Qemu only emulates one PCI bus; it contains 32 slots and
> > each slot contains 8 functions, so the maximum number of supported
> > PCI devices is 1 * 32 * 8 = 256. One virtio-blk takes one iobus
> > device, and one virtio-net(vhost=on) takes two iobus devices.
> > The maximum number of coalesced mmio zones is 100, and each zone
> > takes an iobus device. So 300 io_bus devices are not enough.
> >
> > This patch allows the kvm_io_range array to be resized dynamically.
> > Set an upper bound for kvm_io_range to limit userspace.
> > 1000 is a very large limit and does not bloat the typical user.
> >
> 
> Please separate the change to 1000 devs to a new patch.

ok

> > +#define NR_IOBUS_DEVS 1000
> > +
> >  struct kvm_io_bus {
> >  	int                   dev_count;
> > -#define NR_IOBUS_DEVS 300
> > -	struct kvm_io_range range[NR_IOBUS_DEVS];
> > +	struct kvm_io_range range[];
> >  };
> >  
> >  enum kvm_bus {
> > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> > index e4431ad..1baed68 100644
> > --- a/virt/kvm/kvm_main.c
> > +++ b/virt/kvm/kvm_main.c
> > @@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const
> > void *p2)
> >  int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct
> >  kvm_io_device *dev,
> >  			  gpa_t addr, int len)
> >  {
> > -	if (bus->dev_count == NR_IOBUS_DEVS)
> > -		return -ENOSPC;
> > -
> >  	bus->range[bus->dev_count++] = (struct kvm_io_range) {
> >  		.addr = addr,
> >  		.len = len,
> > @@ -2491,10 +2488,14 @@ int kvm_io_bus_register_dev(struct kvm
> > *kvm, enum kvm_bus bus_idx, gpa_t addr,
> >  	struct kvm_io_bus *new_bus, *bus;
> >  
> >  	bus = kvm->buses[bus_idx];
> > -	if (bus->dev_count > NR_IOBUS_DEVS-1)
> > +	if (bus->dev_count > NR_IOBUS_DEVS - 1)
> >  		return -ENOSPC;
> >  
> > -	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
> > +	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
> > +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
> > +	if (new_bus)
> > +		memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
> > +		       sizeof(struct kvm_io_range)));
> 
> This will be cleaner if you move the memcmp() after the check just
> below.

nod.
 
> >  	if (!new_bus)
> >  		return -ENOMEM;
> >  	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
> > @@ -2513,26 +2514,28 @@ int kvm_io_bus_unregister_dev(struct kvm
> > *kvm, enum kvm_bus bus_idx,
> >  	struct kvm_io_bus *new_bus, *bus;
> >  
> >  	bus = kvm->buses[bus_idx];
> > -
> > -	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
> > -	if (!new_bus)
> > -		return -ENOMEM;
> > -
> >  	r = -ENOENT;
> > -	for (i = 0; i < new_bus->dev_count; i++)
> > -		if (new_bus->range[i].dev == dev) {
> > +	for (i = 0; i < bus->dev_count; i++)
> > +		if (bus->range[i].dev == dev) {
> >  			r = 0;
> > -			new_bus->dev_count--;
> > -			new_bus->range[i] = new_bus->range[new_bus->dev_count];
> > -			sort(new_bus->range, new_bus->dev_count,
> > -			     sizeof(struct kvm_io_range),
> > -			     kvm_io_bus_sort_cmp, NULL);
> >  			break;
> >  		}
> >  
> > -	if (r) {
> > -		kfree(new_bus);
> > +	if (r)
> >  		return r;
> > +
> > +	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
> > +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
> > +	if (!new_bus)
> > +		return -ENOMEM;
> > +
> > +	new_bus->dev_count--;
> > +	/* copy last entry of bus->range to deleted entry spot if
> > +	   deleted entry isn't the last entry of bus->range */
> > +	if (i != bus->dev_count - 1) {
> 
> The check is unneeded - if they compare equal, the copy is a no-op.


In kvm_io_bus_unregister_dev() we need to delete one entry from the original bus array,
so the allocated new bus array only has N - 1 entries (N is the number of entries in the original bus array).

If i equals bus->dev_count - 1, the entry to be deleted is the last entry of the original bus array.
That entry was never copied into the new bus array, so we don't need to do anything and the sort isn't necessary.
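
A small worked example (not from the patch, just to illustrate), with
a 3-entry bus [A, B, C], so new_bus is allocated with room for only 2:

  delete C (i == 2 == dev_count - 1):
      kmemdup() copied only [A, B]; C was never copied, so nothing
      needs to be fixed up.
  delete B (i == 1):
      new_bus starts as [A, B]; overwrite slot 1 with the old last
      entry C -> [A, C], then sort() restores address order.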

Amos.
 
> > +		new_bus->range[i] = bus->range[bus->dev_count - 1];
> 
> > +		sort(new_bus->range, new_bus->dev_count,
> > +		     sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL);
> >  	}
> >  
> >  	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
> >
> 
> --
> error compiling committee.c: too many arguments to function
> 
> 


* [PATCH v5 1/2] KVM: resize kvm_io_range array dynamically
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
  2012-03-01 10:14   ` Sasha Levin
  2012-03-07 10:57   ` Avi Kivity
@ 2012-03-07 13:16   ` Amos Kong
  2012-03-07 13:16   ` [PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-07 13:16 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

This patch allows the kvm_io_range array to be resized dynamically.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   40 +++++++++++++++++++++-------------------
 2 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 355e445..e20dc8d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,10 +67,11 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 300
+
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4431ad..a0261d9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2491,12 +2488,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
+	if (bus->dev_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
 	if (!new_bus)
 		return -ENOMEM;
+	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+	       sizeof(struct kvm_io_range)));
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
@@ -2513,26 +2513,28 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
-
 	r = -ENOENT;
-	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+	for (i = 0; i < bus->dev_count; i++)
+		if (bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
-			sort(new_bus->range, new_bus->dev_count,
-			     sizeof(struct kvm_io_range),
-			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
-	if (r) {
-		kfree(new_bus);
+	if (r)
 		return r;
+
+	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+
+	new_bus->dev_count--;
+	/* copy last entry of bus->range to deleted entry spot if
+	   deleted entry isn't the last entry of bus->range */
+	if (i != bus->dev_count - 1) {
+		new_bus->range[i] = bus->range[bus->dev_count - 1];
+		sort(new_bus->range, new_bus->dev_count,
+		     sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL);
 	}
 
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);



* [PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
                     ` (2 preceding siblings ...)
  2012-03-07 13:16   ` [PATCH v5 1/2] KVM: resize " Amos Kong
@ 2012-03-07 13:16   ` Amos Kong
  2012-03-07 13:20   ` [RESEND PATCH v5 0/2] fix ENOSPC issue of iobus dev Amos Kong
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-07 13:16 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it contains 32 slots and
each slot contains 8 functions, so the maximum number of supported
PCI devices is 1 * 32 * 8 = 256. One virtio-blk takes one iobus
device, and one virtio-net(vhost=on) takes two iobus devices.
The maximum number of coalesced mmio zones is 100, and each zone
takes an iobus device. So 300 io_bus devices are not enough.

Set an upper bound for kvm_io_range to limit userspace.
1000 is a very large limit and does not bloat the typical user.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e20dc8d..24ee2db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,7 +67,7 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
-#define NR_IOBUS_DEVS 300
+#define NR_IOBUS_DEVS 1000
 
 struct kvm_io_bus {
 	int                   dev_count;



* [RESEND PATCH v5 0/2] fix ENOSPC issue of iobus dev
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
                     ` (3 preceding siblings ...)
  2012-03-07 13:16   ` [PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
@ 2012-03-07 13:20   ` Amos Kong
  2012-03-07 13:20     ` [RESEND PATCH v5 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
  2012-03-07 13:20     ` [RESEND PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
  2012-03-08  2:03   ` [PATCH v6 0/2] fix ENOSPC issue of iobus dev Amos Kong
  2012-03-09  4:17   ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Amos Kong
  6 siblings, 2 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-07 13:20 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

Booting a qemu-kvm guest with 232 multiple-function disks makes
qemu report an ENOSPC error, and it's not good to statically
increase the iobus array. This patchset allows the kvm_io_range
array to be resized dynamically and raises the dev limit to 1000.

Changes from v1:
- fix typo: kvm_io_bus_range -> kvm_io_range

Changes from v2:
- unregister device only when it exists

Changes from v3:
- set upper bounds to limit userspace

Changes from v4:
- check if allocation succeeded before memcpy()
- separate the change to 1000 devs to a new patch

---

Amos Kong (2):
      KVM: resize kvm_io_range array dynamically
      KVM: set upper bounds for iobus dev to limit userspace


 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   40 +++++++++++++++++++++-------------------
 2 files changed, 24 insertions(+), 21 deletions(-)

-- 
Amos Kong


* [RESEND PATCH v5 1/2] KVM: resize kvm_io_range array dynamically
  2012-03-07 13:20   ` [RESEND PATCH v5 0/2] fix ENOSPC issue of iobus dev Amos Kong
@ 2012-03-07 13:20     ` Amos Kong
  2012-03-07 13:20     ` [RESEND PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
  1 sibling, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-07 13:20 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

This patch allows the kvm_io_range array to be resized dynamically.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   40 +++++++++++++++++++++-------------------
 2 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 355e445..e20dc8d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,10 +67,11 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 300
+
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e4431ad..a0261d9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2389,9 +2389,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2491,12 +2488,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
+	if (bus->dev_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
 	if (!new_bus)
 		return -ENOMEM;
+	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+	       sizeof(struct kvm_io_range)));
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
@@ -2513,26 +2513,28 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
-
 	r = -ENOENT;
-	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+	for (i = 0; i < bus->dev_count; i++)
+		if (bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
-			sort(new_bus->range, new_bus->dev_count,
-			     sizeof(struct kvm_io_range),
-			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
-	if (r) {
-		kfree(new_bus);
+	if (r)
 		return r;
+
+	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+
+	new_bus->dev_count--;
+	/* copy last entry of bus->range to deleted entry spot if
+	   deleted entry isn't the last entry of bus->range */
+	if (i != bus->dev_count - 1) {
+		new_bus->range[i] = bus->range[bus->dev_count - 1];
+		sort(new_bus->range, new_bus->dev_count,
+		     sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp, NULL);
 	}
 
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);



* [RESEND PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace
  2012-03-07 13:20   ` [RESEND PATCH v5 0/2] fix ENOSPC issue of iobus dev Amos Kong
  2012-03-07 13:20     ` [RESEND PATCH v5 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
@ 2012-03-07 13:20     ` Amos Kong
  1 sibling, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-07 13:20 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it contains 32 slots and
each slot contains 8 functions, so the maximum number of supported
PCI devices is 1 * 32 * 8 = 256. One virtio-blk takes one iobus
device, and one virtio-net(vhost=on) takes two iobus devices.
The maximum number of coalesced mmio zones is 100, and each zone
takes an iobus device. So 300 io_bus devices are not enough.

Set an upper bound for kvm_io_range to limit userspace.
1000 is a very large limit and does not bloat the typical user.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e20dc8d..24ee2db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,7 +67,7 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
-#define NR_IOBUS_DEVS 300
+#define NR_IOBUS_DEVS 1000
 
 struct kvm_io_bus {
 	int                   dev_count;



* Re: [PATCH v4] KVM: Resize kvm_io_range array dynamically
  2012-03-07 12:51     ` Amos Kong
@ 2012-03-07 14:12       ` Avi Kivity
  0 siblings, 0 replies; 28+ messages in thread
From: Avi Kivity @ 2012-03-07 14:12 UTC (permalink / raw)
  To: Amos Kong; +Cc: kvm, jasowang, mtosatti, alex williamson, levinsasha928

On 03/07/2012 02:51 PM, Amos Kong wrote:
> > > +
> > > +	new_bus = kmemdup(bus, sizeof(*bus) + ((bus->dev_count - 1) *
> > > +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
> > > +	if (!new_bus)
> > > +		return -ENOMEM;
> > > +
> > > +	new_bus->dev_count--;
> > > +	/* copy last entry of bus->range to deleted entry spot if
> > > +	   deleted entry isn't the last entry of bus->range */
> > > +	if (i != bus->dev_count - 1) {
> > 
> > The check is unneeded - if they compare equal, the copy is a no-op.
>
>
> In kvm_io_bus_unregister_dev(), we need to delete one entry from original bus array.
> so the allocated new bus array only has $N - 1 entries, ($N is the entry number of original bus array)
>
> If i equals to bus->dev_count - 1, then the entry which is need to be deleted is the last entry of original bus array.
> and the entry isn't copied to new bus array, so we don't need to do anything, sort isn't necessary.

It's actually wrong to avoid the copy like I suggested, since this isn't
an in-place delete, and we don't have space for the last entry.

btw you don't need to sort at all.  Instead do

  memcpy(new_bus->range, bus->range, i * sizeof)
  memcpy(new_bus->range + i, bus->range + i + 1,
         (new_bus->dev_count - i) * sizeof)
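
A quick standalone check of that point (plain ints stand in for
struct kvm_io_range; an illustration only, not part of any patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
        int range[] = { 10, 20, 30, 40, 50 };   /* sorted, dev_count = 5 */
        int new_range[4];
        int dev_count = 5;
        int i = 2;                              /* delete the entry "30" */
        int new_dev_count = dev_count - 1;

        /* head: entries before the deleted one */
        memcpy(new_range, range, i * sizeof(int));
        /* tail: entries after the deleted one, shifted down by one slot */
        memcpy(new_range + i, range + i + 1,
               (new_dev_count - i) * sizeof(int));

        for (int j = 0; j < new_dev_count; j++)
                printf("%d ", new_range[j]);    /* prints: 10 20 40 50 */
        printf("\n");
        return 0;                               /* still sorted, no sort() needed */
}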

-- 
error compiling committee.c: too many arguments to function



* [PATCH v6 0/2] fix ENOSPC issue of iobus dev
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
                     ` (4 preceding siblings ...)
  2012-03-07 13:20   ` [RESEND PATCH v5 0/2] fix ENOSPC issue of iobus dev Amos Kong
@ 2012-03-08  2:03   ` Amos Kong
  2012-03-08  2:03     ` [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
  2012-03-08  2:04     ` [PATCH v6 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
  2012-03-09  4:17   ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Amos Kong
  6 siblings, 2 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-08  2:03 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

Booting a qemu-kvm guest with 232 multiple-function disks makes
qemu report an ENOSPC error, and it's not good to statically
increase the iobus array. This patchset allows the kvm_io_range
array to be resized dynamically and raises the dev limit to 1000.

Changes from v1:
- fix typo: kvm_io_bus_range -> kvm_io_range

Changes from v2:
- unregister device only when it exists

Changes from v3:
- set upper bounds to limit userspace

Changes from v4:
- check if allocation succeeded before memcpy()
- separate the change to 1000 devs to a new patch

Changes from v5:
- memcpy() two times, drop sort

---

Amos Kong (2):
      KVM: resize kvm_io_range array dynamically
      KVM: set upper bounds for iobus dev to limit userspace


 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   38 ++++++++++++++++++--------------------
 2 files changed, 21 insertions(+), 22 deletions(-)

-- 
Amos Kong


* [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically
  2012-03-08  2:03   ` [PATCH v6 0/2] fix ENOSPC issue of iobus dev Amos Kong
@ 2012-03-08  2:03     ` Amos Kong
  2012-03-08 23:20       ` Marcelo Tosatti
  2012-03-08  2:04     ` [PATCH v6 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
  1 sibling, 1 reply; 28+ messages in thread
From: Amos Kong @ 2012-03-08  2:03 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

This patch allows the kvm_io_range array to be resized dynamically.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   38 ++++++++++++++++++--------------------
 2 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e42d85a..8a6c1a3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,10 +67,11 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 300
+
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 94e148e..f6ee1e2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2393,9 +2393,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2495,12 +2492,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
+	if (bus->dev_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
 	if (!new_bus)
 		return -ENOMEM;
+	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+	       sizeof(struct kvm_io_range)));
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
@@ -2517,27 +2517,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
-
 	r = -ENOENT;
-	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+	for (i = 0; i < bus->dev_count; i++)
+		if (bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
-			sort(new_bus->range, new_bus->dev_count,
-			     sizeof(struct kvm_io_range),
-			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
-	if (r) {
-		kfree(new_bus);
+	if (r)
 		return r;
-	}
+
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+
+	new_bus->dev_count--;
+	memcpy(new_bus->range, bus->range, i * sizeof(struct kvm_io_range));
+	memcpy(new_bus->range + i, bus->range + i + 1,
+	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);


^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH v6 2/2] KVM: set upper bounds for iobus dev to limit userspace
  2012-03-08  2:03   ` [PATCH v6 0/2] fix ENOSPC issue of iobus dev Amos Kong
  2012-03-08  2:03     ` [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
@ 2012-03-08  2:04     ` Amos Kong
  1 sibling, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-08  2:04 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it has 32 slots and each
slot holds 8 functions, so at most 1 * 32 * 8 = 256 PCI devices are
supported. One virtio-blk takes one iobus device, while one
virtio-net (vhost=on) takes two iobus devices. The maximum number of
coalesced mmio zones is 100, and each zone takes one iobus device.
So 300 io_bus devices are not enough.

Set an upper bound for kvm_io_range to limit userspace. 1000 is a
very generous limit and does not bloat the typical user.

Signed-off-by: Amos Kong <akong@redhat.com>
---
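(A rough worst-case count behind the numbers above; an illustration
derived from this commit message, not taken from the patch:)

    256 PCI functions * 2 iobus devs (all virtio-net, vhost=on) = 512
    100 coalesced mmio zones * 1 iobus dev each                 = 100
                                                          total = 612

612 already exceeds the old fixed limit of 300, while 1000 still
leaves comfortable headroom; and because range[] is now sized on
demand, the higher cap costs nothing for small guests.
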
 include/linux/kvm_host.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8a6c1a3..15e9404 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,7 +67,7 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
-#define NR_IOBUS_DEVS 300
+#define NR_IOBUS_DEVS 1000
 
 struct kvm_io_bus {
 	int                   dev_count;


^ permalink raw reply related	[flat|nested] 28+ messages in thread

* Re: [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically
  2012-03-08  2:03     ` [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
@ 2012-03-08 23:20       ` Marcelo Tosatti
  2012-03-09  4:05         ` Amos Kong
  0 siblings, 1 reply; 28+ messages in thread
From: Marcelo Tosatti @ 2012-03-08 23:20 UTC (permalink / raw)
  To: Amos Kong; +Cc: jasowang, alex.williamson, kvm, levinsasha928

On Thu, Mar 08, 2012 at 10:03:55AM +0800, Amos Kong wrote:
> This patch makes the kvm_io_range array can be resized dynamically.
> 
> Signed-off-by: Amos Kong <akong@redhat.com>
> ---
>  include/linux/kvm_host.h |    5 +++--
>  virt/kvm/kvm_main.c      |   38 ++++++++++++++++++--------------------
>  2 files changed, 21 insertions(+), 22 deletions(-)
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index e42d85a..8a6c1a3 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -67,10 +67,11 @@ struct kvm_io_range {
>  	struct kvm_io_device *dev;
>  };
>  
> +#define NR_IOBUS_DEVS 300
> +
>  struct kvm_io_bus {
>  	int                   dev_count;
> -#define NR_IOBUS_DEVS 300
> -	struct kvm_io_range range[NR_IOBUS_DEVS];
> +	struct kvm_io_range range[];
>  };
>  
>  enum kvm_bus {
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 94e148e..f6ee1e2 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2393,9 +2393,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
>  int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
>  			  gpa_t addr, int len)
>  {
> -	if (bus->dev_count == NR_IOBUS_DEVS)
> -		return -ENOSPC;
> -
>  	bus->range[bus->dev_count++] = (struct kvm_io_range) {
>  		.addr = addr,
>  		.len = len,
> @@ -2495,12 +2492,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
>  	struct kvm_io_bus *new_bus, *bus;
>  
>  	bus = kvm->buses[bus_idx];
> -	if (bus->dev_count > NR_IOBUS_DEVS-1)
> +	if (bus->dev_count > NR_IOBUS_DEVS - 1)
>  		return -ENOSPC;
>  
> -	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
> +	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
> +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
>  	if (!new_bus)
>  		return -ENOMEM;
> +	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
> +	       sizeof(struct kvm_io_range)));
>  	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
>  	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
>  	synchronize_srcu_expedited(&kvm->srcu);
> @@ -2517,27 +2517,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
>  	struct kvm_io_bus *new_bus, *bus;
>  
>  	bus = kvm->buses[bus_idx];
> -
> -	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
> -	if (!new_bus)
> -		return -ENOMEM;
> -
>  	r = -ENOENT;
> -	for (i = 0; i < new_bus->dev_count; i++)
> -		if (new_bus->range[i].dev == dev) {
> +	for (i = 0; i < bus->dev_count; i++)
> +		if (bus->range[i].dev == dev) {
>  			r = 0;
> -			new_bus->dev_count--;
> -			new_bus->range[i] = new_bus->range[new_bus->dev_count];
> -			sort(new_bus->range, new_bus->dev_count,
> -			     sizeof(struct kvm_io_range),
> -			     kvm_io_bus_sort_cmp, NULL);
>  			break;
>  		}
>  
> -	if (r) {
> -		kfree(new_bus);
> +	if (r)
>  		return r;
> -	}
> +
> +	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
> +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
> +	if (!new_bus)
> +		return -ENOMEM;
> +
> +	new_bus->dev_count--;

Was just zeroed above?

> +	memcpy(new_bus->range, bus->range, i * sizeof(struct kvm_io_range));
> +	memcpy(new_bus->range + i, bus->range + i + 1,
> +	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

Did you mean "i" as an index for ->range[] ?

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically
  2012-03-08 23:20       ` Marcelo Tosatti
@ 2012-03-09  4:05         ` Amos Kong
  0 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-09  4:05 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: jasowang, alex.williamson, kvm, levinsasha928

On 09/03/12 07:20, Marcelo Tosatti wrote:
> On Thu, Mar 08, 2012 at 10:03:55AM +0800, Amos Kong wrote:
>> This patch makes the kvm_io_range array can be resized dynamically.
>>
>> Signed-off-by: Amos Kong <akong@redhat.com>
>> ---
>>   include/linux/kvm_host.h |    5 +++--
>>   virt/kvm/kvm_main.c      |   38 ++++++++++++++++++--------------------
>>   2 files changed, 21 insertions(+), 22 deletions(-)
>>
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index e42d85a..8a6c1a3 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -67,10 +67,11 @@ struct kvm_io_range {
>>   	struct kvm_io_device *dev;
>>   };
>>
>> +#define NR_IOBUS_DEVS 300
>> +
>>   struct kvm_io_bus {
>>   	int                   dev_count;
>> -#define NR_IOBUS_DEVS 300
>> -	struct kvm_io_range range[NR_IOBUS_DEVS];
>> +	struct kvm_io_range range[];
>>   };
>>
>>   enum kvm_bus {
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 94e148e..f6ee1e2 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -2393,9 +2393,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
>>   int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
>>   			  gpa_t addr, int len)
>>   {
>> -	if (bus->dev_count == NR_IOBUS_DEVS)
>> -		return -ENOSPC;
>> -
>>   	bus->range[bus->dev_count++] = (struct kvm_io_range) {
>>   		.addr = addr,
>>   		.len = len,
>> @@ -2495,12 +2492,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
>>   	struct kvm_io_bus *new_bus, *bus;
>>
>>   	bus = kvm->buses[bus_idx];
>> -	if (bus->dev_count > NR_IOBUS_DEVS-1)
>> +	if (bus->dev_count > NR_IOBUS_DEVS - 1)
>>   		return -ENOSPC;
>>
>> -	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
>> +	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
>> +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
>>   	if (!new_bus)
>>   		return -ENOMEM;
>> +	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
>> +	       sizeof(struct kvm_io_range)));
>>   	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
>>   	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
>>   	synchronize_srcu_expedited(&kvm->srcu);
>> @@ -2517,27 +2517,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
>>   	struct kvm_io_bus *new_bus, *bus;
>>
>>   	bus = kvm->buses[bus_idx];
>> -
>> -	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
>> -	if (!new_bus)
>> -		return -ENOMEM;
>> -
>>   	r = -ENOENT;
>> -	for (i = 0; i < new_bus->dev_count; i++)
>> -		if (new_bus->range[i].dev == dev) {
>> +	for (i = 0; i < bus->dev_count; i++)
>> +		if (bus->range[i].dev == dev) {

Hi Marcelo,

the deleted dev is found here.

>>   			r = 0;
>> -			new_bus->dev_count--;
>> -			new_bus->range[i] = new_bus->range[new_bus->dev_count];
>> -			sort(new_bus->range, new_bus->dev_count,
>> -			     sizeof(struct kvm_io_range),
>> -			     kvm_io_bus_sort_cmp, NULL);
>>   			break;
>>   		}
>>
>> -	if (r) {
>> -		kfree(new_bus);
>> +	if (r)
>>   		return r;
>> -	}
>> +
>> +	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
>> +			  sizeof(struct kvm_io_range)), GFP_KERNEL);
>> +	if (!new_bus)
>> +		return -ENOMEM;
>> +


>> +	new_bus->dev_count--;
>
> Was just zeroed above?
>
>> +	memcpy(new_bus->range, bus->range, i * sizeof(struct kvm_io_range));

Oh, dev_count memory needs to be copied.

         memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
         new_bus->dev_count--;


>> +	memcpy(new_bus->range + i, bus->range + i + 1,
>> +	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
>
> Did you mean "i" as an indice for ->range[] ?

'i' is the index of the deleted dev in bus->range[]


-- 
			Amos.

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v7 0/2] fix ENOSPC issue of iobus dev
  2012-03-01  7:01 ` [PATCH v4] " Amos Kong
                     ` (5 preceding siblings ...)
  2012-03-08  2:03   ` [PATCH v6 0/2] fix ENOSPC issue of iobus dev Amos Kong
@ 2012-03-09  4:17   ` Amos Kong
  2012-03-09  4:17     ` [PATCH v7 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
                       ` (2 more replies)
  6 siblings, 3 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-09  4:17 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

Booting up a qemu-kvm guest with 232 multifunction disks makes qemu
report an ENOSPC error, and statically increasing the iobus array is
not a good fix. This patchset allows the kvm_io_range array to be
resized dynamically and raises the dev limit to 1000.

Changes from v1:
- fix typo: kvm_io_bus_range -> kvm_io_range

Changes from v2:
- unregister device only when it exists

Changes from v3:
- set upper bounds to limit userspace

Changes from v4:
- check if allocate successfully before memcpy()
- separate the change to 1000 devs to a new patch

Changes from v5:
- memcpy() two times, drop sort

Changes from v6:
- copy bus->dev_count memory to new_bus

---

Amos Kong (2):
      KVM: resize kvm_io_range array dynamically
      KVM: set upper bounds for iobus dev to limit userspace


 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   38 ++++++++++++++++++--------------------
 2 files changed, 21 insertions(+), 22 deletions(-)

-- 
Amos Kong

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v7 1/2] KVM: resize kvm_io_range array dynamically
  2012-03-09  4:17   ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Amos Kong
@ 2012-03-09  4:17     ` Amos Kong
  2012-03-09  4:17     ` [PATCH v7 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
  2012-03-09 21:07     ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Marcelo Tosatti
  2 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-09  4:17 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

This patch allows the kvm_io_range array to be resized dynamically.

Signed-off-by: Amos Kong <akong@redhat.com>
---
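(Not part of the patch: a minimal sketch of the "copy everything except
element i" step that the unregister path now performs with two memcpy()
calls. io_bus/io_range are hypothetical stand-ins with the same layout
as kvm_io_bus/kvm_io_range.)

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct io_range { u64 addr; int len; void *dev; };
struct io_bus  { int dev_count; struct io_range range[]; };

/* Return a copy of 'old' that omits element 'i', 0 <= i < old->dev_count. */
static struct io_bus *io_bus_drop(const struct io_bus *old, int i)
{
	struct io_bus *new_bus;

	new_bus = kzalloc(sizeof(*new_bus) +
			  (old->dev_count - 1) * sizeof(struct io_range),
			  GFP_KERNEL);
	if (!new_bus)
		return NULL;

	/* header (including dev_count) plus elements [0, i) ... */
	memcpy(new_bus, old, sizeof(*old) + i * sizeof(struct io_range));
	new_bus->dev_count--;
	/* ... then elements (i, old->dev_count), shifted down by one */
	memcpy(new_bus->range + i, old->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct io_range));
	return new_bus;
}

Both copies preserve the existing order, so the array stays sorted and
the old swap-with-last plus sort() step is no longer needed.
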
 include/linux/kvm_host.h |    5 +++--
 virt/kvm/kvm_main.c      |   38 ++++++++++++++++++--------------------
 2 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ec171c1..9aff477 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,10 +67,11 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
+#define NR_IOBUS_DEVS 300
+
 struct kvm_io_bus {
 	int                   dev_count;
-#define NR_IOBUS_DEVS 300
-	struct kvm_io_range range[NR_IOBUS_DEVS];
+	struct kvm_io_range range[];
 };
 
 enum kvm_bus {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 42b7393..a9565e2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2393,9 +2393,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 			  gpa_t addr, int len)
 {
-	if (bus->dev_count == NR_IOBUS_DEVS)
-		return -ENOSPC;
-
 	bus->range[bus->dev_count++] = (struct kvm_io_range) {
 		.addr = addr,
 		.len = len,
@@ -2495,12 +2492,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	if (bus->dev_count > NR_IOBUS_DEVS-1)
+	if (bus->dev_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
 
-	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
 	if (!new_bus)
 		return -ENOMEM;
+	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+	       sizeof(struct kvm_io_range)));
 	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
@@ -2517,27 +2517,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-
-	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
-
 	r = -ENOENT;
-	for (i = 0; i < new_bus->dev_count; i++)
-		if (new_bus->range[i].dev == dev) {
+	for (i = 0; i < bus->dev_count; i++)
+		if (bus->range[i].dev == dev) {
 			r = 0;
-			new_bus->dev_count--;
-			new_bus->range[i] = new_bus->range[new_bus->dev_count];
-			sort(new_bus->range, new_bus->dev_count,
-			     sizeof(struct kvm_io_range),
-			     kvm_io_bus_sort_cmp, NULL);
 			break;
 		}
 
-	if (r) {
-		kfree(new_bus);
+	if (r)
 		return r;
-	}
+
+	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+	if (!new_bus)
+		return -ENOMEM;
+
+	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+	new_bus->dev_count--;
+	memcpy(new_bus->range + i, bus->range + i + 1,
+	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);


^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH v7 2/2] KVM: set upper bounds for iobus dev to limit userspace
  2012-03-09  4:17   ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Amos Kong
  2012-03-09  4:17     ` [PATCH v7 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
@ 2012-03-09  4:17     ` Amos Kong
  2012-03-09 21:07     ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Marcelo Tosatti
  2 siblings, 0 replies; 28+ messages in thread
From: Amos Kong @ 2012-03-09  4:17 UTC (permalink / raw)
  To: jasowang, mtosatti, alex.williamson, kvm, levinsasha928

kvm_io_bus devices are used for ioevent, pit, pic, ioapic,
coalesced_mmio.

Currently Qemu only emulates one PCI bus; it has 32 slots and each
slot holds 8 functions, so at most 1 * 32 * 8 = 256 PCI devices are
supported. One virtio-blk takes one iobus device, while one
virtio-net (vhost=on) takes two iobus devices. The maximum number of
coalesced mmio zones is 100, and each zone takes one iobus device.
So 300 io_bus devices are not enough.

Set an upper bound for kvm_io_range to limit userspace. 1000 is a
very generous limit and does not bloat the typical user.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 include/linux/kvm_host.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9aff477..b0195d2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -67,7 +67,7 @@ struct kvm_io_range {
 	struct kvm_io_device *dev;
 };
 
-#define NR_IOBUS_DEVS 300
+#define NR_IOBUS_DEVS 1000
 
 struct kvm_io_bus {
 	int                   dev_count;


^ permalink raw reply related	[flat|nested] 28+ messages in thread

* Re: [PATCH v7 0/2] fix ENOSPC issue of iobus dev
  2012-03-09  4:17   ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Amos Kong
  2012-03-09  4:17     ` [PATCH v7 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
  2012-03-09  4:17     ` [PATCH v7 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
@ 2012-03-09 21:07     ` Marcelo Tosatti
  2 siblings, 0 replies; 28+ messages in thread
From: Marcelo Tosatti @ 2012-03-09 21:07 UTC (permalink / raw)
  To: Amos Kong; +Cc: jasowang, alex.williamson, kvm, levinsasha928

On Fri, Mar 09, 2012 at 12:17:22PM +0800, Amos Kong wrote:
> Boot up qemu-kvm guest with 232 multiple functions disks, qemu
> will report a ENOSPC error, it's not good to statically increase
> iobus array. This patchset makes kvm_io_range array can be
> resized dynamically, and change dev limit to 1000.
> 
> Changes from v1:
> - fix typo: kvm_io_bus_range -> kvm_io_range
> 
> Changes from v2:
> - unregister device only when it exists
> 
> Changes from v3:
> - set upper bounds to limit userspace
> 
> Changes from v4:
> - check if allocate successfully before memcpy()
> - separate the change to 1000 devs to a new patch
> 
> Changes from v5:
> - memcpy() two times, drop sort
> 
> Changes from V6:
> - copy bus->dev_count memory to new_bus
> 
> ---
> 
> Amos Kong (2):
>       KVM: resize kvm_io_range array dynamically
>       KVM: set upper bounds for iobus dev to limit userspace

Applied, thanks.


^ permalink raw reply	[flat|nested] 28+ messages in thread

end of thread, other threads:[~2012-03-09 21:09 UTC | newest]

Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-02-29  5:24 [PATCH] KVM: Resize kvm_io_bus_range array dynamically Amos Kong
2012-02-29  5:50 ` [PATCH v2] KVM: Resize kvm_io_range " Amos Kong
2012-02-29 13:30 ` [PATCH v3] " Amos Kong
2012-02-29 14:19   ` Jan Kiszka
2012-02-29 15:22     ` Amos Kong
2012-02-29 15:29       ` Jan Kiszka
2012-02-29 16:34         ` Amos Kong
2012-03-01  5:19           ` Amos Kong
2012-03-01  7:01 ` [PATCH v4] " Amos Kong
2012-03-01 10:14   ` Sasha Levin
2012-03-01 15:33     ` Alex Williamson
2012-03-07 10:57   ` Avi Kivity
2012-03-07 12:51     ` Amos Kong
2012-03-07 14:12       ` Avi Kivity
2012-03-07 13:16   ` [PATCH v5 1/2] KVM: resize " Amos Kong
2012-03-07 13:16   ` [PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
2012-03-07 13:20   ` [RESEND PATCH v5 0/2] fix ENOSPC issue of iobus dev Amos Kong
2012-03-07 13:20     ` [RESEND PATCH v5 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
2012-03-07 13:20     ` [RESEND PATCH v5 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
2012-03-08  2:03   ` [PATCH v6 0/2] fix ENOSPC issue of iobus dev Amos Kong
2012-03-08  2:03     ` [PATCH v6 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
2012-03-08 23:20       ` Marcelo Tosatti
2012-03-09  4:05         ` Amos Kong
2012-03-08  2:04     ` [PATCH v6 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
2012-03-09  4:17   ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Amos Kong
2012-03-09  4:17     ` [PATCH v7 1/2] KVM: resize kvm_io_range array dynamically Amos Kong
2012-03-09  4:17     ` [PATCH v7 2/2] KVM: set upper bounds for iobus dev to limit userspace Amos Kong
2012-03-09 21:07     ` [PATCH v7 0/2] fix ENOSPC issue of iobus dev Marcelo Tosatti
