All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] decouple vcpu index from apic id
@ 2009-06-08  9:32 Gleb Natapov
  2009-06-08  9:32 ` [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
                   ` (4 more replies)
  0 siblings, 5 replies; 16+ messages in thread
From: Gleb Natapov @ 2009-06-08  9:32 UTC (permalink / raw)
  To: avi; +Cc: kvm

Currently vcpu_id is used as an index into the vcpus array and as the apic id
on x86.  This is incorrect since apic ids do not have to be contiguous
(they can also encode cpu hierarchy information) and may have values
bigger than the vcpu array size in the case of x2apic. This series removes
use of vcpu_id as the vcpus array index. Each architecture may use it as it
sees fit. x86 uses it as the apic id.

Gleb Natapov (4):
  Introduce kvm_vcpu_is_bsp() function.
  Use pointer to vcpu instead of vcpu_id in timer code.
  Break dependency between vcpu index in vcpus array and vcpu_id.
  Use macro to iterate over vcpus.

 arch/ia64/kvm/kvm-ia64.c   |   35 +++++++++------------
 arch/ia64/kvm/vcpu.c       |    2 +-
 arch/powerpc/kvm/powerpc.c |   16 ++++++----
 arch/s390/kvm/kvm-s390.c   |   33 ++++++++++----------
 arch/x86/kvm/i8254.c       |   13 +++-----
 arch/x86/kvm/i8259.c       |    6 ++--
 arch/x86/kvm/kvm_timer.h   |    2 +-
 arch/x86/kvm/lapic.c       |    9 +++--
 arch/x86/kvm/mmu.c         |    6 ++--
 arch/x86/kvm/svm.c         |    4 +-
 arch/x86/kvm/timer.c       |    2 +-
 arch/x86/kvm/vmx.c         |    6 ++--
 arch/x86/kvm/x86.c         |   29 ++++++++---------
 include/linux/kvm.h        |    2 +
 include/linux/kvm_host.h   |   18 +++++++++++
 virt/kvm/ioapic.c          |    4 ++-
 virt/kvm/irq_comm.c        |    6 +--
 virt/kvm/kvm_main.c        |   71 +++++++++++++++++++++++--------------------
 18 files changed, 142 insertions(+), 122 deletions(-)


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function.
  2009-06-08  9:32 [PATCH 0/4] decouple vcpu index from apic id Gleb Natapov
@ 2009-06-08  9:32 ` Gleb Natapov
  2009-06-08 11:20   ` Avi Kivity
  2009-06-08  9:32 ` [PATCH 2/4] Use pointer to vcpu instead of vcpu_id in timer code Gleb Natapov
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 16+ messages in thread
From: Gleb Natapov @ 2009-06-08  9:32 UTC (permalink / raw)
  To: avi; +Cc: kvm

Use it instead of the open-coded "vcpu_id zero is BSP" assumption.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 arch/ia64/kvm/kvm-ia64.c |    2 +-
 arch/ia64/kvm/vcpu.c     |    2 +-
 arch/x86/kvm/i8254.c     |    4 ++--
 arch/x86/kvm/i8259.c     |    6 +++---
 arch/x86/kvm/lapic.c     |    7 ++++---
 arch/x86/kvm/svm.c       |    4 ++--
 arch/x86/kvm/vmx.c       |    6 +++---
 arch/x86/kvm/x86.c       |    4 ++--
 include/linux/kvm_host.h |    5 +++++
 virt/kvm/ioapic.c        |    4 +++-
 virt/kvm/kvm_main.c      |    2 ++
 11 files changed, 28 insertions(+), 18 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3199221..3924591 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1216,7 +1216,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (IS_ERR(vmm_vcpu))
 		return PTR_ERR(vmm_vcpu);
 
-	if (vcpu->vcpu_id == 0) {
+	if (kvm_vcpu_is_bsp(vcpu)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 		/*Set entry address for first run.*/
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a2c6c15..7e7391d 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -830,7 +830,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 
 	kvm = (struct kvm *)KVM_VM_BASE;
 
-	if (vcpu->vcpu_id == 0) {
+	if (kvm_vcpu_is_bsp(vcpu)) {
 		for (i = 0; i < kvm->arch.online_vcpus; i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index f91b0e3..b1d97e7 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -228,7 +228,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 
-	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
+	if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
 		return atomic_read(&pit->pit_state.pit_timer.pending);
 	return 0;
 }
@@ -249,7 +249,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
 	struct hrtimer *timer;
 
-	if (vcpu->vcpu_id != 0 || !pit)
+	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
 		return;
 
 	timer = &pit->pit_state.pit_timer.timer;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 2520922..95bdb83 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -57,7 +57,7 @@ static void pic_unlock(struct kvm_pic *s)
 	}
 
 	if (wakeup) {
-		vcpu = s->kvm->vcpus[0];
+		vcpu = s->kvm->bsp_vcpu;
 		if (vcpu)
 			kvm_vcpu_kick(vcpu);
 	}
@@ -252,7 +252,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
 {
 	int irq, irqbase, n;
 	struct kvm *kvm = s->pics_state->irq_request_opaque;
-	struct kvm_vcpu *vcpu0 = kvm->vcpus[0];
+	struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
 
 	if (s == &s->pics_state->pics[0])
 		irqbase = 0;
@@ -510,7 +510,7 @@ static void picdev_read(struct kvm_io_device *this,
 static void pic_irq_request(void *opaque, int level)
 {
 	struct kvm *kvm = opaque;
-	struct kvm_vcpu *vcpu = kvm->vcpus[0];
+	struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
 	struct kvm_pic *s = pic_irqchip(kvm);
 	int irq = pic_get_irq(&s->pics[0]);
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4bfd458..1820911 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -792,7 +792,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		vcpu->arch.apic_base = value;
 		return;
 	}
-	if (apic->vcpu->vcpu_id)
+
+	if (!kvm_vcpu_is_bsp(apic->vcpu))
 		value &= ~MSR_IA32_APICBASE_BSP;
 
 	vcpu->arch.apic_base = value;
@@ -849,7 +850,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	}
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
-	if (vcpu->vcpu_id == 0)
+	if (kvm_vcpu_is_bsp(vcpu))
 		vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
 	apic_update_ppr(apic);
 
@@ -993,7 +994,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
 	int r = 0;
 
-	if (vcpu->vcpu_id == 0) {
+	if (kvm_vcpu_is_bsp(vcpu)) {
 		if (!apic_hw_enabled(vcpu->arch.apic))
 			r = 1;
 		if ((lvt0 & APIC_LVT_MASKED) == 0 &&
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 37397f6..13f6f7d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -645,7 +645,7 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	init_vmcb(svm);
 
-	if (vcpu->vcpu_id != 0) {
+	if (!kvm_vcpu_is_bsp(vcpu)) {
 		kvm_rip_write(vcpu, 0);
 		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
 		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
@@ -709,7 +709,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	fx_init(&svm->vcpu);
 	svm->vcpu.fpu_active = 1;
 	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-	if (svm->vcpu.vcpu_id == 0)
+	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return &svm->vcpu;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7eb98e5..d95c1a5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2344,7 +2344,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	kvm_set_cr8(&vmx->vcpu, 0);
 	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
-	if (vmx->vcpu.vcpu_id == 0)
+	if (kvm_vcpu_is_bsp(&vmx->vcpu))
 		msr |= MSR_IA32_APICBASE_BSP;
 	kvm_set_apic_base(&vmx->vcpu, msr);
 
@@ -2355,7 +2355,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
 	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
 	 */
-	if (vmx->vcpu.vcpu_id == 0) {
+	if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
 		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
 		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
 	} else {
@@ -2384,7 +2384,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmcs_writel(GUEST_SYSENTER_EIP, 0);
 
 	vmcs_writel(GUEST_RFLAGS, 0x02);
-	if (vmx->vcpu.vcpu_id == 0)
+	if (kvm_vcpu_is_bsp(&vmx->vcpu))
 		kvm_rip_write(vcpu, 0xfff0);
 	else
 		kvm_rip_write(vcpu, 0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c1ed485..2b8316a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4294,7 +4294,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
 	/* Older userspace won't unhalt the vcpu on reset. */
-	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
+	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
 	    !(vcpu->arch.cr0 & X86_CR0_PE))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -4565,7 +4565,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm = vcpu->kvm;
 
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
-	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	else
 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bcb94eb..4d8e222 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,6 +130,7 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+	struct kvm_vcpu *bsp_vcpu;
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	struct list_head vm_list;
 	struct kvm_io_bus mmio_bus;
@@ -547,4 +548,8 @@ static inline void kvm_irqfd_release(struct kvm *kvm) {}
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->bsp_vcpu == vcpu;
+}
 #endif
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 6b00433..017d2d6 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -165,7 +165,9 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 	/* Always delivery PIT interrupt to vcpu 0 */
 	if (irq == 0) {
 		irqe.dest_mode = 0; /* Physical mode. */
-		irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+		/* need to read apic_id from the apic register since
+		 * it can be rewritten */
+		irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
 	}
 #endif
 	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a9f62bb..9a415c5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1732,6 +1732,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		goto vcpu_destroy;
 	}
 	kvm->vcpus[n] = vcpu;
+	if (n == 0)
+		kvm->bsp_vcpu = vcpu;
 	mutex_unlock(&kvm->lock);
 
 	/* Now it's all set up, let userspace reach it */
-- 
1.6.2.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 2/4] Use pointer to vcpu instead of vcpu_id in timer code.
  2009-06-08  9:32 [PATCH 0/4] decouple vcpu index from apic id Gleb Natapov
  2009-06-08  9:32 ` [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
@ 2009-06-08  9:32 ` Gleb Natapov
  2009-06-08  9:32 ` [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 16+ messages in thread
From: Gleb Natapov @ 2009-06-08  9:32 UTC (permalink / raw)
  To: avi; +Cc: kvm


Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 arch/x86/kvm/i8254.c     |    2 +-
 arch/x86/kvm/kvm_timer.h |    2 +-
 arch/x86/kvm/lapic.c     |    2 +-
 arch/x86/kvm/timer.c     |    2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b1d97e7..0e32420 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -291,7 +291,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 	pt->timer.function = kvm_timer_fn;
 	pt->t_ops = &kpit_ops;
 	pt->kvm = ps->pit->kvm;
-	pt->vcpu_id = 0;
+	pt->vcpu = pt->kvm->bsp_vcpu;
 
 	atomic_set(&pt->pending, 0);
 	ps->irq_ack = 1;
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 26bd6ba..55c7524 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -6,7 +6,7 @@ struct kvm_timer {
 	bool reinject;
 	struct kvm_timer_ops *t_ops;
 	struct kvm *kvm;
-	int vcpu_id;
+	struct kvm_vcpu *vcpu;
 };
 
 struct kvm_timer_ops {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1820911..2c94498 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -957,7 +957,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	apic->lapic_timer.timer.function = kvm_timer_fn;
 	apic->lapic_timer.t_ops = &lapic_timer_ops;
 	apic->lapic_timer.kvm = vcpu->kvm;
-	apic->lapic_timer.vcpu_id = vcpu->vcpu_id;
+	apic->lapic_timer.vcpu = vcpu;
 
 	apic->base_address = APIC_DEFAULT_PHYS_BASE;
 	vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index 86dbac0..85cc743 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -33,7 +33,7 @@ enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
 	struct kvm_vcpu *vcpu;
 	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
 
-	vcpu = ktimer->kvm->vcpus[ktimer->vcpu_id];
+	vcpu = ktimer->vcpu;
 	if (!vcpu)
 		return HRTIMER_NORESTART;
 
-- 
1.6.2.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id.
  2009-06-08  9:32 [PATCH 0/4] decouple vcpu index from apic id Gleb Natapov
  2009-06-08  9:32 ` [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
  2009-06-08  9:32 ` [PATCH 2/4] Use pointer to vcpu instead of vcpu_id in timer code Gleb Natapov
@ 2009-06-08  9:32 ` Gleb Natapov
  2009-06-08 11:23   ` Avi Kivity
  2009-06-08  9:32 ` [PATCH 4/4] Use macro to iterate over vcpus Gleb Natapov
  2009-06-08 11:25 ` [PATCH 0/4] decouple vcpu index from apic id Avi Kivity
  4 siblings, 1 reply; 16+ messages in thread
From: Gleb Natapov @ 2009-06-08  9:32 UTC (permalink / raw)
  To: avi; +Cc: kvm

Archs are free to use vcpu_id as they see fit. For x86 it is used as the
vcpu's apic id. A new ioctl is added to configure the boot vcpu id, which
was assumed to be 0 until now.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 include/linux/kvm.h      |    2 +
 include/linux/kvm_host.h |    2 +
 virt/kvm/kvm_main.c      |   58 ++++++++++++++++++++++++++-------------------
 3 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 29b62cc..c5109a4 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -430,6 +430,7 @@ struct kvm_trace_rec {
 #ifdef __KVM_HAVE_PIT
 #define KVM_CAP_PIT2 33
 #endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -535,6 +536,7 @@ struct kvm_irqfd {
 #define KVM_DEASSIGN_DEV_IRQ       _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD                  _IOW(KVMIO, 0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2		   _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID        _IO(KVMIO, 0x78)
 
 /*
  * ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4d8e222..3be6768 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,8 +130,10 @@ struct kvm {
 	int nmemslots;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
+	u32 bsp_vcpu_id;
 	struct kvm_vcpu *bsp_vcpu;
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	atomic_t online_vcpus;
 	struct list_head vm_list;
 	struct kvm_io_bus mmio_bus;
 	struct kvm_io_bus pio_bus;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9a415c5..52d01db 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -687,11 +687,6 @@ out:
 }
 #endif
 
-static inline int valid_vcpu(int n)
-{
-	return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -1699,24 +1694,18 @@ static struct file_operations kvm_vcpu_fops = {
  */
 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 {
-	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
-	if (fd < 0)
-		kvm_put_kvm(vcpu->kvm);
-	return fd;
+	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
 }
 
 /*
  * Creates some virtual cpus.  Good luck creating more than one.
  */
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
 	struct kvm_vcpu *vcpu;
 
-	if (!valid_vcpu(n))
-		return -EINVAL;
-
-	vcpu = kvm_arch_vcpu_create(kvm, n);
+	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
 		return PTR_ERR(vcpu);
 
@@ -1727,25 +1716,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		return r;
 
 	mutex_lock(&kvm->lock);
-	if (kvm->vcpus[n]) {
-		r = -EEXIST;
+	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+		r = -EINVAL;
 		goto vcpu_destroy;
 	}
-	kvm->vcpus[n] = vcpu;
-	if (n == 0)
-		kvm->bsp_vcpu = vcpu;
-	mutex_unlock(&kvm->lock);
+
+	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
+		if (kvm->vcpus[r]->vcpu_id == id) {
+			r = -EEXIST;
+			goto vcpu_destroy;
+		}
+
+	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
 
 	/* Now it's all set up, let userspace reach it */
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
-	if (r < 0)
-		goto unlink;
+	if (r < 0) {
+		kvm_put_kvm(kvm);
+		goto vcpu_destroy;
+	}
+
+	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+	smp_wmb();
+	atomic_inc(&kvm->online_vcpus);
+
+	if (kvm->bsp_vcpu_id == id)
+		kvm->bsp_vcpu = vcpu;
+	mutex_unlock(&kvm->lock);
 	return r;
 
-unlink:
-	mutex_lock(&kvm->lock);
-	kvm->vcpus[n] = NULL;
 vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);
@@ -2218,6 +2218,13 @@ static long kvm_vm_ioctl(struct file *filp,
 		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
 		break;
 	}
+	case KVM_SET_BOOT_CPU_ID:
+		r = 0;
+		if (atomic_read(&kvm->online_vcpus) != 0)
+			r = -EBUSY;
+		else
+			kvm->bsp_vcpu_id = arg;
+		break;
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
@@ -2284,6 +2291,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+	case KVM_CAP_SET_BOOT_CPU_ID:
 		return 1;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 	case KVM_CAP_IRQ_ROUTING:
-- 
1.6.2.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH 4/4] Use macro to iterate over vcpus.
  2009-06-08  9:32 [PATCH 0/4] decouple vcpu index from apic id Gleb Natapov
                   ` (2 preceding siblings ...)
  2009-06-08  9:32 ` [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
@ 2009-06-08  9:32 ` Gleb Natapov
  2009-06-08 11:25 ` [PATCH 0/4] decouple vcpu index from apic id Avi Kivity
  4 siblings, 0 replies; 16+ messages in thread
From: Gleb Natapov @ 2009-06-08  9:32 UTC (permalink / raw)
  To: avi; +Cc: kvm


Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 arch/ia64/kvm/kvm-ia64.c   |   33 ++++++++++++++-------------------
 arch/powerpc/kvm/powerpc.c |   16 ++++++++++------
 arch/s390/kvm/kvm-s390.c   |   33 ++++++++++++++++-----------------
 arch/x86/kvm/i8254.c       |    7 ++-----
 arch/x86/kvm/mmu.c         |    6 +++---
 arch/x86/kvm/x86.c         |   25 ++++++++++++-------------
 include/linux/kvm_host.h   |   11 +++++++++++
 virt/kvm/irq_comm.c        |    6 ++----
 virt/kvm/kvm_main.c        |   19 +++++++------------
 9 files changed, 77 insertions(+), 79 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3924591..c1c5cb6 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -337,13 +337,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
 {
 	union ia64_lid lid;
 	int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < kvm->arch.online_vcpus; i++) {
-		if (kvm->vcpus[i]) {
-			lid.val = VCPU_LID(kvm->vcpus[i]);
-			if (lid.id == id && lid.eid == eid)
-				return kvm->vcpus[i];
-		}
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		lid.val = VCPU_LID(vcpu);
+		if (lid.id == id && lid.eid == eid)
+			return vcpu;
 	}
 
 	return NULL;
@@ -409,21 +408,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct kvm *kvm = vcpu->kvm;
 	struct call_data call_data;
 	int i;
+	struct kvm_vcpu *vcpui;
 
 	call_data.ptc_g_data = p->u.ptc_g_data;
 
-	for (i = 0; i < kvm->arch.online_vcpus; i++) {
-		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
-						KVM_MP_STATE_UNINITIALIZED ||
-					vcpu == kvm->vcpus[i])
+	kvm_for_each_vcpu(i, vcpui, kvm) {
+		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+				vcpu == vcpui)
 			continue;
 
-		if (waitqueue_active(&kvm->vcpus[i]->wq))
-			wake_up_interruptible(&kvm->vcpus[i]->wq);
+		if (waitqueue_active(&vcpui->wq))
+			wake_up_interruptible(&vcpui->wq);
 
-		if (kvm->vcpus[i]->cpu != -1) {
-			call_data.vcpu = kvm->vcpus[i];
-			smp_call_function_single(kvm->vcpus[i]->cpu,
+		if (vcpui->cpu != -1) {
+			call_data.vcpu = vcpui;
+			smp_call_function_single(vcpui->cpu,
 					vcpu_global_purge, &call_data, 1);
 		} else
 			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
@@ -852,8 +851,6 @@ struct  kvm *kvm_arch_create_vm(void)
 
 	kvm_init_vm(kvm);
 
-	kvm->arch.online_vcpus = 0;
-
 	return kvm;
 
 }
@@ -1356,8 +1353,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto fail;
 	}
 
-	kvm->arch.online_vcpus++;
-
 	return vcpu;
 fail:
 	return ERR_PTR(r);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2cf915e..7ad30e0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -122,13 +122,17 @@ struct kvm *kvm_arch_create_vm(void)
 static void kvmppc_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_free(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_free(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 981ab04..f5fbc51 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -209,13 +209,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 static void kvm_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_destroy(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -311,8 +315,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	BUG_ON(!kvm->arch.sca);
 	if (!kvm->arch.sca->cpu[id].sda)
 		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
-	else
-		BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
@@ -679,7 +681,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
-	int i;
+	int i, j = 0, r = -EINVAL;
+	struct kvm_vcpu *vcpu;
 
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
@@ -704,14 +707,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		return -EINVAL;
 
 	/* request update of sie control block for all available vcpus */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
-						&kvm->vcpus[i]->requests))
-				continue;
-			kvm_s390_inject_sigp_stop(kvm->vcpus[i],
-						  ACTION_RELOADVCPU_ON_STOP);
-		}
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+			continue;
+		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
 	}
 
 	return 0;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 0e32420..719a1a7 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -666,11 +666,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	 * VCPU0, and only if its LVT0 is in EXTINT mode.
 	 */
 	if (kvm->arch.vapics_in_nmi_mode > 0)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (vcpu)
-				kvm_apic_nmi_wd_deliver(vcpu);
-		}
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_apic_nmi_wd_deliver(vcpu);
 }
 
 void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 809cce0..7c00009 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1327,10 +1327,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 {
 	int i;
+	struct kvm_vcpu *vcpu;
 
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		if (kvm->vcpus[i])
-			kvm->vcpus[i]->arch.last_pte_updated = NULL;
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		vcpu->arch.last_pte_updated = NULL;
 }
 
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b8316a..5ba45ad 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2910,10 +2910,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
+		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
 				continue;
 			if (!kvm_request_guest_time_update(vcpu))
@@ -4642,20 +4639,22 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 static void kvm_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;
+	struct kvm_vcpu *vcpu;
 
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	for (i = 0; i < KVM_MAX_VCPUS; ++i)
-		if (kvm->vcpus[i])
-			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		if (kvm->vcpus[i]) {
-			kvm_arch_vcpu_free(kvm->vcpus[i]);
-			kvm->vcpus[i] = NULL;
-		}
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_unload_vcpu_mmu(vcpu);
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arch_vcpu_free(vcpu);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
 
+	atomic_set(&kvm->online_vcpus, 0);
+	mutex_unlock(&kvm->lock);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3be6768..eb5d8ba 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -171,6 +171,17 @@ struct kvm {
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
 
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+	smp_rmb();
+	return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+	     idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+	     vcpup = kvm_get_vcpu(kvm, ++idx))
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a8bd466..c010ea2 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -66,10 +66,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 			kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
 
-	for (i = 0; i < KVM_MAX_VCPUS; i++) {
-		vcpu = kvm->vcpus[i];
-
-		if (!vcpu || !kvm_apic_present(vcpu))
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!kvm_apic_present(vcpu))
 			continue;
 
 		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 52d01db..8ea6ddb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -735,10 +735,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 		cpumask_clear(cpus);
 
 	me = get_cpu();
-	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-		vcpu = kvm->vcpus[i];
-		if (!vcpu)
-			continue;
+	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_and_set_bit(req, &vcpu->requests))
 			continue;
 		cpu = vcpu->cpu;
@@ -1703,7 +1700,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu, *v;
 
 	vcpu = kvm_arch_vcpu_create(kvm, id);
 	if (IS_ERR(vcpu))
@@ -1721,8 +1718,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 		goto vcpu_destroy;
 	}
 
-	for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
-		if (kvm->vcpus[r]->vcpu_id == id) {
+	kvm_for_each_vcpu(r, v, kvm)
+		if (v->vcpu_id == id) {
 			r = -EEXIST;
 			goto vcpu_destroy;
 		}
@@ -2505,11 +2502,9 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	*val = 0;
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (vcpu)
-				*val += *(u32 *)((void *)vcpu + offset);
-		}
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			*val += *(u32 *)((void *)vcpu + offset);
+
 	spin_unlock(&kvm_lock);
 	return 0;
 }
-- 
1.6.2.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function.
  2009-06-08  9:32 ` [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
@ 2009-06-08 11:20   ` Avi Kivity
  2009-06-08 16:53     ` Hollis Blanchard
  2009-06-09  1:00     ` Zhang, Xiantao
  0 siblings, 2 replies; 16+ messages in thread
From: Avi Kivity @ 2009-06-08 11:20 UTC (permalink / raw)
  To: Gleb Natapov; +Cc: kvm, Carsten Otte, Hollis Blanchard, Zhang, Xiantao

Gleb Natapov wrote:
> @@ -130,6 +130,7 @@ struct kvm {
>  	int nmemslots;
>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
>  					KVM_PRIVATE_MEM_SLOTS];
> +	struct kvm_vcpu *bsp_vcpu;
>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
>  	struct list_head vm_list;
>  	struct kvm_io_bus mmio_bus;
>   

The concept of bsp (boot processor) is limited IIUC to x86 and ia64, so 
please wrap with HAVE_KVM_IRQCHIP (which is a close approximation).

Arch maintainers, please confirm.

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id.
  2009-06-08  9:32 ` [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
@ 2009-06-08 11:23   ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2009-06-08 11:23 UTC (permalink / raw)
  To: Gleb Natapov; +Cc: kvm

Gleb Natapov wrote:
> Archs are free to use vcpu_id as they see fit. For x86 it is used as the
> vcpu's apic id. A new ioctl is added to configure the boot vcpu id, which
> was assumed to be 0 until now.
>
> @@ -2284,6 +2291,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
>  	case KVM_CAP_USER_MEMORY:
>  	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
>  	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
> +	case KVM_CAP_SET_BOOT_CPU_ID:
>  		return 1;
>  #ifdef CONFIG_HAVE_KVM_IRQCHIP
>  	case KVM_CAP_IRQ_ROUTING:
>   

Predicate on HAVE_KVM_IRQCHIP (or a new symbol if it turns out it's more 
appropriate).

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 0/4] decouple vcpu index from apic id
  2009-06-08  9:32 [PATCH 0/4] decouple vcpu index from apic id Gleb Natapov
                   ` (3 preceding siblings ...)
  2009-06-08  9:32 ` [PATCH 4/4] Use macro to iterate over vcpus Gleb Natapov
@ 2009-06-08 11:25 ` Avi Kivity
  2009-06-09  2:30     ` Zhang, Xiantao
  4 siblings, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2009-06-08 11:25 UTC (permalink / raw)
  To: Gleb Natapov; +Cc: kvm, Carsten Otte, Hollis Blanchard, Zhang, Xiantao

Gleb Natapov wrote:
> Currently vcpu_id is used as an index into the vcpus array and as the apic id
> on x86.  This is incorrect since apic ids do not have to be contiguous
> (they can also encode cpu hierarchy information) and may have values
> bigger than the vcpu array in the case of x2apic. This series removes the use
> of vcpu_id as the vcpus array index. Each architecture may use it however it
> sees fit. x86 uses it as the apic id.
>
>   

Apart from my minor comments, this looks good, but I'd like to get acks 
from arch maintainers to avoid surprises.

Xiantao, does setting the bsp id make sense for ia64?

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function.
  2009-06-08 11:20   ` Avi Kivity
@ 2009-06-08 16:53     ` Hollis Blanchard
  2009-06-08 16:57       ` Avi Kivity
  2009-06-09  1:00     ` Zhang, Xiantao
  1 sibling, 1 reply; 16+ messages in thread
From: Hollis Blanchard @ 2009-06-08 16:53 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Gleb Natapov, kvm, Carsten Otte, Zhang, Xiantao

On Mon, 2009-06-08 at 14:20 +0300, Avi Kivity wrote:
> Gleb Natapov wrote:
> > @@ -130,6 +130,7 @@ struct kvm {
> >  	int nmemslots;
> >  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
> >  					KVM_PRIVATE_MEM_SLOTS];
> > +	struct kvm_vcpu *bsp_vcpu;
> >  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> >  	struct list_head vm_list;
> >  	struct kvm_io_bus mmio_bus;
> >   
> 
> The concept of bsp (boot processor) is limited IIUC to x86 and ia64

That is true.

> please wrap with HAVE_KVM_IRQCHIP (which is a close approximation).

I don't know about that... I've definitely thought about implementing an
in-kernel PIC for PowerPC. (That will make more sense as an optimization
once the processors with hypervisor support start hitting the market.)

-- 
Hollis Blanchard
IBM Linux Technology Center


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function.
  2009-06-08 16:53     ` Hollis Blanchard
@ 2009-06-08 16:57       ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2009-06-08 16:57 UTC (permalink / raw)
  To: Hollis Blanchard; +Cc: Gleb Natapov, kvm, Carsten Otte, Zhang, Xiantao

Hollis Blanchard wrote:
>> please wrap with HAVE_KVM_IRQCHIP (which is a close approximation).
>>     
>
> I don't know about that... I've definitely thought about implementing an
> in-kernel PIC for PowerPC. (That will make more sense as an optimization
> once the processors with hypervisor support start hitting the market.)
>   

Yes, wasn't such a great idea.  I guess a new symbol 
CONFIG_KVM_APIC_ARCHITECTURE which will also encapsulate the ioapic stuff.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function.
  2009-06-08 11:20   ` Avi Kivity
  2009-06-08 16:53     ` Hollis Blanchard
@ 2009-06-09  1:00     ` Zhang, Xiantao
  2009-06-09  3:35       ` Avi Kivity
  1 sibling, 1 reply; 16+ messages in thread
From: Zhang, Xiantao @ 2009-06-09  1:00 UTC (permalink / raw)
  To: Avi Kivity, Gleb Natapov; +Cc: kvm, Carsten Otte, Hollis Blanchard

Avi Kivity wrote:
> Gleb Natapov wrote:
>> @@ -130,6 +130,7 @@ struct kvm {
>>  	int nmemslots;
>>  	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + 
>> KVM_PRIVATE_MEM_SLOTS]; +	struct kvm_vcpu *bsp_vcpu;
>>  	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
>>  	struct list_head vm_list;
>>  	struct kvm_io_bus mmio_bus;
>> 
> 
> The concept of bsp (boot processor) is limited IIUC to x86 and ia64,
> so please wrap with HAVE_KVM_IRQCHIP (which is a close approximation).
> 
> Arch maintainers, please confirm.

Yes, vcpu[0] should be the bsp for ia64 guests, even if this is not always true for real platforms.   
Xiantao

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 0/4] decouple vcpu index from apic id
@ 2009-06-09  2:30     ` Zhang, Xiantao
  0 siblings, 0 replies; 16+ messages in thread
From: Zhang, Xiantao @ 2009-06-09  2:30 UTC (permalink / raw)
  To: Avi Kivity, Gleb Natapov
  Cc: kvm, Carsten Otte, Hollis Blanchard, Jes Sorensen, kvm-ia64

Avi Kivity wrote:
> Gleb Natapov wrote:
>> Currently vcpu_id is used as an index into the vcpus array and as the apic id
>> on x86.  This is incorrect since apic ids do not have to be contiguous
>> (they can also encode cpu hierarchy information) and may have values
>> bigger than the vcpu array in the case of x2apic. This series removes the use
>> of vcpu_id as the vcpus array index. Each architecture may use it however it
>> sees fit. x86 uses it as the apic id. 
>> 
>> 
> 
> Apart from my minor comments, this looks good, but I'd like to get
> acks from arch maintainers to avoid surprises.
> 
> Xiantao, does setting the bsp id make sense for ia64?

Makes sense for ia64, if the bsp id is set to 0.
But I still have one comment about the patchset: in Patch [3/4], online_vcpus is introduced in the kvm structure, but it conflicts with arch.online_vcpus on ia64. So I think this should be fixed before check-in. 
Xiantao

^ permalink raw reply	[flat|nested] 16+ messages in thread

* RE: [PATCH 0/4] decouple vcpu index from apic id
@ 2009-06-09  2:30     ` Zhang, Xiantao
  0 siblings, 0 replies; 16+ messages in thread
From: Zhang, Xiantao @ 2009-06-09  2:30 UTC (permalink / raw)
  To: kvm-ia64

Avi Kivity wrote:
> Gleb Natapov wrote:
>> Currently vcpu_id is used as an index into the vcpus array and as the apic id
>> on x86.  This is incorrect since apic ids do not have to be contiguous
>> (they can also encode cpu hierarchy information) and may have values
>> bigger than the vcpu array in the case of x2apic. This series removes the use
>> of vcpu_id as the vcpus array index. Each architecture may use it however it
>> sees fit. x86 uses it as the apic id. 
>> 
>> 
> 
> Apart from my minor comments, this looks good, but I'd like to get
> acks from arch maintainers to avoid surprises.
> 
> Xiantao, does setting the bsp id make sense for ia64?

Makes sense for ia64, if the bsp id is set to 0.
But I still have one comment about the patchset: in Patch [3/4], online_vcpus is introduced in the kvm structure, but it conflicts with arch.online_vcpus on ia64. So I think this should be fixed before check-in. 
Xiantao

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function.
  2009-06-09  1:00     ` Zhang, Xiantao
@ 2009-06-09  3:35       ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2009-06-09  3:35 UTC (permalink / raw)
  To: Zhang, Xiantao; +Cc: Gleb Natapov, kvm, Carsten Otte, Hollis Blanchard

Zhang, Xiantao wrote:
>> The concept of bsp (boot processor) is limited IIUC to x86 and ia64,
>> so please wrap with HAVE_KVM_IRQCHIP (which is a close approximation).
>>
>> Arch maintainers, please confirm.
>>     
>
> Yes, vcpu[0] should be the bsp for ia64 guests, even if this is not always true for real platforms.   
>   

We are changing it to be potentially not always true for x86 as well.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 0/4] decouple vcpu index from apic id
  2009-06-09  2:30     ` Zhang, Xiantao
@ 2009-06-09  5:26       ` Gleb Natapov
  -1 siblings, 0 replies; 16+ messages in thread
From: Gleb Natapov @ 2009-06-09  5:26 UTC (permalink / raw)
  To: Zhang, Xiantao
  Cc: Avi Kivity, kvm, Carsten Otte, Hollis Blanchard, Jes Sorensen, kvm-ia64

On Tue, Jun 09, 2009 at 10:30:01AM +0800, Zhang, Xiantao wrote:
> Avi Kivity wrote:
> > Gleb Natapov wrote:
> >> Currently vcpu_id is used as an index into the vcpus array and as the apic id
> >> on x86.  This is incorrect since apic ids do not have to be contiguous
> >> (they can also encode cpu hierarchy information) and may have values
> >> bigger than the vcpu array in the case of x2apic. This series removes the use
> >> of vcpu_id as the vcpus array index. Each architecture may use it however it
> >> sees fit. x86 uses it as the apic id. 
> >> 
> >> 
> > 
> > Apart from my minor comments, this looks good, but I'd like to get
> > acks from arch maintainers to avoid surprises.
> > 
> > Xiantao, does setting the bsp id make sense for ia64?
> 
> Make sense for ia64, if bsp id is set to 0.
> But still have one comment about the patchset, in Patch[3/4], online_vcpus in kvm structure is introduced, but it should have the conflicts with arch.online_vcpus for ia64. So I think it should be fixed before check-in. 
Patch 4/4 removes the use of arch.online_vcpus; online_vcpus is used instead.
I'll move the arch.online_vcpus removal into patch 3/4.

--
			Gleb.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 0/4] decouple vcpu index from apic id
@ 2009-06-09  5:26       ` Gleb Natapov
  0 siblings, 0 replies; 16+ messages in thread
From: Gleb Natapov @ 2009-06-09  5:26 UTC (permalink / raw)
  To: kvm-ia64

On Tue, Jun 09, 2009 at 10:30:01AM +0800, Zhang, Xiantao wrote:
> Avi Kivity wrote:
> > Gleb Natapov wrote:
> >> Currently vcpu_id is used as an index into the vcpus array and as the apic id
> >> on x86.  This is incorrect since apic ids do not have to be contiguous
> >> (they can also encode cpu hierarchy information) and may have values
> >> bigger than the vcpu array in the case of x2apic. This series removes the use
> >> of vcpu_id as the vcpus array index. Each architecture may use it however it
> >> sees fit. x86 uses it as the apic id. 
> >> 
> >> 
> > 
> > Apart from my minor comments, this looks good, but I'd like to get
> > acks from arch maintainers to avoid surprises.
> > 
> > Xiantao, does setting the bsp id make sense for ia64?
> 
> Make sense for ia64, if bsp id is set to 0.
> But still have one comment about the patchset, in Patch[3/4], online_vcpus in kvm structure is introduced, but it should have the conflicts with arch.online_vcpus for ia64. So I think it should be fixed before check-in. 
Patch 4/4 removes the use of arch.online_vcpus; online_vcpus is used instead.
I'll move the arch.online_vcpus removal into patch 3/4.

--
			Gleb.

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2009-06-09  5:27 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-06-08  9:32 [PATCH 0/4] decouple vcpu index from apic id Gleb Natapov
2009-06-08  9:32 ` [PATCH 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
2009-06-08 11:20   ` Avi Kivity
2009-06-08 16:53     ` Hollis Blanchard
2009-06-08 16:57       ` Avi Kivity
2009-06-09  1:00     ` Zhang, Xiantao
2009-06-09  3:35       ` Avi Kivity
2009-06-08  9:32 ` [PATCH 2/4] Use pointer to vcpu instead of vcpu_id in timer code Gleb Natapov
2009-06-08  9:32 ` [PATCH 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
2009-06-08 11:23   ` Avi Kivity
2009-06-08  9:32 ` [PATCH 4/4] Use macro to iterate over vcpus Gleb Natapov
2009-06-08 11:25 ` [PATCH 0/4] decouple vcpu index from apic id Avi Kivity
2009-06-09  2:30   ` Zhang, Xiantao
2009-06-09  2:30     ` Zhang, Xiantao
2009-06-09  5:26     ` Gleb Natapov
2009-06-09  5:26       ` Gleb Natapov

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.