* [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
@ 2015-01-22  9:39 Bogdan Purcareata
  2015-02-02  9:35 ` Purcareata Bogdan
  0 siblings, 1 reply; 15+ messages in thread
From: Bogdan Purcareata @ 2015-01-22  9:39 UTC (permalink / raw)
  To: linux-rt-users; +Cc: scottwood, Bogdan Purcareata, Mihai Caraman

This patch enables running intensive I/O workloads, e.g. netperf, in a guest
deployed on an RT host. It also enables guests to be SMP.

The openpic spinlock becomes a sleeping mutex on an RT system. This no longer
guarantees that EPR is atomic with exception delivery. The guest VCPU thread
fails due to a BUG_ON(preemptible()) when running netperf.

In order to make the kvmppc_mpic_set_epr() call safe on RT from non-atomic
context, convert the openpic lock to a raw_spinlock. A similar approach can
be seen for x86 platforms in the following commit [1].
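
As a minimal sketch of the semantic difference (illustrative only, not
part of the patch; the lock/unlock pairs mirror kvmppc_mpic_set_epr() in
the diff below):

	/*
	 * On PREEMPT_RT, spinlock_t is backed by a sleeping rtmutex, so
	 * this critical section is preemptible and may sleep:
	 */
	spin_lock_irqsave(&opp->lock, flags);
	kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
	spin_unlock_irqrestore(&opp->lock, flags);

	/*
	 * raw_spinlock_t keeps true spinning semantics on RT as well: it
	 * disables preemption (and, with _irqsave, local interrupts), so
	 * the EPR update stays atomic with exception delivery:
	 */
	raw_spin_lock_irqsave(&opp->lock, flags);
	kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
	raw_spin_unlock_irqrestore(&opp->lock, flags);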

Here are some comparative cyclictest measurements run inside a high-priority RT
guest running on an RT host. The guest has 1 VCPU and the test has been run for
15 minutes. The guest runs ~750 hackbench processes as background stress.

                  spinlock  raw_spinlock
Min latency (us)  4         4
Avg latency (us)  15        19
Max latency (us)  70        62

Due to the introduction of the raw_spinlock, guests with a high number of VCPUs
may induce large latencies on the underlying RT Linux system (e.g. cyclictest
reports latencies of ~15ms for guests with 24 VCPUs). This can be further
aggravated by sending a lot of external interrupts to the guest. A malicious app
can abuse this scenario, causing a DoS of the host Linux. Until the KVM openpic
code is refactored to use finer lock granularity, impose a limitation on the
number of VCPUs a guest can have when running on a PREEMPT_RT_FULL system with
KVM_MPIC emulation.
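
For context, the cap bites at VCPU creation time; the generic KVM code
consults KVM_MAX_VCPUS roughly as follows (a simplified sketch of
virt/kvm/kvm_main.c, not part of this patch):

	static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
	{
		if (id >= KVM_MAX_VCPUS)
			return -EINVAL;
		/* ... regular vcpu allocation and setup continue ... */
	}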

Sent against v3.14-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

[1] https://lkml.org/lkml/2010/1/11/289

Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
[ add KVM_MAX_VCPUS limitation ]
Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
Reviewed-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/include/asm/kvm_host.h |  6 +++++
 arch/powerpc/kvm/mpic.c             | 44 ++++++++++++++++++-------------------
 2 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1eaea2d..5ae38c5 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -35,8 +35,14 @@
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_KVM_MPIC)
+/* Limit the number of vcpus due to in-kernel mpic concurrency */
+#define KVM_MAX_VCPUS		4
+#define KVM_MAX_VCORES		4
+#else
 #define KVM_MAX_VCPUS		NR_CPUS
 #define KVM_MAX_VCORES		NR_CPUS
+#endif
 #define KVM_USER_MEM_SLOTS 32
 #define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
 
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index efbd996..b9802a3 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -194,7 +194,7 @@ struct openpic {
 	int num_mmio_regions;
 
 	gpa_t reg_base;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/* Behavior control */
 	struct fsl_mpic_info *fsl;
@@ -1105,9 +1105,9 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
 			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
 		}
 
-		spin_unlock(&opp->lock);
+		raw_spin_unlock(&opp->lock);
 		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
-		spin_lock(&opp->lock);
+		raw_spin_lock(&opp->lock);
 
 		break;
 	}
@@ -1182,12 +1182,12 @@ void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
 	int cpu = vcpu->arch.irq_cpu_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&opp->lock, flags);
+	raw_spin_lock_irqsave(&opp->lock, flags);
 
 	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
 		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
 
-	spin_unlock_irqrestore(&opp->lock, flags);
+	raw_spin_unlock_irqrestore(&opp->lock, flags);
 }
 
 static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
@@ -1387,9 +1387,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
 		return -EINVAL;
 	}
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 
 	/*
 	 * Technically only 32-bit accesses are allowed, but be nice to
@@ -1427,10 +1427,10 @@ static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
 		return -EOPNOTSUPP;
 	}
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
 				      *(const u32 *)ptr);
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 
 	pr_debug("%s: addr %llx ret %d val %x\n",
 		 __func__, addr, ret, *(const u32 *)ptr);
@@ -1501,14 +1501,14 @@ static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
 	if (addr & 3)
 		return -ENXIO;
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 
 	if (type == ATTR_SET)
 		ret = kvm_mpic_write_internal(opp, addr, *val);
 	else
 		ret = kvm_mpic_read_internal(opp, addr, val);
 
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 
 	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);
 
@@ -1545,9 +1545,9 @@ static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		if (attr32 != 0 && attr32 != 1)
 			return -EINVAL;
 
-		spin_lock_irq(&opp->lock);
+		raw_spin_lock_irq(&opp->lock);
 		openpic_set_irq(opp, attr->attr, attr32);
-		spin_unlock_irq(&opp->lock);
+		raw_spin_unlock_irq(&opp->lock);
 		return 0;
 	}
 
@@ -1592,9 +1592,9 @@ static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		if (attr->attr > MAX_SRC)
 			return -EINVAL;
 
-		spin_lock_irq(&opp->lock);
+		raw_spin_lock_irq(&opp->lock);
 		attr32 = opp->src[attr->attr].pending;
-		spin_unlock_irq(&opp->lock);
+		raw_spin_unlock_irq(&opp->lock);
 
 		if (put_user(attr32, (u32 __user *)(long)attr->addr))
 			return -EFAULT;
@@ -1670,7 +1670,7 @@ static int mpic_create(struct kvm_device *dev, u32 type)
 	opp->kvm = dev->kvm;
 	opp->dev = dev;
 	opp->model = type;
-	spin_lock_init(&opp->lock);
+	raw_spin_lock_init(&opp->lock);
 
 	add_mmio_region(opp, &openpic_gbl_mmio);
 	add_mmio_region(opp, &openpic_tmr_mmio);
@@ -1743,7 +1743,7 @@ int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
 	if (cpu < 0 || cpu >= MAX_CPU)
 		return -EPERM;
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 
 	if (opp->dst[cpu].vcpu) {
 		ret = -EEXIST;
@@ -1766,7 +1766,7 @@ int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
 		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
 
 out:
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 	return ret;
 }
 
@@ -1796,9 +1796,9 @@ static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
 	struct openpic *opp = kvm->arch.mpic;
 	unsigned long flags;
 
-	spin_lock_irqsave(&opp->lock, flags);
+	raw_spin_lock_irqsave(&opp->lock, flags);
 	openpic_set_irq(opp, irq, level);
-	spin_unlock_irqrestore(&opp->lock, flags);
+	raw_spin_unlock_irqrestore(&opp->lock, flags);
 
 	/* All code paths we care about don't check for the return value */
 	return 0;
@@ -1810,14 +1810,14 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	struct openpic *opp = kvm->arch.mpic;
 	unsigned long flags;
 
-	spin_lock_irqsave(&opp->lock, flags);
+	raw_spin_lock_irqsave(&opp->lock, flags);
 
 	/*
 	 * XXX We ignore the target address for now, as we only support
 	 *     a single MSI bank.
 	 */
 	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
-	spin_unlock_irqrestore(&opp->lock, flags);
+	raw_spin_unlock_irqrestore(&opp->lock, flags);
 
 	/* All code paths we care about don't check for the return value */
 	return 0;
-- 
2.1.4



* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2015-01-22  9:39 [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock Bogdan Purcareata
@ 2015-02-02  9:35 ` Purcareata Bogdan
  2015-02-17 12:27   ` Purcareata Bogdan
  0 siblings, 1 reply; 15+ messages in thread
From: Purcareata Bogdan @ 2015-02-02  9:35 UTC (permalink / raw)
  To: linux-rt-users; +Cc: scottwood, Mihai Caraman, linux-kernel

Ping?

On 22.01.2015 11:39, Bogdan Purcareata wrote:
> This patch enables running intensive I/O workloads, e.g. netperf, in a guest
> deployed on an RT host. It also enables guests to be SMP.
>
> [...]


* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2015-02-02  9:35 ` Purcareata Bogdan
@ 2015-02-17 12:27   ` Purcareata Bogdan
  2015-02-17 17:53     ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 15+ messages in thread
From: Purcareata Bogdan @ 2015-02-17 12:27 UTC (permalink / raw)
  To: linux-rt-users; +Cc: scottwood, Mihai Caraman, linux-kernel

Ping?

On 02.02.2015 11:35, Purcareata Bogdan wrote:
> Ping?
>
> On 22.01.2015 11:39, Bogdan Purcareata wrote:
>> This patch enables running intensive I/O workloads, e.g. netperf, in a guest
>> deployed on an RT host. It also enables guests to be SMP.
>>
>> [...]


* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2015-02-17 12:27   ` Purcareata Bogdan
@ 2015-02-17 17:53     ` Sebastian Andrzej Siewior
  2015-02-17 17:59       ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 15+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-02-17 17:53 UTC (permalink / raw)
  To: Purcareata Bogdan; +Cc: linux-rt-users, scottwood, Mihai Caraman, linux-kernel

* Purcareata Bogdan | 2015-02-17 14:27:44 [+0200]:

>Ping?
>
>On 02.02.2015 11:35, Purcareata Bogdan wrote:
>>Ping?

No body?


Sebastian


* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2015-02-17 17:53     ` Sebastian Andrzej Siewior
@ 2015-02-17 17:59       ` Sebastian Andrzej Siewior
  2015-02-18  8:31         ` Purcareata Bogdan
  0 siblings, 1 reply; 15+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-02-17 17:59 UTC (permalink / raw)
  To: Purcareata Bogdan; +Cc: linux-rt-users, scottwood, Mihai Caraman, linux-kernel

* Sebastian Andrzej Siewior | 2015-02-17 18:53:17 [+0100]:

>* Purcareata Bogdan | 2015-02-17 14:27:44 [+0200]:
>
>>Ping?
>>
>>On 02.02.2015 11:35, Purcareata Bogdan wrote:
>>>Ping?
>
>No body?
bah! That mutt thing is too fast.

The raw conversion looks sane and could go upstream. This other chunk:

|+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_KVM_MPIC)
|+/* Limit the number of vcpus due to in-kernel mpic concurrency */
|+#define KVM_MAX_VCPUS          4
|+#define KVM_MAX_VCORES         4
|+#else
| #define KVM_MAX_VCPUS          NR_CPUS
| #define KVM_MAX_VCORES         NR_CPUS
|+#endif

should be a separate patch. Please repost including ppc ml.

This reminds me of my multiple-MSI patch which someone else posted a while
ago. What happened to it?

Sebastian


* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2015-02-17 17:59       ` Sebastian Andrzej Siewior
@ 2015-02-18  8:31         ` Purcareata Bogdan
  2015-02-18  8:40           ` Sebastian Andrzej Siewior
  0 siblings, 1 reply; 15+ messages in thread
From: Purcareata Bogdan @ 2015-02-18  8:31 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: linux-rt-users, scottwood, Mihai Caraman, linux-kernel

On 17.02.2015 19:59, Sebastian Andrzej Siewior wrote:
> * Sebastian Andrzej Siewior | 2015-02-17 18:53:17 [+0100]:
>
>> * Purcareata Bogdan | 2015-02-17 14:27:44 [+0200]:
>>
>>> Ping?
>>>
>>> On 02.02.2015 11:35, Purcareata Bogdan wrote:
>>>> Ping?
>>
>> No body?
> bah! That mutt thing is too fast.
>
> The raw conversion looks sane and could go upstream. This other chunk:
>
> |+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_KVM_MPIC)
> |+/* Limit the number of vcpus due to in-kernel mpic concurrency */
> |+#define KVM_MAX_VCPUS          4
> |+#define KVM_MAX_VCORES         4
> |+#else
> | #define KVM_MAX_VCPUS          NR_CPUS
> | #define KVM_MAX_VCORES         NR_CPUS
> |+#endif
>
> should be a separate patch. Please repost including ppc ml.

Thanks! Will send a patchset separating these two functional changes - the
openpic raw_spinlock conversion for upstream ppc (since it doesn't change
behavior for !RT kernels anyway), and the MAX_VCPUS limitation for the RT tree.

> This reminds me of my multiple-MSI patch which someone else posted a while
> ago. What happened to it?

I'm not aware of this patch; could you give more details, please?

Bogdan P.


* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2015-02-18  8:31         ` Purcareata Bogdan
@ 2015-02-18  8:40           ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 15+ messages in thread
From: Sebastian Andrzej Siewior @ 2015-02-18  8:40 UTC (permalink / raw)
  To: Purcareata Bogdan; +Cc: linux-rt-users, scottwood, Mihai Caraman, linux-kernel

On 02/18/2015 09:31 AM, Purcareata Bogdan wrote:
> Thanks! Will send a patchset separating these 2 functional changes - the
> openpic raw_spinlock for upstream ppc (since it doesn't bring any
> changes anyway), and the MAX_VCPUS limitation for the RT tree.

thanks. please cc me on both.

>> This reminds me of my multiple-MSI patch which someone else posted a while
>> ago. What happened to it?
> 
> I'm not aware of this patch, could you give more details, please?

I just grabbed them from the archive and it seems that there were change
requests from Scott which were never addressed. Forget it then, I
remembered it differently.

> 
> Bogdan P.

Sebastian


* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2014-09-12 14:12     ` bogdan.purcareata
@ 2014-09-12 17:50       ` Scott Wood
  -1 siblings, 0 replies; 15+ messages in thread
From: Scott Wood @ 2014-09-12 17:50 UTC (permalink / raw)
  To: Purcareata Bogdan-B43198
  Cc: kvm-ppc, kvm, Caraman Mihai Claudiu-B02008, Tudor Laurentiu-B10716

On Fri, 2014-09-12 at 09:12 -0500, Purcareata Bogdan-B43198 wrote:
> > -----Original Message-----
> > From: Wood Scott-B07421
> > Sent: Thursday, September 11, 2014 9:19 PM
> > To: Purcareata Bogdan-B43198
> > Cc: kvm-ppc@vger.kernel.org; kvm@vger.kernel.org
> > Subject: Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
> > 
> > On Thu, 2014-09-11 at 15:25 -0400, Bogdan Purcareata wrote:
> > > This patch enables running intensive I/O workloads, e.g. netperf, in a guest
> > > deployed on an RT host. No change for !RT kernels.
> > >
> > > The openpic spinlock becomes a sleeping mutex on an RT system. This no longer
> > > guarantees that EPR is atomic with exception delivery. The guest VCPU thread
> > > fails due to a BUG_ON(preemptible()) when running netperf.
> > >
> > > In order to make the kvmppc_mpic_set_epr() call safe on RT from non-atomic
> > > context, convert the openpic lock to a raw_spinlock. A similar approach can
> > > be seen for x86 platforms in the following commit [1].
> > >
> > > Here are some comparative cyclictest measurements run inside a high-priority
> > > RT guest running on an RT host. The guest has 1 VCPU and the test has been
> > > run for 15 minutes. The guest runs ~750 hackbench processes as background
> > > stress.
> > 
> > Does hackbench involve triggering interrupts that would go through the
> > MPIC?  You may want to try an I/O-heavy benchmark to stress the MPIC
> > code (the more interrupt sources are active at once, the "better").
> 
> Before this patch, running netperf/iperf in the guest always resulted
> in hitting the aforementioned BUG_ON, when the host was RT. This is
> why I can't provide comparative cyclictest measurements before and after
> the patch, with heavy I/O stress. Since I had no problem running
> hackbench before, I'm assuming it doesn't involve interrupts passing
> through the MPIC. The measurements were posted just to show that the
> patch doesn't mess up anything somewhere else.

I know you can't provide before/after, but it would be nice to see what
the after numbers are with heavy MPIC activity.

> > Also try a guest with many vcpus.
> 
> AFAIK, without the MSI affinity patches [1], all vfio interrupts will
> go to core 0 in the guest. In this case, I guess there won't be
> contention induced latencies due to multiple VCPUs expecting to have
> their interrupts delivered. Am I getting it wrong?

It's not about contention, but about loops in the MPIC code that iterate
over the entire set of vcpus.
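
The shape of the code in question, as a paraphrased sketch of the
directed-delivery loop in openpic_update_irq() (arch/powerpc/kvm/mpic.c,
not an exact quote): delivery walks every potential destination CPU while
opp->lock is held, so with a raw lock the non-preemptible section grows
with the number of vcpus.

	for (i = 0; i < opp->nb_cpus; i++) {
		if (!(src->destmask & (1 << i)))
			continue;
		/* per-destination priority checks and IRQ-raise
		 * bookkeeping, all under the non-preemptible opp->lock */
	}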

-Scott


* RE: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2014-09-11 18:19   ` Scott Wood
@ 2014-09-12 14:12     ` bogdan.purcareata
  -1 siblings, 0 replies; 15+ messages in thread
From: bogdan.purcareata @ 2014-09-12 14:12 UTC (permalink / raw)
  To: Scott Wood; +Cc: kvm-ppc, kvm, mihai.caraman, Laurentiu.Tudor

> -----Original Message-----
> From: Wood Scott-B07421
> Sent: Thursday, September 11, 2014 9:19 PM
> To: Purcareata Bogdan-B43198
> Cc: kvm-ppc@vger.kernel.org; kvm@vger.kernel.org
> Subject: Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
> 
> On Thu, 2014-09-11 at 15:25 -0400, Bogdan Purcareata wrote:
> > This patch enables running intensive I/O workloads, e.g. netperf, in a guest
> > deployed on an RT host. No change for !RT kernels.
> >
> > The openpic spinlock becomes a sleeping mutex on an RT system. This no longer
> > guarantees that EPR is atomic with exception delivery. The guest VCPU thread
> > fails due to a BUG_ON(preemptible()) when running netperf.
> >
> > In order to make the kvmppc_mpic_set_epr() call safe on RT from non-atomic
> > context, convert the openpic lock to a raw_spinlock. A similar approach can
> > be seen for x86 platforms in the following commit [1].
> >
> > Here are some comparative cyclictest measurements run inside a high-priority
> > RT guest running on an RT host. The guest has 1 VCPU and the test has been
> > run for 15 minutes. The guest runs ~750 hackbench processes as background
> > stress.
> 
> Does hackbench involve triggering interrupts that would go through the
> MPIC?  You may want to try an I/O-heavy benchmark to stress the MPIC
> code (the more interrupt sources are active at once, the "better").

Before this patch, running netperf/iperf in the guest always resulted in
hitting the aforementioned BUG_ON, when the host was RT. This is why I
can't provide comparative cyclictest measurements before and after the
patch, with heavy I/O stress. Since I had no problem running hackbench
before, I'm assuming it doesn't involve interrupts passing through the
MPIC. The measurements were posted just to show that the patch doesn't
mess up anything somewhere else.

> Also try a guest with many vcpus.

AFAIK, without the MSI affinity patches [1], all vfio interrupts will go
to core 0 in the guest. In this case, I guess there won't be
contention-induced latencies due to multiple VCPUs expecting to have
their interrupts delivered. Am I getting it wrong?

[1] https://lists.ozlabs.org/pipermail/linuxppc-dev/2014-August/120247.html

Thanks,
Bogdan P.


* [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
@ 2014-09-11 19:25 Bogdan Purcareata
  0 siblings, 0 replies; 15+ messages in thread
From: Bogdan Purcareata @ 2014-09-11 19:25 UTC (permalink / raw)
  To: kvm-ppc; +Cc: kvm, Bogdan Purcareata

This patch enables running intensive I/O workloads, e.g. netperf, in a guest
deployed on an RT host. No change for !RT kernels.

The openpic spinlock becomes a sleeping mutex on an RT system. This no longer
guarantees that EPR is atomic with exception delivery. The guest VCPU thread
fails due to a BUG_ON(preemptible()) when running netperf.

In order to make the kvmppc_mpic_set_epr() call safe on RT from non-atomic
context, convert the openpic lock to a raw_spinlock. A similar approach can
be seen for x86 platforms in the following commit [1].

Here are some comparative cyclictest measurements run inside a high-priority RT
guest running on an RT host. The guest has 1 VCPU and the test has been run for
15 minutes. The guest runs ~750 hackbench processes as background stress.

                  spinlock  raw_spinlock
Min latency (us)  4         4
Avg latency (us)  15        19
Max latency (us)  70        62

[1] https://lkml.org/lkml/2010/1/11/289

Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
---
 arch/powerpc/kvm/mpic.c |   44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 2861ae9..309036c 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -194,7 +194,7 @@ struct openpic {
 	int num_mmio_regions;
 
 	gpa_t reg_base;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/* Behavior control */
 	struct fsl_mpic_info *fsl;
@@ -1105,9 +1105,9 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
 			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
 		}
 
-		spin_unlock(&opp->lock);
+		raw_spin_unlock(&opp->lock);
 		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
-		spin_lock(&opp->lock);
+		raw_spin_lock(&opp->lock);
 
 		break;
 	}
@@ -1182,12 +1182,12 @@ void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
 	int cpu = vcpu->arch.irq_cpu_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&opp->lock, flags);
+	raw_spin_lock_irqsave(&opp->lock, flags);
 
 	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
 		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
 
-	spin_unlock_irqrestore(&opp->lock, flags);
+	raw_spin_unlock_irqrestore(&opp->lock, flags);
 }
 
 static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
@@ -1387,9 +1387,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
 		return -EINVAL;
 	}
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 
 	/*
 	 * Technically only 32-bit accesses are allowed, but be nice to
@@ -1427,10 +1427,10 @@ static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
 		return -EOPNOTSUPP;
 	}
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
 				      *(const u32 *)ptr);
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 
 	pr_debug("%s: addr %llx ret %d val %x\n",
 		 __func__, addr, ret, *(const u32 *)ptr);
@@ -1501,14 +1501,14 @@ static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
 	if (addr & 3)
 		return -ENXIO;
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 
 	if (type == ATTR_SET)
 		ret = kvm_mpic_write_internal(opp, addr, *val);
 	else
 		ret = kvm_mpic_read_internal(opp, addr, val);
 
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 
 	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);
 
@@ -1545,9 +1545,9 @@ static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		if (attr32 != 0 && attr32 != 1)
 			return -EINVAL;
 
-		spin_lock_irq(&opp->lock);
+		raw_spin_lock_irq(&opp->lock);
 		openpic_set_irq(opp, attr->attr, attr32);
-		spin_unlock_irq(&opp->lock);
+		raw_spin_unlock_irq(&opp->lock);
 		return 0;
 	}
 
@@ -1592,9 +1592,9 @@ static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		if (attr->attr > MAX_SRC)
 			return -EINVAL;
 
-		spin_lock_irq(&opp->lock);
+		raw_spin_lock_irq(&opp->lock);
 		attr32 = opp->src[attr->attr].pending;
-		spin_unlock_irq(&opp->lock);
+		raw_spin_unlock_irq(&opp->lock);
 
 		if (put_user(attr32, (u32 __user *)(long)attr->addr))
 			return -EFAULT;
@@ -1669,7 +1669,7 @@ static int mpic_create(struct kvm_device *dev, u32 type)
 	opp->kvm = dev->kvm;
 	opp->dev = dev;
 	opp->model = type;
-	spin_lock_init(&opp->lock);
+	raw_spin_lock_init(&opp->lock);
 
 	add_mmio_region(opp, &openpic_gbl_mmio);
 	add_mmio_region(opp, &openpic_tmr_mmio);
@@ -1742,7 +1742,7 @@ int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
 	if (cpu < 0 || cpu >= MAX_CPU)
 		return -EPERM;
 
-	spin_lock_irq(&opp->lock);
+	raw_spin_lock_irq(&opp->lock);
 
 	if (opp->dst[cpu].vcpu) {
 		ret = -EEXIST;
@@ -1765,7 +1765,7 @@ int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
 		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
 
 out:
-	spin_unlock_irq(&opp->lock);
+	raw_spin_unlock_irq(&opp->lock);
 	return ret;
 }
 
@@ -1795,9 +1795,9 @@ static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
 	struct openpic *opp = kvm->arch.mpic;
 	unsigned long flags;
 
-	spin_lock_irqsave(&opp->lock, flags);
+	raw_spin_lock_irqsave(&opp->lock, flags);
 	openpic_set_irq(opp, irq, level);
-	spin_unlock_irqrestore(&opp->lock, flags);
+	raw_spin_unlock_irqrestore(&opp->lock, flags);
 
 	/* All code paths we care about don't check for the return value */
 	return 0;
@@ -1809,14 +1809,14 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	struct openpic *opp = kvm->arch.mpic;
 	unsigned long flags;
 
-	spin_lock_irqsave(&opp->lock, flags);
+	raw_spin_lock_irqsave(&opp->lock, flags);
 
 	/*
 	 * XXX We ignore the target address for now, as we only support
 	 *     a single MSI bank.
 	 */
 	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
-	spin_unlock_irqrestore(&opp->lock, flags);
+	raw_spin_unlock_irqrestore(&opp->lock, flags);
 
 	/* All code paths we care about don't check for the return value */
 	return 0;
-- 
1.7.10.4



* Re: [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock
  2014-09-11 13:06 ` Bogdan Purcareata
@ 2014-09-11 18:19   ` Scott Wood
  -1 siblings, 0 replies; 15+ messages in thread
From: Scott Wood @ 2014-09-11 18:19 UTC (permalink / raw)
  To: Bogdan Purcareata; +Cc: kvm-ppc, kvm

On Thu, 2014-09-11 at 15:25 -0400, Bogdan Purcareata wrote:
> This patch enables running intensive I/O workloads, e.g. netperf, in a guest
> deployed on an RT host. No change for !RT kernels.
> 
> The openpic spinlock becomes a sleeping mutex on an RT system. This no longer
> guarantees that EPR is atomic with exception delivery. The guest VCPU thread
> fails due to a BUG_ON(preemptible()) when running netperf.
> 
> In order to make the kvmppc_mpic_set_epr() call safe on RT from non-atomic
> context, convert the openpic lock to a raw_spinlock. A similar approach can
> be seen for x86 platforms in the following commit [1].
> 
> Here are some comparative cyclictest measurements run inside a high-priority RT
> guest running on an RT host. The guest has 1 VCPU and the test has been run for 15
> minutes. The guest runs ~750 hackbench processes as background stress.

Does hackbench involve triggering interrupts that would go through the
MPIC?  You may want to try an I/O-heavy benchmark to stress the MPIC
code (the more interrupt sources are active at once, the "better").
Also try a guest with many vcpus.

-Scott



end of thread, other threads:[~2015-02-18  8:47 UTC | newest]

Thread overview: 15+ messages
2015-01-22  9:39 [PATCH] KVM: PPC: Convert openpic lock to raw_spinlock Bogdan Purcareata
2015-02-02  9:35 ` Purcareata Bogdan
2015-02-17 12:27   ` Purcareata Bogdan
2015-02-17 17:53     ` Sebastian Andrzej Siewior
2015-02-17 17:59       ` Sebastian Andrzej Siewior
2015-02-18  8:31         ` Purcareata Bogdan
2015-02-18  8:40           ` Sebastian Andrzej Siewior
  -- strict thread matches above, loose matches on Subject: below --
2014-09-11 19:25 Bogdan Purcareata
2014-09-11 18:19 ` Scott Wood
2014-09-12 14:12   ` bogdan.purcareata
2014-09-12 17:50     ` Scott Wood
