* Potential deadlock in vgic
From: Andre Przywara @ 2018-05-04 15:17 UTC
To: linux-arm-kernel
Hi Jan,
can you please test this patch with your setup, to see if it still
screams? That converts two forgotten irq_lock's over to be irqsafe,
plus lets lpi_list_lock join them (which you already did, IIUC).
That should appease lockdep, hopefully.
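To illustrate what lockdep is unhappy about (a minimal, made-up sketch, not
the vgic code itself; example_lock and the handler are hypothetical): a
spinlock that is also taken from hard IRQ context must be taken with
interrupts disabled in process context too, otherwise an interrupt arriving
on the same CPU while the lock is held would spin on it forever:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* IRQ context: the handler takes the lock (interrupts are already off). */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	spin_lock(&example_lock);
	/* ... touch data protected by example_lock ... */
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}

/*
 * Process context, broken: interrupts stay enabled while the lock is
 * held, so example_irq_handler() firing on this CPU would deadlock.
 */
static void example_broken(void)
{
	spin_lock(&example_lock);
	/* ... */
	spin_unlock(&example_lock);
}

/*
 * Process context, safe: this is the conversion the patch below does
 * for the remaining plain spin_lock() users.
 */
static void example_safe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&example_lock, flags);
}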
Cheers,
Andre.
---
virt/kvm/arm/vgic/vgic-debug.c | 5 +++--
virt/kvm/arm/vgic/vgic-its.c | 15 +++++++++------
virt/kvm/arm/vgic/vgic.c | 12 +++++++-----
3 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 10b38178cff2..4ffc0b5e6105 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
struct vgic_irq *irq;
struct kvm_vcpu *vcpu = NULL;
+ unsigned long flags;
if (iter->dist_id == 0) {
print_dist_state(s, &kvm->arch.vgic);
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
}
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
print_irq_state(s, irq, vcpu);
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
return 0;
}
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index a8f07243aa9f..51a80b600632 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+ unsigned long flags;
int ret;
/* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
irq->intid = intid;
irq->target_vcpu = vcpu;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
/*
* There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
dist->lpi_list_count++;
out_unlock:
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
/*
* We "cache" the configuration table entries in our struct vgic_irq's.
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_irq *irq;
+ unsigned long flags;
u32 *intids;
int irq_count, i = 0;
@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
if (!intids)
return -ENOMEM;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (i == irq_count)
break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
*intid_ptr = intids;
return i;
@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
int ret = 0;
+ unsigned long flags;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->target_vcpu = vcpu;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
if (irq->hw) {
struct its_vlpi_map map;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 5f52a2bca36f..6efcddfb5167 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -75,8 +75,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
+ unsigned long flags;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (irq->intid != intid)
@@ -92,7 +93,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
irq = NULL;
out_unlock:
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return irq;
}
@@ -137,19 +138,20 @@ static void vgic_irq_release(struct kref *ref)
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
+ unsigned long flags;
if (irq->intid < VGIC_MIN_LPI)
return;
- spin_lock(&dist->lpi_list_lock);
+ spin_lock_irqsave(&dist->lpi_list_lock, flags);
if (!kref_put(&irq->refcount, vgic_irq_release)) {
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
return;
};
list_del(&irq->lpi_list);
dist->lpi_list_count--;
- spin_unlock(&dist->lpi_list_lock);
+ spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
kfree(irq);
}
--
2.14.1
* Re: Potential deadlock in vgic
From: Jan Glauber @ 2018-05-04 16:26 UTC
To: Andre Przywara; +Cc: Marc Zyngier, linux-arm-kernel, kvmarm
On Fri, May 04, 2018 at 04:17:40PM +0100, Andre Przywara wrote:
> Hi Jan,
>
> can you please test this patch with your setup, to see if it still
> screams? That converts two forgotten irq_lock's over to be irqsafe,
> plus lets lpi_list_lock join them (which you already did, IIUC).
> That should appease lockdep, hopefully.
Hi Andre,
that solves the issue for me, no more lockdep complaints.
thanks!
Jan
> Cheers,
> Andre.
> ---
> virt/kvm/arm/vgic/vgic-debug.c | 5 +++--
> virt/kvm/arm/vgic/vgic-its.c | 15 +++++++++------
> virt/kvm/arm/vgic/vgic.c | 12 +++++++-----
> 3 files changed, 19 insertions(+), 13 deletions(-)
>
> diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
> index 10b38178cff2..4ffc0b5e6105 100644
> --- a/virt/kvm/arm/vgic/vgic-debug.c
> +++ b/virt/kvm/arm/vgic/vgic-debug.c
> @@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
> struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
> struct vgic_irq *irq;
> struct kvm_vcpu *vcpu = NULL;
> + unsigned long flags;
>
> if (iter->dist_id == 0) {
> print_dist_state(s, &kvm->arch.vgic);
> @@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
> irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
> }
>
> - spin_lock(&irq->irq_lock);
> + spin_lock_irqsave(&irq->irq_lock, flags);
> print_irq_state(s, irq, vcpu);
> - spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
>
> return 0;
> }
> diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
> index a8f07243aa9f..51a80b600632 100644
> --- a/virt/kvm/arm/vgic/vgic-its.c
> +++ b/virt/kvm/arm/vgic/vgic-its.c
> @@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
> {
> struct vgic_dist *dist = &kvm->arch.vgic;
> struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
> + unsigned long flags;
> int ret;
>
> /* In this case there is no put, since we keep the reference. */
> @@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
> irq->intid = intid;
> irq->target_vcpu = vcpu;
>
> - spin_lock(&dist->lpi_list_lock);
> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
>
> /*
> * There could be a race with another vgic_add_lpi(), so we need to
> @@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
> dist->lpi_list_count++;
>
> out_unlock:
> - spin_unlock(&dist->lpi_list_lock);
> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>
> /*
> * We "cache" the configuration table entries in our struct vgic_irq's.
> @@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
> {
> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
> struct vgic_irq *irq;
> + unsigned long flags;
> u32 *intids;
> int irq_count, i = 0;
>
> @@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
> if (!intids)
> return -ENOMEM;
>
> - spin_lock(&dist->lpi_list_lock);
> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
> list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
> if (i == irq_count)
> break;
> @@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
> continue;
> intids[i++] = irq->intid;
> }
> - spin_unlock(&dist->lpi_list_lock);
> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>
> *intid_ptr = intids;
> return i;
> @@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
> static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
> {
> int ret = 0;
> + unsigned long flags;
>
> - spin_lock(&irq->irq_lock);
> + spin_lock_irqsave(&irq->irq_lock, flags);
> irq->target_vcpu = vcpu;
> - spin_unlock(&irq->irq_lock);
> + spin_unlock_irqrestore(&irq->irq_lock, flags);
>
> if (irq->hw) {
> struct its_vlpi_map map;
> diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
> index 5f52a2bca36f..6efcddfb5167 100644
> --- a/virt/kvm/arm/vgic/vgic.c
> +++ b/virt/kvm/arm/vgic/vgic.c
> @@ -75,8 +75,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
> {
> struct vgic_dist *dist = &kvm->arch.vgic;
> struct vgic_irq *irq = NULL;
> + unsigned long flags;
>
> - spin_lock(&dist->lpi_list_lock);
> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
>
> list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
> if (irq->intid != intid)
> @@ -92,7 +93,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
> irq = NULL;
>
> out_unlock:
> - spin_unlock(&dist->lpi_list_lock);
> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>
> return irq;
> }
> @@ -137,19 +138,20 @@ static void vgic_irq_release(struct kref *ref)
> void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
> {
> struct vgic_dist *dist = &kvm->arch.vgic;
> + unsigned long flags;
>
> if (irq->intid < VGIC_MIN_LPI)
> return;
>
> - spin_lock(&dist->lpi_list_lock);
> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
> if (!kref_put(&irq->refcount, vgic_irq_release)) {
> - spin_unlock(&dist->lpi_list_lock);
> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
> return;
> };
>
> list_del(&irq->lpi_list);
> dist->lpi_list_count--;
> - spin_unlock(&dist->lpi_list_lock);
> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>
> kfree(irq);
> }
> --
> 2.14.1
* Re: Potential deadlock in vgic
From: Andre Przywara @ 2018-05-04 16:29 UTC
To: Jan Glauber; +Cc: Marc Zyngier, linux-arm-kernel, kvmarm
Hi Jan,
On 04/05/18 17:26, Jan Glauber wrote:
> On Fri, May 04, 2018 at 04:17:40PM +0100, Andre Przywara wrote:
>> Hi Jan,
>>
>> can you please test this patch with your setup, to see if it still
>> screams? That converts two forgotten irq_lock's over to be irqsafe,
>> plus lets lpi_list_lock join them (which you already did, IIUC).
>> That should appease lockdep, hopefully.
>
> Hi Andre,
>
> that solves the issue for me, no more lockdep complaints.
Thanks for the confirmation, will send out a proper patch shortly.
Have a nice weekend!
Andre.
>> ---
>> virt/kvm/arm/vgic/vgic-debug.c | 5 +++--
>> virt/kvm/arm/vgic/vgic-its.c | 15 +++++++++------
>> virt/kvm/arm/vgic/vgic.c | 12 +++++++-----
>> 3 files changed, 19 insertions(+), 13 deletions(-)
>>
>> diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
>> index 10b38178cff2..4ffc0b5e6105 100644
>> --- a/virt/kvm/arm/vgic/vgic-debug.c
>> +++ b/virt/kvm/arm/vgic/vgic-debug.c
>> @@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
>> struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
>> struct vgic_irq *irq;
>> struct kvm_vcpu *vcpu = NULL;
>> + unsigned long flags;
>>
>> if (iter->dist_id == 0) {
>> print_dist_state(s, &kvm->arch.vgic);
>> @@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
>> irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
>> }
>>
>> - spin_lock(&irq->irq_lock);
>> + spin_lock_irqsave(&irq->irq_lock, flags);
>> print_irq_state(s, irq, vcpu);
>> - spin_unlock(&irq->irq_lock);
>> + spin_unlock_irqrestore(&irq->irq_lock, flags);
>>
>> return 0;
>> }
>> diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
>> index a8f07243aa9f..51a80b600632 100644
>> --- a/virt/kvm/arm/vgic/vgic-its.c
>> +++ b/virt/kvm/arm/vgic/vgic-its.c
>> @@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
>> {
>> struct vgic_dist *dist = &kvm->arch.vgic;
>> struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
>> + unsigned long flags;
>> int ret;
>>
>> /* In this case there is no put, since we keep the reference. */
>> @@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
>> irq->intid = intid;
>> irq->target_vcpu = vcpu;
>>
>> - spin_lock(&dist->lpi_list_lock);
>> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
>>
>> /*
>> * There could be a race with another vgic_add_lpi(), so we need to
>> @@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
>> dist->lpi_list_count++;
>>
>> out_unlock:
>> - spin_unlock(&dist->lpi_list_lock);
>> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>>
>> /*
>> * We "cache" the configuration table entries in our struct vgic_irq's.
>> @@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
>> {
>> struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> struct vgic_irq *irq;
>> + unsigned long flags;
>> u32 *intids;
>> int irq_count, i = 0;
>>
>> @@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
>> if (!intids)
>> return -ENOMEM;
>>
>> - spin_lock(&dist->lpi_list_lock);
>> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
>> list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
>> if (i == irq_count)
>> break;
>> @@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
>> continue;
>> intids[i++] = irq->intid;
>> }
>> - spin_unlock(&dist->lpi_list_lock);
>> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>>
>> *intid_ptr = intids;
>> return i;
>> @@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
>> static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
>> {
>> int ret = 0;
>> + unsigned long flags;
>>
>> - spin_lock(&irq->irq_lock);
>> + spin_lock_irqsave(&irq->irq_lock, flags);
>> irq->target_vcpu = vcpu;
>> - spin_unlock(&irq->irq_lock);
>> + spin_unlock_irqrestore(&irq->irq_lock, flags);
>>
>> if (irq->hw) {
>> struct its_vlpi_map map;
>> diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
>> index 5f52a2bca36f..6efcddfb5167 100644
>> --- a/virt/kvm/arm/vgic/vgic.c
>> +++ b/virt/kvm/arm/vgic/vgic.c
>> @@ -75,8 +75,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
>> {
>> struct vgic_dist *dist = &kvm->arch.vgic;
>> struct vgic_irq *irq = NULL;
>> + unsigned long flags;
>>
>> - spin_lock(&dist->lpi_list_lock);
>> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
>>
>> list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
>> if (irq->intid != intid)
>> @@ -92,7 +93,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
>> irq = NULL;
>>
>> out_unlock:
>> - spin_unlock(&dist->lpi_list_lock);
>> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>>
>> return irq;
>> }
>> @@ -137,19 +138,20 @@ static void vgic_irq_release(struct kref *ref)
>> void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
>> {
>> struct vgic_dist *dist = &kvm->arch.vgic;
>> + unsigned long flags;
>>
>> if (irq->intid < VGIC_MIN_LPI)
>> return;
>>
>> - spin_lock(&dist->lpi_list_lock);
>> + spin_lock_irqsave(&dist->lpi_list_lock, flags);
>> if (!kref_put(&irq->refcount, vgic_irq_release)) {
>> - spin_unlock(&dist->lpi_list_lock);
>> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>> return;
>> };
>>
>> list_del(&irq->lpi_list);
>> dist->lpi_list_count--;
>> - spin_unlock(&dist->lpi_list_lock);
>> + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
>>
>> kfree(irq);
>> }
>> --
>> 2.14.1
* Re: Potential deadlock in vgic
From: Jan Glauber @ 2018-05-04 16:31 UTC
To: Andre Przywara; +Cc: Marc Zyngier, linux-arm-kernel, kvmarm
On Fri, May 04, 2018 at 04:17:40PM +0100, Andre Przywara wrote:
> Hi Jan,
>
> can you please test this patch with your setup, to see if it still
> screams? That converts two forgotten irq_lock's over to be irqsafe,
> plus lets lpi_list_lock join them (which you already did, IIUC).
> That should appease lockdep, hopefully.
Hit send too soon; on halting the guest I get:
[ 1025.694857] =============================
[ 1025.694862] WARNING: suspicious RCU usage
[ 1025.694868] 4.17.0-rc3-jang+ #73 Not tainted
[ 1025.694873] -----------------------------
[ 1025.694880] ./include/linux/kvm_host.h:575 suspicious rcu_dereference_check() usage!
[ 1025.694884]
other info that might help us debug this:
[ 1025.694890]
rcu_scheduler_active = 2, debug_locks = 1
[ 1025.694896] 18 locks held by qemu-system-aar/5540:
[ 1025.694901] #0: 000000005e03488a (&kvm->lock){+.+.}, at: vgic_its_set_attr+0x230/0x388
[ 1025.694937] #1: 000000004b1a3bb5 (&its->its_lock){+.+.}, at: vgic_its_set_attr+0x23c/0x388
[ 1025.694965] #2: 000000003ca8213c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.694993] #3: 00000000adb6ae51 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695021] #4: 0000000000563df7 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695048] #5: 00000000da16277a (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695076] #6: 00000000bf36d9aa (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695103] #7: 00000000607eaa4f (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695130] #8: 0000000046dadf65 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695157] #9: 00000000197747b2 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695184] #10: 00000000e4f1282c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695211] #11: 000000007471b896 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695239] #12: 000000005be54486 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695266] #13: 000000000f1fa184 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695293] #14: 0000000093fdb28b (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695396] #15: 0000000097cc103c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695426] #16: 00000000d24dd32e (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695453] #17: 000000002606c3a7 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
[ 1025.695482]
stack backtrace:
[ 1025.695489] CPU: 29 PID: 5540 Comm: qemu-system-aar Not tainted 4.17.0-rc3-jang+ #73
[ 1025.695494] Hardware name: To be filled by O.E.M. Saber/To be filled by O.E.M., BIOS 0ACKL018 03/30/2018
[ 1025.695499] Call trace:
[ 1025.695505] dump_backtrace+0x0/0x160
[ 1025.695510] show_stack+0x24/0x30
[ 1025.695517] dump_stack+0x9c/0xd4
[ 1025.695524] lockdep_rcu_suspicious+0xcc/0x118
[ 1025.695537] gfn_to_memslot+0x174/0x190
[ 1025.695546] kvm_read_guest+0x50/0xb0
[ 1025.695553] vgic_its_check_id.isra.0+0x114/0x148
[ 1025.695560] vgic_its_save_tables_v0+0x1a0/0x320
[ 1025.695567] vgic_its_set_attr+0x330/0x388
[ 1025.695573] kvm_device_ioctl_attr+0x9c/0xd8
[ 1025.695579] kvm_device_ioctl+0x8c/0xf8
[ 1025.695587] do_vfs_ioctl+0xc4/0x938
[ 1025.695594] ksys_ioctl+0x8c/0x98
[ 1025.695601] sys_ioctl+0x34/0x48
[ 1025.695609] el0_svc_naked+0x44/0x48
--Jan
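For reference, the splat above comes from kvm_read_guest() ending up in
gfn_to_memslot(), which dereferences the memslot array protected by
kvm->srcu, while the ITS table-save ioctl path reaches it without holding
the SRCU read lock. A sketch of the usual pattern for making such a guest
access legal from an ioctl path (the helper name is hypothetical and this
is not the fix that was eventually posted):

#include <linux/kvm_host.h>

static int example_read_its_table(struct kvm *kvm, gpa_t gpa,
				  void *buf, unsigned long len)
{
	int idx, ret;

	/* The SRCU read lock protects kvm_memslots() against updates. */
	idx = srcu_read_lock(&kvm->srcu);
	ret = kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}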
* Re: Potential deadlock in vgic
From: Andre Przywara @ 2018-05-11 14:29 UTC
To: Jan Glauber; +Cc: Marc Zyngier, linux-arm-kernel, kvmarm
Hi Jan,
On 04/05/18 17:31, Jan Glauber wrote:
> On Fri, May 04, 2018 at 04:17:40PM +0100, Andre Przywara wrote:
>> Hi Jan,
>>
>> can you please test this patch with your setup, to see if it still
>> screams? That converts two forgotten irq_lock's over to be irqsafe,
>> plus lets lpi_list_lock join them (which you already did, IIUC).
>> That should appease lockdep, hopefully.
>
> Hit send too soon, on halting the guest I get:
So I managed to finally wrap my head around this one.
I sent out a series [1], but failed to Cc: you, assuming that the
Reported-by: tag would make git send-email pick up your address. Apologies
for that; you should be able to pick the series up from one of the lists.
Can you please confirm that the last two patches fix the splat below for
you?
Thanks for testing and reporting!
Andre.
[1]
http://lists.infradead.org/pipermail/linux-arm-kernel/2018-May/577054.html
>
> [ 1025.694857] =============================
> [ 1025.694862] WARNING: suspicious RCU usage
> [ 1025.694868] 4.17.0-rc3-jang+ #73 Not tainted
> [ 1025.694873] -----------------------------
> [ 1025.694880] ./include/linux/kvm_host.h:575 suspicious rcu_dereference_check() usage!
> [ 1025.694884]
> other info that might help us debug this:
>
> [ 1025.694890]
> rcu_scheduler_active = 2, debug_locks = 1
> [ 1025.694896] 18 locks held by qemu-system-aar/5540:
> [ 1025.694901] #0: 000000005e03488a (&kvm->lock){+.+.}, at: vgic_its_set_attr+0x230/0x388
> [ 1025.694937] #1: 000000004b1a3bb5 (&its->its_lock){+.+.}, at: vgic_its_set_attr+0x23c/0x388
> [ 1025.694965] #2: 000000003ca8213c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.694993] #3: 00000000adb6ae51 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695021] #4: 0000000000563df7 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695048] #5: 00000000da16277a (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695076] #6: 00000000bf36d9aa (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695103] #7: 00000000607eaa4f (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695130] #8: 0000000046dadf65 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695157] #9: 00000000197747b2 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695184] #10: 00000000e4f1282c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695211] #11: 000000007471b896 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695239] #12: 000000005be54486 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695266] #13: 000000000f1fa184 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695293] #14: 0000000093fdb28b (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695396] #15: 0000000097cc103c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695426] #16: 00000000d24dd32e (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695453] #17: 000000002606c3a7 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> [ 1025.695482]
> stack backtrace:
> [ 1025.695489] CPU: 29 PID: 5540 Comm: qemu-system-aar Not tainted 4.17.0-rc3-jang+ #73
> [ 1025.695494] Hardware name: To be filled by O.E.M. Saber/To be filled by O.E.M., BIOS 0ACKL018 03/30/2018
> [ 1025.695499] Call trace:
> [ 1025.695505] dump_backtrace+0x0/0x160
> [ 1025.695510] show_stack+0x24/0x30
> [ 1025.695517] dump_stack+0x9c/0xd4
> [ 1025.695524] lockdep_rcu_suspicious+0xcc/0x118
> [ 1025.695537] gfn_to_memslot+0x174/0x190
> [ 1025.695546] kvm_read_guest+0x50/0xb0
> [ 1025.695553] vgic_its_check_id.isra.0+0x114/0x148
> [ 1025.695560] vgic_its_save_tables_v0+0x1a0/0x320
> [ 1025.695567] vgic_its_set_attr+0x330/0x388
> [ 1025.695573] kvm_device_ioctl_attr+0x9c/0xd8
> [ 1025.695579] kvm_device_ioctl+0x8c/0xf8
> [ 1025.695587] do_vfs_ioctl+0xc4/0x938
> [ 1025.695594] ksys_ioctl+0x8c/0x98
> [ 1025.695601] sys_ioctl+0x34/0x48
> [ 1025.695609] el0_svc_naked+0x44/0x48
>
> --Jan
>
* Re: Potential deadlock in vgic
From: Jan Glauber @ 2018-05-15 11:54 UTC
To: Andre Przywara; +Cc: Marc Zyngier, Christoffer Dall, linux-arm-kernel, kvmarm
On Fri, May 11, 2018 at 03:29:43PM +0100, Andre Przywara wrote:
> Hi Jan,
>
> On 04/05/18 17:31, Jan Glauber wrote:
> > On Fri, May 04, 2018 at 04:17:40PM +0100, Andre Przywara wrote:
> >> Hi Jan,
> >>
> >> can you please test this patch with your setup, to see if it still
> >> screams? That converts two forgotten irq_lock's over to be irqsafe,
> >> plus lets lpi_list_lock join them (which you already did, IIUC).
> >> That should appease lockdep, hopefully.
> >
> > Hit send too soon, on halting the guest I get:
>
> So I managed to finally wrap my head around this one.
> I sent out a series [1], and failed Cc:ing you under the assumption that
> Reported-by: would be picked up by git send-email. Apologies for that,
> but you should be able to pick it from one of the lists.
>
> Can you please confirm that the last two patches fix the splat below for
> you?
Sorry for the late response; I was offline. With all four patches applied,
both the locking and the RCU warnings are gone.
Thanks for fixing this!
Regards,
Jan
> Thanks for testing and reporting!
> Andre.
>
> [1]
> http://lists.infradead.org/pipermail/linux-arm-kernel/2018-May/577054.html
>
> >
> > [ 1025.694857] =============================
> > [ 1025.694862] WARNING: suspicious RCU usage
> > [ 1025.694868] 4.17.0-rc3-jang+ #73 Not tainted
> > [ 1025.694873] -----------------------------
> > [ 1025.694880] ./include/linux/kvm_host.h:575 suspicious rcu_dereference_check() usage!
> > [ 1025.694884]
> > other info that might help us debug this:
> >
> > [ 1025.694890]
> > rcu_scheduler_active = 2, debug_locks = 1
> > [ 1025.694896] 18 locks held by qemu-system-aar/5540:
> > [ 1025.694901] #0: 000000005e03488a (&kvm->lock){+.+.}, at: vgic_its_set_attr+0x230/0x388
> > [ 1025.694937] #1: 000000004b1a3bb5 (&its->its_lock){+.+.}, at: vgic_its_set_attr+0x23c/0x388
> > [ 1025.694965] #2: 000000003ca8213c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.694993] #3: 00000000adb6ae51 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695021] #4: 0000000000563df7 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695048] #5: 00000000da16277a (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695076] #6: 00000000bf36d9aa (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695103] #7: 00000000607eaa4f (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695130] #8: 0000000046dadf65 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695157] #9: 00000000197747b2 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695184] #10: 00000000e4f1282c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695211] #11: 000000007471b896 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695239] #12: 000000005be54486 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695266] #13: 000000000f1fa184 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695293] #14: 0000000093fdb28b (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695396] #15: 0000000097cc103c (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695426] #16: 00000000d24dd32e (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695453] #17: 000000002606c3a7 (&vcpu->mutex){+.+.}, at: lock_all_vcpus+0x64/0xc0
> > [ 1025.695482]
> > stack backtrace:
> > [ 1025.695489] CPU: 29 PID: 5540 Comm: qemu-system-aar Not tainted 4.17.0-rc3-jang+ #73
> > [ 1025.695494] Hardware name: To be filled by O.E.M. Saber/To be filled by O.E.M., BIOS 0ACKL018 03/30/2018
> > [ 1025.695499] Call trace:
> > [ 1025.695505] dump_backtrace+0x0/0x160
> > [ 1025.695510] show_stack+0x24/0x30
> > [ 1025.695517] dump_stack+0x9c/0xd4
> > [ 1025.695524] lockdep_rcu_suspicious+0xcc/0x118
> > [ 1025.695537] gfn_to_memslot+0x174/0x190
> > [ 1025.695546] kvm_read_guest+0x50/0xb0
> > [ 1025.695553] vgic_its_check_id.isra.0+0x114/0x148
> > [ 1025.695560] vgic_its_save_tables_v0+0x1a0/0x320
> > [ 1025.695567] vgic_its_set_attr+0x330/0x388
> > [ 1025.695573] kvm_device_ioctl_attr+0x9c/0xd8
> > [ 1025.695579] kvm_device_ioctl+0x8c/0xf8
> > [ 1025.695587] do_vfs_ioctl+0xc4/0x938
> > [ 1025.695594] ksys_ioctl+0x8c/0x98
> > [ 1025.695601] sys_ioctl+0x34/0x48
> > [ 1025.695609] el0_svc_naked+0x44/0x48
> >
> > --Jan
> >