* [PATCH] KVM: x86: (64-bit x86_64 machine) : fix -frame-larger-than warnings/errors.
@ 2021-09-17 15:55 Ajay Garg
2021-10-13 19:00 ` Sean Christopherson
0 siblings, 1 reply; 4+ messages in thread
From: Ajay Garg @ 2021-09-17 15:55 UTC (permalink / raw)
To: kvm; +Cc: ajay
From: ajay <ajaygargnsit@gmail.com>
Issue :
=======
In "kvm_hv_flush_tlb" and "kvm_hv_send_ipi" methods, defining
"u64 sparse_banks[64]" inside the methods (on the stack), causes the
stack-segment-memory-allocation to go beyond 1024 bytes, thus raising the
warning/error which breaks the build.
Fix :
=====
Instead of defining "u64 sparse_banks [64]" inside the methods, we instead
define this array in the (only) client method "kvm_hv_hypercall", and then
pass the array (and its size) as additional arguments to the two methods.
Doing this, we do not exceed the 1024 bytes stack-segment-memory-allocation,
on any stack-segment of any method.
Signed-off-by: ajay <ajaygargnsit@gmail.com>
---
arch/x86/kvm/hyperv.c | 34 ++++++++++++++++++++++++----------
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 232a86a6faaf..5340be93daa4 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1750,7 +1750,8 @@ struct kvm_hv_hcall {
sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
};
-static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc,
+ bool ex, u64 *sparse_banks, u32 num_sparse_banks)
{
int i;
gpa_t gpa;
@@ -1762,10 +1763,11 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
unsigned long *vcpu_mask;
u64 valid_bank_mask;
- u64 sparse_banks[64];
int sparse_banks_len;
bool all_cpus;
+ memset(sparse_banks, 0, sizeof(u64) * num_sparse_banks);
+
if (!ex) {
if (hc->fast) {
flush.address_space = hc->ingpa;
@@ -1875,7 +1877,8 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
}
}
-static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc,
+ bool ex, u64 *sparse_banks, u32 num_sparse_banks)
{
struct kvm *kvm = vcpu->kvm;
struct hv_send_ipi_ex send_ipi_ex;
@@ -1884,11 +1887,12 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
unsigned long *vcpu_mask;
unsigned long valid_bank_mask;
- u64 sparse_banks[64];
int sparse_banks_len;
u32 vector;
bool all_cpus;
+ memset(sparse_banks, 0, sizeof(u64) * num_sparse_banks);
+
if (!ex) {
if (!hc->fast) {
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
@@ -2162,6 +2166,10 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
struct kvm_hv_hcall hc;
u64 ret = HV_STATUS_SUCCESS;
+#define NUM_SPARSE_BANKS 64
+
+ u64 sparse_banks[NUM_SPARSE_BANKS];
+
/*
* hypercall generates UD from non zero cpl and real mode
* per HYPER-V spec
@@ -2248,42 +2256,48 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, &hc, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false,
+ sparse_banks, NUM_SPARSE_BANKS);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, &hc, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false,
+ sparse_banks, NUM_SPARSE_BANKS);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, &hc, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true,
+ sparse_banks, NUM_SPARSE_BANKS);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, &hc, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true,
+ sparse_banks, NUM_SPARSE_BANKS);
break;
case HVCALL_SEND_IPI:
if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, &hc, false);
+ ret = kvm_hv_send_ipi(vcpu, &hc, false,
+ sparse_banks, NUM_SPARSE_BANKS);
break;
case HVCALL_SEND_IPI_EX:
if (unlikely(hc.fast || hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, &hc, true);
+ ret = kvm_hv_send_ipi(vcpu, &hc, true,
+ sparse_banks, NUM_SPARSE_BANKS);
break;
case HVCALL_POST_DEBUG_DATA:
case HVCALL_RETRIEVE_DEBUG_DATA:
--
2.30.2
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] KVM: x86: (64-bit x86_64 machine) : fix -frame-larger-than warnings/errors.
2021-09-17 15:55 [PATCH] KVM: x86: (64-bit x86_64 machine) : fix -frame-larger-than warnings/errors Ajay Garg
@ 2021-10-13 19:00 ` Sean Christopherson
2021-10-14 17:23 ` Ajay Garg
0 siblings, 1 reply; 4+ messages in thread
From: Sean Christopherson @ 2021-10-13 19:00 UTC (permalink / raw)
To: Ajay Garg; +Cc: kvm
On Fri, Sep 17, 2021, Ajay Garg wrote:
> From: ajay <ajaygargnsit@gmail.com>
>
> Issue :
> =======
>
> In "kvm_hv_flush_tlb" and "kvm_hv_send_ipi" methods, defining
> "u64 sparse_banks[64]" inside the methods (on the stack), causes the
> stack-segment-memory-allocation to go beyond 1024 bytes, thus raising the
> warning/error which breaks the build.
>
> Fix :
> =====
>
> Instead of defining "u64 sparse_banks [64]" inside the methods, we instead
> define this array in the (only) client method "kvm_hv_hypercall", and then
> pass the array (and its size) as additional arguments to the two methods.
> Doing this, we do not exceed the 1024 bytes stack-segment-memory-allocation,
> on any stack-segment of any method.
This is a hack, and it's not guaranteed to work, e.g. if the compiler decided to
inline the helpers, then presumably this problem would rear its head again.
However, I don't think this is a problem any more. gcc-10 and clang-11 are both
comfortably under 1024, even if I force both helpers to be inlined. Neither
function has variables that would scale with NR_CPUS (and I verified high number
of NR_CPUS for giggles). Can you try reproducing the behavior on the latest
kvm/queue? I swear I've seen this in the past, but I couldn't find a commit that
"fixed" any such warning.
If it does repro, can you provide your .config and compiler version? Maybe your
compiler is doing something funky?
> Signed-off-by: ajay <ajaygargnsit@gmail.com>
The SoB needs your full name.
> ---
> arch/x86/kvm/hyperv.c | 34 ++++++++++++++++++++++++----------
> 1 file changed, 24 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index 232a86a6faaf..5340be93daa4 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -1750,7 +1750,8 @@ struct kvm_hv_hcall {
> sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
> };
>
> -static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
> +static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc,
> + bool ex, u64 *sparse_banks, u32 num_sparse_banks)
> {
> int i;
> gpa_t gpa;
> @@ -1762,10 +1763,11 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
> DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
> unsigned long *vcpu_mask;
> u64 valid_bank_mask;
> - u64 sparse_banks[64];
> int sparse_banks_len;
> bool all_cpus;
>
> + memset(sparse_banks, 0, sizeof(u64) * num_sparse_banks);
> +
FWIW, the array size needs to be validated, there is other code in this function
that assumes it's at least 64 entries.
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] KVM: x86: (64-bit x86_64 machine) : fix -frame-larger-than warnings/errors.
2021-10-13 19:00 ` Sean Christopherson
@ 2021-10-14 17:23 ` Ajay Garg
[not found] ` <CAHP4M8VzjPgzBmfn2DAdGD0P9OBF7_cafPP8nrjvTampGLoxyA@mail.gmail.com>
0 siblings, 1 reply; 4+ messages in thread
From: Ajay Garg @ 2021-10-14 17:23 UTC (permalink / raw)
To: Sean Christopherson; +Cc: kvm
Hi Sean,
Thanks for your time.
I have cloned the kernel many times since the time the patch was
posted, and have not seen the issue again.
One thing I distinctly remember that when the build was breaking, it
was with staging-drivers disabled. Since then, I have disabled the
staging-drivers. Today, I again enabled staging-drivers, and build
went fine.
So, I am ok with archiving this patch. We can revisit if someone else
reports this/similar issue.
Thanks and Regards,
Ajay
On Thu, Oct 14, 2021 at 12:30 AM Sean Christopherson <seanjc@google.com> wrote:
>
> On Fri, Sep 17, 2021, Ajay Garg wrote:
> > From: ajay <ajaygargnsit@gmail.com>
> >
> > Issue :
> > =======
> >
> > In "kvm_hv_flush_tlb" and "kvm_hv_send_ipi" methods, defining
> > "u64 sparse_banks[64]" inside the methods (on the stack), causes the
> > stack-segment-memory-allocation to go beyond 1024 bytes, thus raising the
> > warning/error which breaks the build.
> >
> > Fix :
> > =====
> >
> > Instead of defining "u64 sparse_banks [64]" inside the methods, we instead
> > define this array in the (only) client method "kvm_hv_hypercall", and then
> > pass the array (and its size) as additional arguments to the two methods.
>
> > Doing this, we do not exceed the 1024 bytes stack-segment-memory-allocation,
> > on any stack-segment of any method.
>
> This is a hack, and it's not guaranteed to work, e.g. if the compiler decided to
> inline the helpers, then presumably this problem would rear its head again.
>
> However, I don't think this is a problem any more. gcc-10 and clang-11 are both
> comfortably under 1024, even if I force both helpers to be inlined. Neither
> function has variables that would scale with NR_CPUS (and I verified high number
> of NR_CPUS for giggles). Can you try reproducing the behavior on the latest
> kvm/queue? I swear I've seen this in the past, but I couldn't find a commit that
> "fixed" any such warning.
>
> If it does repro, can you provide your .config and compiler version? Maybe your
> compiler is doing something funky?
>
> > Signed-off-by: ajay <ajaygargnsit@gmail.com>
>
> The SoB needs your full name.
>
> > ---
> > arch/x86/kvm/hyperv.c | 34 ++++++++++++++++++++++++----------
> > 1 file changed, 24 insertions(+), 10 deletions(-)
> >
> > diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> > index 232a86a6faaf..5340be93daa4 100644
> > --- a/arch/x86/kvm/hyperv.c
> > +++ b/arch/x86/kvm/hyperv.c
> > @@ -1750,7 +1750,8 @@ struct kvm_hv_hcall {
> > sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
> > };
> >
> > -static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
> > +static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc,
> > + bool ex, u64 *sparse_banks, u32 num_sparse_banks)
>
>
> > {
> > int i;
> > gpa_t gpa;
> > @@ -1762,10 +1763,11 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
> > DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
> > unsigned long *vcpu_mask;
> > u64 valid_bank_mask;
> > - u64 sparse_banks[64];
> > int sparse_banks_len;
> > bool all_cpus;
> >
> > + memset(sparse_banks, 0, sizeof(u64) * num_sparse_banks);
> > +
>
> FWIW, the array size needs to be validated, there is other code in this function
> that assumes it's at least 64 entries.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2021-10-26 0:23 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-09-17 15:55 [PATCH] KVM: x86: (64-bit x86_64 machine) : fix -frame-larger-than warnings/errors Ajay Garg
2021-10-13 19:00 ` Sean Christopherson
2021-10-14 17:23 ` Ajay Garg
[not found] ` <CAHP4M8VzjPgzBmfn2DAdGD0P9OBF7_cafPP8nrjvTampGLoxyA@mail.gmail.com>
2021-10-26 0:22 ` Sean Christopherson
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.