linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] x86/build: Fix vmlinux size check on 64-bit
@ 2020-10-05 15:15 Arvind Sankar
  2020-10-27 20:08 ` Borislav Petkov
  0 siblings, 1 reply; 10+ messages in thread
From: Arvind Sankar @ 2020-10-05 15:15 UTC (permalink / raw)
  To: x86; +Cc: linux-kernel

Commit b4e0409a36f4 ("x86: check vmlinux limits, 64-bit") added a check
that the size of the 64-bit kernel is less than KERNEL_IMAGE_SIZE.

The check uses (_end - _text), but this is not enough. The initial PMD
used in startup_64() (level2_kernel_pgt) can only map upto
KERNEL_IMAGE_SIZE from __START_KERNEL_map, not from _text.

The correct check is the same as for 32-bit, since LOAD_OFFSET is
defined appropriately for the two architectures. Just check
(_end - LOAD_OFFSET) against KERNEL_IMAGE_SIZE unconditionally.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
---
 arch/x86/kernel/vmlinux.lds.S | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf9e0adb5b7e..b38832821b98 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -454,13 +454,12 @@ SECTIONS
 	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
 }
 
-#ifdef CONFIG_X86_32
 /*
  * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
  */
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
-#else
+#ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
@@ -470,18 +469,12 @@ INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(fixed_percpu_data);
 INIT_PER_CPU(irq_stack_backing_store);
 
-/*
- * Build-time check on the image size:
- */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-	   "kernel image bigger than KERNEL_IMAGE_SIZE");
-
 #ifdef CONFIG_SMP
 . = ASSERT((fixed_percpu_data == 0),
            "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
-#endif /* CONFIG_X86_32 */
+#endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_KEXEC_CORE
 #include <asm/kexec.h>
-- 
2.26.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-05 15:15 [PATCH] x86/build: Fix vmlinux size check on 64-bit Arvind Sankar
@ 2020-10-27 20:08 ` Borislav Petkov
  2020-10-27 21:14   ` Arvind Sankar
  0 siblings, 1 reply; 10+ messages in thread
From: Borislav Petkov @ 2020-10-27 20:08 UTC (permalink / raw)
  To: Arvind Sankar; +Cc: x86, linux-kernel

On Mon, Oct 05, 2020 at 11:15:39AM -0400, Arvind Sankar wrote:
> Commit b4e0409a36f4 ("x86: check vmlinux limits, 64-bit") added a check
> that the size of the 64-bit kernel is less than KERNEL_IMAGE_SIZE.
> 
> The check uses (_end - _text), but this is not enough. The initial PMD
> used in startup_64() (level2_kernel_pgt) can only map upto
> KERNEL_IMAGE_SIZE from __START_KERNEL_map, not from _text.
> 
> The correct check is the same as for 32-bit, since LOAD_OFFSET is
> defined appropriately for the two architectures. Just check
> (_end - LOAD_OFFSET) against KERNEL_IMAGE_SIZE unconditionally.
> 
> Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
> ---
>  arch/x86/kernel/vmlinux.lds.S | 11 ++---------
>  1 file changed, 2 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
> index bf9e0adb5b7e..b38832821b98 100644
> --- a/arch/x86/kernel/vmlinux.lds.S
> +++ b/arch/x86/kernel/vmlinux.lds.S
> @@ -454,13 +454,12 @@ SECTIONS
>  	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
>  }
>  
> -#ifdef CONFIG_X86_32
>  /*
>   * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
>   */
>  . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
>  	   "kernel image bigger than KERNEL_IMAGE_SIZE");
> -#else
> +#ifdef CONFIG_X86_64
>  /*
>   * Per-cpu symbols which need to be offset from __per_cpu_load
>   * for the boot processor.
> @@ -470,18 +469,12 @@ INIT_PER_CPU(gdt_page);
>  INIT_PER_CPU(fixed_percpu_data);
>  INIT_PER_CPU(irq_stack_backing_store);
>  
> -/*
> - * Build-time check on the image size:
> - */
> -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
> -	   "kernel image bigger than KERNEL_IMAGE_SIZE");

So we have this:

SECTIONS
{       
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else     
        . = __START_KERNEL;
	^^^^^^^^^^

which sets the location counter to

#define __START_KERNEL          (__START_KERNEL_map + __PHYSICAL_START)

which is 	0xffffffff80000000 + ALIGN(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN)

and that second term after the '+' has effect only when
CONFIG_RELOCATABLE=n and that's not really used on modern kernel configs
as RELOCATABLE is selected by EFI_STUB and RANDOMIZE_BASE depends on it
and ...

So IOW, in a usual .config we have:

__START_KERNEL_map at 0xffffffff80000000
_text		   at 0xffffffff81000000

So practically and for the majority of configs, the kernel image really
does start at _text and not at __START_KERNEL_map and we map 16Mb which
is 4 PMDs of unused pages. So basically you're correcting that here -
that the number tested against KERNEL_IMAGE_SIZE is 16Mb more.

Yes, no?

Or am I missing some more important aspect and this is more than just a
small correctness fixlet?

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-27 20:08 ` Borislav Petkov
@ 2020-10-27 21:14   ` Arvind Sankar
  2020-10-28 13:39     ` Borislav Petkov
  0 siblings, 1 reply; 10+ messages in thread
From: Arvind Sankar @ 2020-10-27 21:14 UTC (permalink / raw)
  To: Borislav Petkov; +Cc: Arvind Sankar, x86, linux-kernel

On Tue, Oct 27, 2020 at 09:08:03PM +0100, Borislav Petkov wrote:
> On Mon, Oct 05, 2020 at 11:15:39AM -0400, Arvind Sankar wrote:
> > Commit b4e0409a36f4 ("x86: check vmlinux limits, 64-bit") added a check
> > that the size of the 64-bit kernel is less than KERNEL_IMAGE_SIZE.
> > 
> > The check uses (_end - _text), but this is not enough. The initial PMD
> > used in startup_64() (level2_kernel_pgt) can only map upto
> > KERNEL_IMAGE_SIZE from __START_KERNEL_map, not from _text.
> > 
> > The correct check is the same as for 32-bit, since LOAD_OFFSET is
> > defined appropriately for the two architectures. Just check
> > (_end - LOAD_OFFSET) against KERNEL_IMAGE_SIZE unconditionally.
> > 
> > Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
> > ---
> >  arch/x86/kernel/vmlinux.lds.S | 11 ++---------
> >  1 file changed, 2 insertions(+), 9 deletions(-)
> > 
> > diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
> > index bf9e0adb5b7e..b38832821b98 100644
> > --- a/arch/x86/kernel/vmlinux.lds.S
> > +++ b/arch/x86/kernel/vmlinux.lds.S
> > @@ -454,13 +454,12 @@ SECTIONS
> >  	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
> >  }
> >  
> > -#ifdef CONFIG_X86_32
> >  /*
> >   * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
> >   */
> >  . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
> >  	   "kernel image bigger than KERNEL_IMAGE_SIZE");
> > -#else
> > +#ifdef CONFIG_X86_64
> >  /*
> >   * Per-cpu symbols which need to be offset from __per_cpu_load
> >   * for the boot processor.
> > @@ -470,18 +469,12 @@ INIT_PER_CPU(gdt_page);
> >  INIT_PER_CPU(fixed_percpu_data);
> >  INIT_PER_CPU(irq_stack_backing_store);
> >  
> > -/*
> > - * Build-time check on the image size:
> > - */
> > -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
> > -	   "kernel image bigger than KERNEL_IMAGE_SIZE");
> 
> So we have this:
> 
> SECTIONS
> {       
> #ifdef CONFIG_X86_32
>         . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
>         phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
> #else     
>         . = __START_KERNEL;
> 	^^^^^^^^^^
> 
> which sets the location counter to
> 
> #define __START_KERNEL          (__START_KERNEL_map + __PHYSICAL_START)
> 
> which is 	0xffffffff80000000 + ALIGN(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN)
> 
> and that second term after the '+' has effect only when
> CONFIG_RELOCATABLE=n and that's not really used on modern kernel configs
> as RELOCATABLE is selected by EFI_STUB and RANDOMIZE_BASE depends on it
> and ...
> 
> So IOW, in a usual .config we have:
> 
> __START_KERNEL_map at 0xffffffff80000000
> _text		   at 0xffffffff81000000
> 
> So practically and for the majority of configs, the kernel image really
> does start at _text and not at __START_KERNEL_map and we map 16Mb which
> is 4 PMDs of unused pages. So basically you're correcting that here -
> that the number tested against KERNEL_IMAGE_SIZE is 16Mb more.
> 
> Yes, no?
> 
> Or am I missing some more important aspect and this is more than just a
> small correctness fixlet?
> 
> Thx.
> 

This is indeed just a small correctness fixlet, but I'm not following
the rest of your comments. PHYSICAL_START has an effect independent of
the setting of RELOCATABLE. It's where the kernel image starts in
virtual address space, as shown by the 16MiB difference between
__START_KERNEL_map and _text in the usual .config situation. In all
configs, not just majority, the kernel image itself starts at _text. The
16MiB gap below _text is not actually mapped, but the important point is
that the way the initial construction of pagetables is currently setup,
the code cannot map anything above __START_KERNEL_map + KERNEL_IMAGE_SIZE,
so _end needs to be below that.

If KASLR was disabled (either at build-time or run-time), these
link-time addresses are where the kernel actually lives (in VA space);
and if it was enabled, it will make sure to place the _end of the kernel
below KERNEL_IMAGE_SIZE when choosing a random virtual location.

That said, AFAICT, RELOCATABLE and PHYSICAL_START look like historical
artifacts at this point: RELOCATABLE should be completely irrelevant for
the 64-bit kernel, and there's really no reason to be able to configure
the start VA of the kernel, that should just be constant independent of
PHYSICAL_START.

Thanks.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-27 21:14   ` Arvind Sankar
@ 2020-10-28 13:39     ` Borislav Petkov
  2020-10-28 16:45       ` Arvind Sankar
  0 siblings, 1 reply; 10+ messages in thread
From: Borislav Petkov @ 2020-10-28 13:39 UTC (permalink / raw)
  To: Arvind Sankar; +Cc: x86, linux-kernel

On Tue, Oct 27, 2020 at 05:14:22PM -0400, Arvind Sankar wrote:
> This is indeed just a small correctness fixlet, but I'm not following
> the rest of your comments.

I'm just trying to make sense of that house of cards we have here.

> PHYSICAL_START has an effect independent of the setting of
> RELOCATABLE.

Theoretically you can set PHYSICAL_START to 0x0:

config PHYSICAL_START
        hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
        default "0x1000000"
        help
          This gives the physical address where the kernel is loaded.

          If kernel is a not relocatable (CONFIG_RELOCATABLE=n) then
          bzImage will decompress itself to above physical address and
          run from there.
	  ^^^^^^^^^^^^^^

and disable RELOCATABLE:

CONFIG_PHYSICAL_START=0x0
# CONFIG_RELOCATABLE is not set

but then you hit this:

ld: per-CPU data too large - increase CONFIG_PHYSICAL_START

full output at the end of the mail.

> It's where the kernel image starts in virtual address space, as shown
> by the 16MiB difference between __START_KERNEL_map and _text in the
> usual .config situation. In all configs, not just majority, the kernel
> image itself starts at _text.

Of course.

> The 16MiB gap below _text is not actually mapped, but the important
> point is that the way the initial construction of pagetables is
> currently setup, the code cannot map anything above __START_KERNEL_map
> + KERNEL_IMAGE_SIZE, so _end needs to be below that.

Right.

> If KASLR was disabled (either at build-time or run-time), these
> link-time addresses are where the kernel actually lives (in VA space);
> and if it was enabled, it will make sure to place the _end of the kernel
> below KERNEL_IMAGE_SIZE when choosing a random virtual location.

Yes.

> That said, AFAICT, RELOCATABLE and PHYSICAL_START look like historical
> artifacts at this point: RELOCATABLE should be completely irrelevant for
> the 64-bit kernel, and there's really no reason to be able to configure
> the start VA of the kernel, that should just be constant independent of
> PHYSICAL_START.

See the CONFIG_PHYSICAL_START help text. Apparently there has been a
use case where one can set PHYSICAL_START to the region where a kdump
kernel is going to be loaded and that kdump kernel is a vmlinux and not
a bzImage and thus not relocatable.

And I just produced a .config which doesn't work. I guess

	"Don't change this unless you know what you are doing."

is supposed to say that that value can't be just anything but that ain't
good enough.

Lemme try a different offset after reading this:

	  Otherwise if you plan to use vmlinux
          for capturing the crash dump change this value to start of
          the reserved region.  In other words, it can be set based on
          the "X" value as specified in the "crashkernel=YM@XM"

Setting it to 256M works:

$ readelf -a vmlinux | grep -E "\W(_end|_text)"
 95509: ffffffff90000000     0 NOTYPE  GLOBAL DEFAULT    1 _text
 97868: ffffffff9aa26000     0 NOTYPE  GLOBAL DEFAULT   30 _end

Setting to 8M works too, it seems:

$ readelf -a vmlinux | grep -E "\W(_end|_text)"
 95509: ffffffff80800000     0 NOTYPE  GLOBAL DEFAULT    1 _text
 97868: ffffffff8b226000     0 NOTYPE  GLOBAL DEFAULT   30 _end

so I guess this should be a range > 0 specification but I guess not
important.

Going back to the question at hand, if you think about it, the kernel
image *is* between _text or _stext and _end. And KERNEL_IMAGE_SIZE is
exactly what it is - the size of the kernel image.

Now, if you were talking about a kernel *mapping* size, then I'd
understand but this check is for the kernel *image* size.

But reading that commit message again:

    these build-time and link-time checks would have prevented the
    vmlinux size regression.

this *is* talking about vmlinux size and that starts at _text...

Thx.

ld: per-CPU data too large - increase CONFIG_PHYSICAL_START
init/main.o: in function `perf_trace_initcall_level':
/home/boris/kernel/linux/./include/trace/events/initcall.h:10:(.text+0x147): relocation truncated to fit: R_X86_64_PC32 against symbol `this_cpu_off' defined in .data..percpu..read_mostly section in arch/x86/kernel/setup_percpu.o
init/main.o: in function `perf_trace_initcall_start':
/home/boris/kernel/linux/./include/trace/events/initcall.h:27:(.text+0x252): relocation truncated to fit: R_X86_64_PC32 against symbol `this_cpu_off' defined in .data..percpu..read_mostly section in arch/x86/kernel/setup_percpu.o
init/main.o: in function `perf_trace_initcall_finish':
/home/boris/kernel/linux/./include/trace/events/initcall.h:48:(.text+0x319): relocation truncated to fit: R_X86_64_PC32 against symbol `this_cpu_off' defined in .data..percpu..read_mostly section in arch/x86/kernel/setup_percpu.o
init/main.o: in function `preempt_count':
/home/boris/kernel/linux/./arch/x86/include/asm/preempt.h:26:(.text+0x823): relocation truncated to fit: R_X86_64_PC32 against symbol `__preempt_count' defined in .data..percpu section in arch/x86/kernel/cpu/common.o
/home/boris/kernel/linux/./arch/x86/include/asm/preempt.h:26:(.text+0x84f): relocation truncated to fit: R_X86_64_PC32 against symbol `__preempt_count' defined in .data..percpu section in arch/x86/kernel/cpu/common.o
init/main.o: in function `preempt_count_set':
/home/boris/kernel/linux/./arch/x86/include/asm/preempt.h:34:(.text+0x88f): relocation truncated to fit: R_X86_64_PC32 against symbol `__preempt_count' defined in .data..percpu section in arch/x86/kernel/cpu/common.o
/home/boris/kernel/linux/./arch/x86/include/asm/preempt.h:37:(.text+0x8a3): relocation truncated to fit: R_X86_64_PC32 against symbol `__preempt_count' defined in .data..percpu section in arch/x86/kernel/cpu/common.o
init/main.o: in function `trace_initcall_start':
/home/boris/kernel/linux/./include/trace/events/initcall.h:27:(.text+0x8e5): relocation truncated to fit: R_X86_64_PC32 against symbol `cpu_number' defined in .data..percpu..read_mostly section in arch/x86/kernel/setup_percpu.o
init/main.o: in function `__preempt_count_add':
/home/boris/kernel/linux/./arch/x86/include/asm/preempt.h:79:(.text+0x8fc): relocation truncated to fit: R_X86_64_PC32 against symbol `__preempt_count' defined in .data..percpu section in arch/x86/kernel/cpu/common.o
init/main.o: in function `__preempt_count_dec_and_test':
/home/boris/kernel/linux/./arch/x86/include/asm/preempt.h:94:(.text+0x91b): relocation truncated to fit: R_X86_64_PC32 against symbol `__preempt_count' defined in .data..percpu section in arch/x86/kernel/cpu/common.o
init/main.o: in function `trace_initcall_finish':
/home/boris/kernel/linux/./include/trace/events/initcall.h:48:(.text+0x932): additional relocation overflows omitted from the output
make: *** [Makefile:1164: vmlinux] Error 1


-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-28 13:39     ` Borislav Petkov
@ 2020-10-28 16:45       ` Arvind Sankar
  2020-10-28 19:43         ` Borislav Petkov
  0 siblings, 1 reply; 10+ messages in thread
From: Arvind Sankar @ 2020-10-28 16:45 UTC (permalink / raw)
  To: Borislav Petkov; +Cc: Arvind Sankar, x86, linux-kernel

On Wed, Oct 28, 2020 at 02:39:09PM +0100, Borislav Petkov wrote:
> On Tue, Oct 27, 2020 at 05:14:22PM -0400, Arvind Sankar wrote:
> > This is indeed just a small correctness fixlet, but I'm not following
> > the rest of your comments.
> 
> I'm just trying to make sense of that house of cards we have here.
> 
> > PHYSICAL_START has an effect independent of the setting of
> > RELOCATABLE.
> 
> Theoretically you can set PHYSICAL_START to 0x0:
> 
> config PHYSICAL_START
>         hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
>         default "0x1000000"
>         help
>           This gives the physical address where the kernel is loaded.
> 
>           If kernel is a not relocatable (CONFIG_RELOCATABLE=n) then
>           bzImage will decompress itself to above physical address and
>           run from there.
> 	  ^^^^^^^^^^^^^^
> 
> and disable RELOCATABLE:
> 
> CONFIG_PHYSICAL_START=0x0
> # CONFIG_RELOCATABLE is not set
> 
> but then you hit this:
> 
> ld: per-CPU data too large - increase CONFIG_PHYSICAL_START
> 
> full output at the end of the mail.

You don't want to try to run the kernel from physical address 0 in any
case. The default is set to 16MiB to avoid low memory, historically to
avoid the 24-bit ISA DMA range.

> > That said, AFAICT, RELOCATABLE and PHYSICAL_START look like historical
> > artifacts at this point: RELOCATABLE should be completely irrelevant for
> > the 64-bit kernel, and there's really no reason to be able to configure
> > the start VA of the kernel, that should just be constant independent of
> > PHYSICAL_START.
> 
> See the CONFIG_PHYSICAL_START help text. Apparently there has been a
> use case where one can set PHYSICAL_START to the region where a kdump
> kernel is going to be loaded and that kdump kernel is a vmlinux and not
> a bzImage and thus not relocatable.

This doesn't matter for the 64-bit kernel, which can be run from any
physical address independent of the RELOCATABLE/PHYSICAL_START settings.
It only matters on 32-bit, where VA and PA are tied together by
	VA == __PAGE_OFFSET + PA
On 64-bit, the kernel's location in VA space and physical space can be
independently moved around, so a kernel that starts at 16MiB in VA space
can be loaded anywhere above 16MiB in physical space.

> 
> Going back to the question at hand, if you think about it, the kernel
> image *is* between _text or _stext and _end. And KERNEL_IMAGE_SIZE is
> exactly what it is - the size of the kernel image.
> 
> Now, if you were talking about a kernel *mapping* size, then I'd
> understand but this check is for the kernel *image* size.
> 

KERNEL_IMAGE_SIZE is _not_ the size of the kernel image, the name is
misleading. It is the maximum VA that the kernel can occupy, it is used
to prepopulate the PMD-level pagetable for initial boot (level2_kernel_pgt)
and is also used to define MODULES_VADDR, so it _is_ talking about
mappings. If you have a 30MiB kernel that is placed at a starting VA of
510MiB when KERNEL_IMAGE_SIZE is 512MiB, it won't boot.

> But reading that commit message again:
> 
>     these build-time and link-time checks would have prevented the
>     vmlinux size regression.
> 
> this *is* talking about vmlinux size and that starts at _text...
> 

Increasing vmlinux size can trigger the problem by pushing _end beyond
KERNEL_IMAGE_SIZE, but the problem occurs once _end - __START_KERNEL_map
exceeds KERNEL_IMAGE_SIZE, not when _end - _text exceeds it, hence this
patch.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-28 16:45       ` Arvind Sankar
@ 2020-10-28 19:43         ` Borislav Petkov
  2020-10-28 20:45           ` Arvind Sankar
  0 siblings, 1 reply; 10+ messages in thread
From: Borislav Petkov @ 2020-10-28 19:43 UTC (permalink / raw)
  To: Arvind Sankar; +Cc: x86, linux-kernel

On Wed, Oct 28, 2020 at 12:45:51PM -0400, Arvind Sankar wrote:
> You don't want to try to run the kernel from physical address 0 in any
> case. The default is set to 16MiB to avoid low memory, historically to
> avoid the 24-bit ISA DMA range.

Sure, that's why I wrote:

"... so I guess this should be a range > 0 specification but I guess not
important."

So how about a sentence or two alluding to that fact in the help text of
that option?

> This doesn't matter for the 64-bit kernel, which can be run from any
> physical address independent of the RELOCATABLE/PHYSICAL_START settings.
> It only matters on 32-bit, where VA and PA are tied together by
> 	VA == __PAGE_OFFSET + PA

You mean the kernel text mapping I assume because we do

#define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))

on 64-bit too but that's the direct mapping of all physical memory.

> On 64-bit, the kernel's location in VA space and physical space can be
> independently moved around, so a kernel that starts at 16MiB in VA space
> can be loaded anywhere above 16MiB in physical space.

Right.

> KERNEL_IMAGE_SIZE is _not_ the size of the kernel image, the name is
> misleading.

So that needs fixing too, I guess.

> It is the maximum VA that the kernel can occupy, it is used
> to prepopulate the PMD-level pagetable for initial boot (level2_kernel_pgt)
> and is also used to define MODULES_VADDR, so it _is_ talking about
> mappings. If you have a 30MiB kernel that is placed at a starting VA of
> 510MiB when KERNEL_IMAGE_SIZE is 512MiB, it won't boot.

... because not the whole kernel will be mapped, sure. There's a comment
above KERNEL_IMAGE_SIZE which could use some of that explanation.

> Increasing vmlinux size can trigger the problem by pushing _end
> beyond KERNEL_IMAGE_SIZE, but the problem occurs once _end -
> __START_KERNEL_map exceeds KERNEL_IMAGE_SIZE, not when _end - _text
> exceeds it, hence this patch.

Understood - in both cases, once _end goes beyond the 512MiB end of the
PMD mapping, we've lost. Please add that part to the commit message too
because we will forget.

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-28 19:43         ` Borislav Petkov
@ 2020-10-28 20:45           ` Arvind Sankar
  2020-10-29 16:19             ` [PATCH v2] " Arvind Sankar
  2020-10-29 20:05             ` [PATCH] " Borislav Petkov
  0 siblings, 2 replies; 10+ messages in thread
From: Arvind Sankar @ 2020-10-28 20:45 UTC (permalink / raw)
  To: Borislav Petkov; +Cc: Arvind Sankar, x86, linux-kernel

On Wed, Oct 28, 2020 at 08:43:55PM +0100, Borislav Petkov wrote:
> On Wed, Oct 28, 2020 at 12:45:51PM -0400, Arvind Sankar wrote:
> > You don't want to try to run the kernel from physical address 0 in any
> > case. The default is set to 16MiB to avoid low memory, historically to
> > avoid the 24-bit ISA DMA range.
> 
> Sure, that's why I wrote:
> 
> "... so I guess this should be a range > 0 specification but I guess not
> important."
> 
> So how about a sentence or two alluding to that fact in the help text of
> that option?

It's mentioned in the commit message for ceefccc93932, but yeah, it
would be useful to have in the help text I guess. But that's not really
related to this patch.

> 
> > This doesn't matter for the 64-bit kernel, which can be run from any
> > physical address independent of the RELOCATABLE/PHYSICAL_START settings.
> > It only matters on 32-bit, where VA and PA are tied together by
> > 	VA == __PAGE_OFFSET + PA
> 
> You mean the kernel text mapping I assume because we do
> 
> #define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))
> 
> on 64-bit too but that's the direct mapping of all physical memory.

Yes, I meant the virtual addresses of the kernel symbols: the 32-bit
kernel needs relocation processing to be loaded at a different physical
address, but the 64-bit kernel doesn't unless the virtual address is
also being changed.

> 
> > KERNEL_IMAGE_SIZE is _not_ the size of the kernel image, the name is
> > misleading.
> 
> So that needs fixing too, I guess.

It's become ABI I think: looks like it's included by that name in
vmcoreinfo for kexec crash dumps.

> 
> > It is the maximum VA that the kernel can occupy, it is used
> > to prepopulate the PMD-level pagetable for initial boot (level2_kernel_pgt)
> > and is also used to define MODULES_VADDR, so it _is_ talking about
> > mappings. If you have a 30MiB kernel that is placed at a starting VA of
> > 510MiB when KERNEL_IMAGE_SIZE is 512MiB, it won't boot.
> 
> ... because not the whole kernel will be mapped, sure. There's a comment
> above KERNEL_IMAGE_SIZE which could use some of that explanation.

Hm, it also looks like KERNEL_IMAGE_SIZE is entirely unused on 32-bit
except for this linker script check and for KASLR. I'll do a v2 cleaning
up those comments.

> 
> > Increasing vmlinux size can trigger the problem by pushing _end
> > beyond KERNEL_IMAGE_SIZE, but the problem occurs once _end -
> > __START_KERNEL_map exceeds KERNEL_IMAGE_SIZE, not when _end - _text
> > exceeds it, hence this patch.
> 
> Understood - in both cases, once _end goes beyond the 512MiB end of the
> PMD mapping, we've lost. Please add that part to the commit message too
> because we will forget.
> 

That's what this bit in the commit message was trying to explain:
  The check uses (_end - _text), but this is not enough. The initial PMD
  used in startup_64() (level2_kernel_pgt) can only map upto
  KERNEL_IMAGE_SIZE from __START_KERNEL_map, not from _text.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH v2] x86/build: Fix vmlinux size check on 64-bit
  2020-10-28 20:45           ` Arvind Sankar
@ 2020-10-29 16:19             ` Arvind Sankar
  2020-10-29 21:02               ` [tip: x86/build] " tip-bot2 for Arvind Sankar
  2020-10-29 20:05             ` [PATCH] " Borislav Petkov
  1 sibling, 1 reply; 10+ messages in thread
From: Arvind Sankar @ 2020-10-29 16:19 UTC (permalink / raw)
  To: Borislav Petkov, x86; +Cc: linux-kernel

Commit b4e0409a36f4 ("x86: check vmlinux limits, 64-bit") added a check
that the size of the 64-bit kernel is less than KERNEL_IMAGE_SIZE.

The check uses (_end - _text), but this is not enough. The initial PMD
used in startup_64() (level2_kernel_pgt) can only map upto
KERNEL_IMAGE_SIZE from __START_KERNEL_map, not from _text, and the
modules area (MODULES_VADDR) starts at KERNEL_IMAGE_SIZE.

The correct check is what is currently done for 32-bit, since
LOAD_OFFSET is defined appropriately for the two architectures. Just
check (_end - LOAD_OFFSET) against KERNEL_IMAGE_SIZE unconditionally.

Note that on 32-bit, the limit is not strict: KERNEL_IMAGE_SIZE is not
really used by the main kernel. The higher the kernel is located, the
less the space available for the vmalloc area. However, it is used by
KASLR in the compressed stub to limit the maximum address of the kernel
to a safe value.

Clean up various comments to clarify that despite the name,
KERNEL_IMAGE_SIZE is not a limit on the size of the kernel image, but a
limit on the maximum virtual address that the image can occupy.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
---
 arch/x86/include/asm/page_32_types.h |  8 +++++++-
 arch/x86/include/asm/page_64_types.h |  6 ++++--
 arch/x86/include/asm/pgtable_32.h    | 18 ++++++------------
 arch/x86/kernel/head_64.S            | 20 +++++++++-----------
 arch/x86/kernel/vmlinux.lds.S        | 11 ++---------
 5 files changed, 28 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index f462895a33e4..faf9cc1c14bb 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -53,7 +53,13 @@
 #define STACK_TOP_MAX		STACK_TOP
 
 /*
- * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
+ * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
+ * address for the kernel image, rather than the limit on the size itself. On
+ * 32-bit, this is not a strict limit, but this value is used to limit the
+ * link-time virtual address range of the kernel, and by KASLR to limit the
+ * randomized address from which the kernel is executed. A relocatable kernel
+ * can be loaded somewhat higher than KERNEL_IMAGE_SIZE as long as enough space
+ * remains for the vmalloc area.
  */
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
 
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 3f49dac03617..645bd1d0ee07 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -98,8 +98,10 @@
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 /*
- * Maximum kernel image size is limited to 1 GiB, due to the fixmap living
- * in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S).
+ * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
+ * address for the kernel image, rather than the limit on the size itself.
+ * This can be at most 1 GiB, due to the fixmap living in the next 1 GiB (see
+ * level2_kernel_pgt in arch/x86/kernel/head_64.S).
  *
  * On KASLR use 1 GiB by default, leaving 1 GiB for modules once the
  * page tables are fully set up.
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index d7acae4120d5..7c9c968a42ef 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -57,19 +57,13 @@ do {						\
 #endif
 
 /*
- * This is how much memory in addition to the memory covered up to
- * and including _end we need mapped initially.
- * We need:
- *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
- *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
+ * This is used to calculate the .brk reservation for initial pagetables.
+ * Enough space is reserved to allocate pagetables sufficient to cover all
+ * of LOWMEM_PAGES, which is an upper bound on the size of the direct map of
+ * lowmem.
  *
- * Modulo rounding, each megabyte assigned here requires a kilobyte of
- * memory, which is currently unreclaimed.
- *
- * This should be a multiple of a page.
- *
- * KERNEL_IMAGE_SIZE should be greater than pa(_end)
- * and small than max_low_pfn, otherwise will waste some page table entries
+ * With PAE paging (PTRS_PER_PMD > 1), we allocate PTRS_PER_PGD == 4 pages for
+ * the PMD's in addition to the pages required for the last level pagetables.
  */
 #if PTRS_PER_PMD > 1
 #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7eb2a1c87969..d41fa5bb77fe 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -524,21 +524,19 @@ SYM_DATA_END(level3_kernel_pgt)
 
 SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
 	/*
-	 * 512 MB kernel mapping. We spend a full page on this pagetable
-	 * anyway.
+	 * Kernel high mapping.
 	 *
-	 * The kernel code+data+bss must not be bigger than that.
+	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
+	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
+	 * 512 MiB otherwise.
 	 *
-	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
-	 *  If you want to increase this then increase MODULES_VADDR
-	 *  too.)
+	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
 	 *
-	 *  This table is eventually used by the kernel during normal
-	 *  runtime.  Care must be taken to clear out undesired bits
-	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
+	 * This table is eventually used by the kernel during normal runtime.
+	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
+	 * or _PAGE_GLOBAL in some cases.
 	 */
-	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
-		KERNEL_IMAGE_SIZE/PMD_SIZE)
+	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
 SYM_DATA_END(level2_kernel_pgt)
 
 SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf9e0adb5b7e..b38832821b98 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -454,13 +454,12 @@ SECTIONS
 	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
 }
 
-#ifdef CONFIG_X86_32
 /*
  * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
  */
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
-#else
+#ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
@@ -470,18 +469,12 @@ INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(fixed_percpu_data);
 INIT_PER_CPU(irq_stack_backing_store);
 
-/*
- * Build-time check on the image size:
- */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-	   "kernel image bigger than KERNEL_IMAGE_SIZE");
-
 #ifdef CONFIG_SMP
 . = ASSERT((fixed_percpu_data == 0),
            "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
-#endif /* CONFIG_X86_32 */
+#endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_KEXEC_CORE
 #include <asm/kexec.h>
-- 
2.26.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH] x86/build: Fix vmlinux size check on 64-bit
  2020-10-28 20:45           ` Arvind Sankar
  2020-10-29 16:19             ` [PATCH v2] " Arvind Sankar
@ 2020-10-29 20:05             ` Borislav Petkov
  1 sibling, 0 replies; 10+ messages in thread
From: Borislav Petkov @ 2020-10-29 20:05 UTC (permalink / raw)
  To: Arvind Sankar; +Cc: x86, linux-kernel

On Wed, Oct 28, 2020 at 04:45:49PM -0400, Arvind Sankar wrote:
> It's become ABI I think: looks like it's included by that name in
> vmcoreinfo for kexec crash dumps.

Yeah, last time we had the ABI discussion we agreed with the kexec/crash
folks that this is not an ABI and that crash is "tied" more or less to a
kernel version. So I wouldn't worry about that.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [tip: x86/build] x86/build: Fix vmlinux size check on 64-bit
  2020-10-29 16:19             ` [PATCH v2] " Arvind Sankar
@ 2020-10-29 21:02               ` tip-bot2 for Arvind Sankar
  0 siblings, 0 replies; 10+ messages in thread
From: tip-bot2 for Arvind Sankar @ 2020-10-29 21:02 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: Arvind Sankar, Borislav Petkov, x86, LKML

The following commit has been merged into the x86/build branch of tip:

Commit-ID:     ea3186b9572a1b0299448697cfc44920061872cf
Gitweb:        https://git.kernel.org/tip/ea3186b9572a1b0299448697cfc44920061872cf
Author:        Arvind Sankar <nivedita@alum.mit.edu>
AuthorDate:    Thu, 29 Oct 2020 12:19:03 -04:00
Committer:     Borislav Petkov <bp@suse.de>
CommitterDate: Thu, 29 Oct 2020 21:54:35 +01:00

x86/build: Fix vmlinux size check on 64-bit

Commit

  b4e0409a36f4 ("x86: check vmlinux limits, 64-bit")

added a check that the size of the 64-bit kernel is less than
KERNEL_IMAGE_SIZE.

The check uses (_end - _text), but this is not enough. The initial
PMD used in startup_64() (level2_kernel_pgt) can only map up to
KERNEL_IMAGE_SIZE from __START_KERNEL_map, not from _text, and the
modules area (MODULES_VADDR) starts at KERNEL_IMAGE_SIZE.

The correct check is what is currently done for 32-bit, since
LOAD_OFFSET is defined appropriately for the two architectures. Just
check (_end - LOAD_OFFSET) against KERNEL_IMAGE_SIZE unconditionally.

Note that on 32-bit, the limit is not strict: KERNEL_IMAGE_SIZE is not
really used by the main kernel. The higher the kernel is located, the
less the space available for the vmalloc area. However, it is used by
KASLR in the compressed stub to limit the maximum address of the kernel
to a safe value.

Clean up various comments to clarify that despite the name,
KERNEL_IMAGE_SIZE is not a limit on the size of the kernel image, but a
limit on the maximum virtual address that the image can occupy.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20201029161903.2553528-1-nivedita@alum.mit.edu
---
 arch/x86/include/asm/page_32_types.h |  8 +++++++-
 arch/x86/include/asm/page_64_types.h |  6 ++++--
 arch/x86/include/asm/pgtable_32.h    | 18 ++++++------------
 arch/x86/kernel/head_64.S            | 20 +++++++++-----------
 arch/x86/kernel/vmlinux.lds.S        | 12 +++---------
 5 files changed, 29 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index f462895..faf9cc1 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -53,7 +53,13 @@
 #define STACK_TOP_MAX		STACK_TOP
 
 /*
- * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
+ * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
+ * address for the kernel image, rather than the limit on the size itself. On
+ * 32-bit, this is not a strict limit, but this value is used to limit the
+ * link-time virtual address range of the kernel, and by KASLR to limit the
+ * randomized address from which the kernel is executed. A relocatable kernel
+ * can be loaded somewhat higher than KERNEL_IMAGE_SIZE as long as enough space
+ * remains for the vmalloc area.
  */
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
 
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 3f49dac..645bd1d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -98,8 +98,10 @@
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 /*
- * Maximum kernel image size is limited to 1 GiB, due to the fixmap living
- * in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S).
+ * In spite of the name, KERNEL_IMAGE_SIZE is a limit on the maximum virtual
+ * address for the kernel image, rather than the limit on the size itself.
+ * This can be at most 1 GiB, due to the fixmap living in the next 1 GiB (see
+ * level2_kernel_pgt in arch/x86/kernel/head_64.S).
  *
  * On KASLR use 1 GiB by default, leaving 1 GiB for modules once the
  * page tables are fully set up.
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index d7acae4..7c9c968 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -57,19 +57,13 @@ do {						\
 #endif
 
 /*
- * This is how much memory in addition to the memory covered up to
- * and including _end we need mapped initially.
- * We need:
- *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
- *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
+ * This is used to calculate the .brk reservation for initial pagetables.
+ * Enough space is reserved to allocate pagetables sufficient to cover all
+ * of LOWMEM_PAGES, which is an upper bound on the size of the direct map of
+ * lowmem.
  *
- * Modulo rounding, each megabyte assigned here requires a kilobyte of
- * memory, which is currently unreclaimed.
- *
- * This should be a multiple of a page.
- *
- * KERNEL_IMAGE_SIZE should be greater than pa(_end)
- * and small than max_low_pfn, otherwise will waste some page table entries
+ * With PAE paging (PTRS_PER_PMD > 1), we allocate PTRS_PER_PGD == 4 pages for
+ * the PMD's in addition to the pages required for the last level pagetables.
  */
 #if PTRS_PER_PMD > 1
 #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7eb2a1c..d41fa5b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -524,21 +524,19 @@ SYM_DATA_END(level3_kernel_pgt)
 
 SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
 	/*
-	 * 512 MB kernel mapping. We spend a full page on this pagetable
-	 * anyway.
+	 * Kernel high mapping.
 	 *
-	 * The kernel code+data+bss must not be bigger than that.
+	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
+	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
+	 * 512 MiB otherwise.
 	 *
-	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
-	 *  If you want to increase this then increase MODULES_VADDR
-	 *  too.)
+	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
 	 *
-	 *  This table is eventually used by the kernel during normal
-	 *  runtime.  Care must be taken to clear out undesired bits
-	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
+	 * This table is eventually used by the kernel during normal runtime.
+	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
+	 * or _PAGE_GLOBAL in some cases.
 	 */
-	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
-		KERNEL_IMAGE_SIZE/PMD_SIZE)
+	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
 SYM_DATA_END(level2_kernel_pgt)
 
 SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index bf9e0ad..efd9e9e 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -454,13 +454,13 @@ SECTIONS
 	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
 }
 
-#ifdef CONFIG_X86_32
 /*
  * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
  */
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
-#else
+
+#ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
  * for the boot processor.
@@ -470,18 +470,12 @@ INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(fixed_percpu_data);
 INIT_PER_CPU(irq_stack_backing_store);
 
-/*
- * Build-time check on the image size:
- */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-	   "kernel image bigger than KERNEL_IMAGE_SIZE");
-
 #ifdef CONFIG_SMP
 . = ASSERT((fixed_percpu_data == 0),
            "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
-#endif /* CONFIG_X86_32 */
+#endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_KEXEC_CORE
 #include <asm/kexec.h>

^ permalink raw reply related	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2020-10-29 21:02 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-05 15:15 [PATCH] x86/build: Fix vmlinux size check on 64-bit Arvind Sankar
2020-10-27 20:08 ` Borislav Petkov
2020-10-27 21:14   ` Arvind Sankar
2020-10-28 13:39     ` Borislav Petkov
2020-10-28 16:45       ` Arvind Sankar
2020-10-28 19:43         ` Borislav Petkov
2020-10-28 20:45           ` Arvind Sankar
2020-10-29 16:19             ` [PATCH v2] " Arvind Sankar
2020-10-29 21:02               ` [tip: x86/build] " tip-bot2 for Arvind Sankar
2020-10-29 20:05             ` [PATCH] " Borislav Petkov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).