From: Jason Yan <yanaijie@huawei.com>
To: <mpe@ellerman.id.au>, <linuxppc-dev@lists.ozlabs.org>,
	<diana.craciun@nxp.com>, <christophe.leroy@c-s.fr>,
	<benh@kernel.crashing.org>, <paulus@samba.org>,
	<npiggin@gmail.com>, <keescook@chromium.org>,
	<kernel-hardening@lists.openwall.com>, <oss@buserror.net>
Cc: <linux-kernel@vger.kernel.org>, <zhaohongjiang@huawei.com>,
	Jason Yan <yanaijie@huawei.com>
Subject: [PATCH v3 1/6] powerpc/fsl_booke/kaslr: refactor kaslr_legal_offset() and kaslr_early_init()
Date: Thu, 6 Feb 2020 10:58:20 +0800	[thread overview]
Message-ID: <20200206025825.22934-2-yanaijie@huawei.com> (raw)
In-Reply-To: <20200206025825.22934-1-yanaijie@huawei.com>

Refactor kaslr_legal_offset() and kaslr_early_init(): move the
computation of the 64M region index and the offset within that region
into kaslr_legal_offset(), and narrow the scope of the temporary TLB
variables in kaslr_early_init(). No functional change. This is a
preparation for KASLR on fsl_booke64.

Signed-off-by: Jason Yan <yanaijie@huawei.com>
Cc: Scott Wood <oss@buserror.net>
Cc: Diana Craciun <diana.craciun@nxp.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Kees Cook <keescook@chromium.org>
---
 arch/powerpc/mm/nohash/kaslr_booke.c | 40 ++++++++++++++--------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4a75f2d9bf0e..07b036e98353 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -25,6 +25,7 @@ struct regions {
 	unsigned long pa_start;
 	unsigned long pa_end;
 	unsigned long kernel_size;
+	unsigned long linear_sz;
 	unsigned long dtb_start;
 	unsigned long dtb_end;
 	unsigned long initrd_start;
@@ -260,11 +261,23 @@ static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
 		*size_cells = fdt32_to_cpu(*prop);
 }
 
-static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
-					       unsigned long offset)
+static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long random)
 {
 	unsigned long koffset = 0;
 	unsigned long start;
+	unsigned long index;
+	unsigned long offset;
+
+	/*
+	 * Decide which 64M we want to start
+	 * Only use the low 8 bits of the random seed
+	 */
+	index = random & 0xFF;
+	index %= regions.linear_sz / SZ_64M;
+
+	/* Decide offset inside 64M */
+	offset = random % (SZ_64M - regions.kernel_size);
+	offset = round_down(offset, SZ_16K);
 
 	while ((long)index >= 0) {
 		offset = memstart_addr + index * SZ_64M + offset;
@@ -289,10 +302,9 @@ static inline __init bool kaslr_disabled(void)
 static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
 						  unsigned long kernel_sz)
 {
-	unsigned long offset, random;
+	unsigned long random;
 	unsigned long ram, linear_sz;
 	u64 seed;
-	unsigned long index;
 
 	kaslr_get_cmdline(dt_ptr);
 	if (kaslr_disabled())
@@ -333,22 +345,12 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
 	regions.dtb_start = __pa(dt_ptr);
 	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
 	regions.kernel_size = kernel_sz;
+	regions.linear_sz = linear_sz;
 
 	get_initrd_range(dt_ptr);
 	get_crash_kernel(dt_ptr, ram);
 
-	/*
-	 * Decide which 64M we want to start
-	 * Only use the low 8 bits of the random seed
-	 */
-	index = random & 0xFF;
-	index %= linear_sz / SZ_64M;
-
-	/* Decide offset inside 64M */
-	offset = random % (SZ_64M - kernel_sz);
-	offset = round_down(offset, SZ_16K);
-
-	return kaslr_legal_offset(dt_ptr, index, offset);
+	return kaslr_legal_offset(dt_ptr, random);
 }
 
 /*
@@ -358,8 +360,6 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
  */
 notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
 {
-	unsigned long tlb_virt;
-	phys_addr_t tlb_phys;
 	unsigned long offset;
 	unsigned long kernel_sz;
 
@@ -375,8 +375,8 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
 	is_second_reloc = 1;
 
 	if (offset >= SZ_64M) {
-		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
-		tlb_phys = round_down(kernstart_addr, SZ_64M);
+		unsigned long tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
+		phys_addr_t tlb_phys = round_down(kernstart_addr, SZ_64M);
 
 		/* Create kernel map to relocate in */
 		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
-- 
2.17.2
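
For context, the sketch below (not part of the patch) shows, in a
standalone userspace form, how the refactored kaslr_legal_offset()
derives a candidate location from the random seed: the low 8 bits pick
one of the 64M regions of the linear mapping, and the remainder picks a
16K-aligned offset inside that region. The SZ_64M/SZ_16K/round_down
definitions and the linear_sz/kernel_size values are stand-ins for the
kernel's own, chosen only to keep the example self-contained:

/*
 * Standalone illustration of the index/offset split done by the
 * refactored kaslr_legal_offset(). Values are made-up examples.
 */
#include <stdio.h>

#define SZ_16K  (16UL * 1024)
#define SZ_64M  (64UL * 1024 * 1024)
/* power-of-two alignment, mirroring the kernel's round_down() */
#define round_down(x, a) ((x) & ~((a) - 1))

int main(void)
{
	unsigned long random = 0x9e3779b97f4a7c15UL;   /* example seed */
	unsigned long linear_sz = 16 * SZ_64M;          /* example: 1G linear mapping */
	unsigned long kernel_size = 24UL * 1024 * 1024; /* example: 24M kernel image */

	/* Decide which 64M region to start in, using only the low 8 bits */
	unsigned long index = (random & 0xFF) % (linear_sz / SZ_64M);

	/* Decide a 16K-aligned offset inside that 64M region */
	unsigned long offset = round_down(random % (SZ_64M - kernel_size), SZ_16K);

	printf("64M region index: %lu, offset within region: 0x%lx\n",
	       index, offset);
	return 0;
}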


Thread overview: 82+ messages
2020-02-06  2:58 [PATCH v3 0/6] implement KASLR for powerpc/fsl_booke/64 Jason Yan
2020-02-06  2:58 ` [PATCH v3 1/6] powerpc/fsl_booke/kaslr: refactor kaslr_legal_offset() and kaslr_early_init() Jason Yan [this message]
2020-02-20 13:40   ` Christophe Leroy
2020-02-26  2:11     ` Jason Yan
2020-02-06  2:58 ` [PATCH v3 2/6] powerpc/fsl_booke/64: introduce reloc_kernel_entry() helper Jason Yan
2020-02-20 13:41   ` Christophe Leroy
2020-02-06  2:58 ` [PATCH v3 3/6] powerpc/fsl_booke/64: implement KASLR for fsl_booke64 Jason Yan
2020-02-20 13:48   ` Christophe Leroy
2020-02-26  2:40     ` Jason Yan
2020-02-26  3:33       ` Jason Yan
2020-02-26  5:04         ` [RFC PATCH] Use IS_ENABLED() instead of #ifdefs Christophe Leroy
2020-02-26  6:26           ` Jason Yan
2020-02-26  5:10         ` [PATCH v3 3/6] powerpc/fsl_booke/64: implement KASLR for fsl_booke64 Christophe Leroy
2020-02-26  5:08       ` Christophe Leroy
2020-03-04 21:44   ` Scott Wood
2020-03-05  2:32     ` Jason Yan
2020-02-06  2:58 ` [PATCH v3 4/6] powerpc/fsl_booke/64: do not clear the BSS for the second pass Jason Yan
2020-03-04 21:49   ` Scott Wood
2020-03-05  3:14     ` Jason Yan
2020-02-06  2:58 ` [PATCH v3 5/6] powerpc/fsl_booke/64: clear the original kernel if randomized Jason Yan
2020-02-20 13:49   ` Christophe Leroy
2020-02-26  2:44     ` Jason Yan
2020-03-04 21:53   ` Scott Wood
2020-03-05  3:20     ` Jason Yan
2020-02-06  2:58 ` [PATCH v3 6/6] powerpc/fsl_booke/kaslr: rename kaslr-booke32.rst to kaslr-booke.rst and add 64bit part Jason Yan
2020-02-20 13:50   ` Christophe Leroy
2020-02-26  2:46     ` Jason Yan
2020-02-13  3:00 ` [PATCH v3 0/6] implement KASLR for powerpc/fsl_booke/64 Jason Yan
2020-02-20  3:33   ` Jason Yan
2020-02-26  7:16 ` Daniel Axtens
2020-02-26  8:18   ` Jason Yan
2020-02-26 11:41     ` Daniel Axtens
2020-02-27  1:55       ` Jason Yan
2020-02-28  5:53     ` Scott Wood
2020-02-28  6:47       ` Jason Yan
2020-02-29  4:28         ` Scott Wood
2020-02-29  7:27           ` Jason Yan
2020-02-29 22:54             ` Scott Wood
2020-03-02  2:17               ` Jason Yan
2020-03-02  3:24                 ` Scott Wood
2020-03-02  7:12                   ` Jason Yan
2020-03-02  8:47                     ` Scott Wood
2020-03-02  9:37                       ` Jason Yan
2020-03-04 21:21   ` Scott Wood
2020-03-05  3:22     ` Jason Yan
