* [PATCH v2 0/3] Have CONFIG_STRICT_KERNEL_RWX work with CONFIG_RELOCATABLE
@ 2017-07-31 12:11 Balbir Singh
2017-07-31 12:11 ` [PATCH v2 1/3] powerpc/mm/radix: Fix relocatable radix mappings for STRICT_RWX Balbir Singh
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Balbir Singh @ 2017-07-31 12:11 UTC (permalink / raw)
To: linuxppc-dev
These patches make CONFIG_STRICT_KERNEL_RWX work with CONFIG_RELOCATABLE
The first patch splits up the radix linear mapping nicely on relocation
to support granular read-only and execution bits. The second patch warns
if relocation is actually done (PHYSICAL_START > MEMORY_START), we do
best effort support of expected permissions. We could do more granular
linear mapping, but we decided to leave it as a TODO (to check for
performance/MPSS/etc).
The last patch changes the config so that we are no longer dependent on
!RELOCATABLE for CONFIG_STRICT_KERNEL_RWX feature.
Changelog v2
- Rebase on top of the changes made in v4.13
- Move hash tables to IS_ALIGNED logic
Balbir Singh (3):
powerpc/mm/radix: Fix relocatable radix mappings for STRICT_RWX
powerpc/mm/hash: WARN if relocation is enabled and
CONFIG_STRICT_KERNEL_RWX
powerpc/strict_kernel_rwx: Don't depend on !RELOCATABLE
arch/powerpc/Kconfig | 2 +-
arch/powerpc/mm/pgtable-hash64.c | 28 +++++-
arch/powerpc/mm/pgtable-radix.c | 183 ++++++++++++++++++++++++++++++++-------
3 files changed, 178 insertions(+), 35 deletions(-)
--
2.9.4
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH v2 1/3] powerpc/mm/radix: Fix relocatable radix mappings for STRICT_RWX
2017-07-31 12:11 [PATCH v2 0/3] Have CONFIG_STRICT_KERNEL_RWX work with CONFIG_RELOCATABLE Balbir Singh
@ 2017-07-31 12:11 ` Balbir Singh
2017-07-31 12:11 ` [PATCH v2 2/3] powerpc/mm/hash: WARN if relocation is enabled and CONFIG_STRICT_KERNEL_RWX Balbir Singh
2017-07-31 12:11 ` [PATCH v2 3/3] powerpc/strict_kernel_rwx: Don't depend on !RELOCATABLE Balbir Singh
2 siblings, 0 replies; 4+ messages in thread
From: Balbir Singh @ 2017-07-31 12:11 UTC (permalink / raw)
To: linuxppc-dev
The mappings now do perfect kernel pte mappings even when the
kernel is relocated. This patch refactors create_physical_mapping()
and mark_rodata_ro(). create_physical_mapping() is now largely done with
a helper called __create_physical_mapping(), which is defined differently
for when CONFIG_STRICT_KERNEL_RWX is enabled and when it's not.
The goal of the patchset is to provide minimal changes when the
CONFIG_STRICT_KERNEL_RWX is disabled, when enabled however, we do
split the linear mapping so that permissions are strictly adherent
to expectations from the user.
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
---
arch/powerpc/mm/pgtable-radix.c | 183 +++++++++++++++++++++++++++++++++-------
1 file changed, 151 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 671a45d..6e0176d 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -164,8 +164,14 @@ void radix__mark_rodata_ro(void)
end = (unsigned long)__init_begin;
radix__change_memory_range(start, end, _PAGE_WRITE);
+
+ start = (unsigned long)__start_interrupts - PHYSICAL_START;
+ end = (unsigned long)__end_interrupts - PHYSICAL_START;
+
+ radix__change_memory_range(start, end, _PAGE_WRITE);
}
+
void radix__mark_initmem_nx(void)
{
unsigned long start = (unsigned long)__init_begin;
@@ -173,6 +179,7 @@ void radix__mark_initmem_nx(void)
radix__change_memory_range(start, end, _PAGE_EXEC);
}
+
#endif /* CONFIG_STRICT_KERNEL_RWX */
static inline void __meminit print_mapping(unsigned long start,
@@ -185,31 +192,36 @@ static inline void __meminit print_mapping(unsigned long start,
pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}
-static int __meminit create_physical_mapping(unsigned long start,
- unsigned long end)
+/*
+ * Create physical mapping and return the last mapping size
+ * If the call is successful, end_of_mapping will return the
+ * last address mapped via this call, if not, it will leave
+ * the value untouched.
+ */
+static int __meminit __create_physical_mapping(unsigned long vstart,
+ unsigned long vend, pgprot_t prot,
+ unsigned long *end_of_mapping)
{
- unsigned long vaddr, addr, mapping_size = 0;
- pgprot_t prot;
- unsigned long max_mapping_size;
-#ifdef CONFIG_STRICT_KERNEL_RWX
- int split_text_mapping = 1;
-#else
- int split_text_mapping = 0;
-#endif
+ unsigned long mapping_size = 0;
+ static unsigned long previous_size;
+ unsigned long addr, start, end;
+ start = __pa(vstart);
+ end = __pa(vend);
start = _ALIGN_UP(start, PAGE_SIZE);
+
+ pr_devel("physical_mapping start %lx->%lx, prot %lx\n",
+ vstart, vend, pgprot_val(prot));
+
for (addr = start; addr < end; addr += mapping_size) {
- unsigned long gap, previous_size;
+ unsigned long gap;
int rc;
gap = end - addr;
previous_size = mapping_size;
- max_mapping_size = PUD_SIZE;
-retry:
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
- mmu_psize_defs[MMU_PAGE_1G].shift &&
- PUD_SIZE <= max_mapping_size)
+ mmu_psize_defs[MMU_PAGE_1G].shift)
mapping_size = PUD_SIZE;
else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
mmu_psize_defs[MMU_PAGE_2M].shift)
@@ -217,40 +229,147 @@ static int __meminit create_physical_mapping(unsigned long start,
else
mapping_size = PAGE_SIZE;
- if (split_text_mapping && (mapping_size == PUD_SIZE) &&
- (addr <= __pa_symbol(__init_begin)) &&
- (addr + mapping_size) >= __pa_symbol(_stext)) {
- max_mapping_size = PMD_SIZE;
- goto retry;
+ if (previous_size != mapping_size) {
+ print_mapping(start, addr, previous_size);
+ start = addr;
+ previous_size = mapping_size;
}
- if (split_text_mapping && (mapping_size == PMD_SIZE) &&
- (addr <= __pa_symbol(__init_begin)) &&
- (addr + mapping_size) >= __pa_symbol(_stext))
- mapping_size = PAGE_SIZE;
+ rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
+ prot, mapping_size);
+ if (rc)
+ return rc;
+ }
- if (mapping_size != previous_size) {
- print_mapping(start, addr, previous_size);
- start = addr;
+ print_mapping(start, addr, mapping_size);
+ *end_of_mapping = (unsigned long)__va(addr);
+ return 0;
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static int __meminit create_physical_mapping(unsigned long start,
+ unsigned long end)
+{
+ pgprot_t prot;
+ unsigned long rc;
+ unsigned long vstart, vend;
+ unsigned long gap;
+ unsigned long st = (unsigned long)_stext;
+ unsigned long ie = (unsigned long)__init_end;
+ unsigned long ib = (unsigned long)__init_begin;
+ unsigned long si = (unsigned long)__start_interrupts - PHYSICAL_START;
+ unsigned long ei = (unsigned long)__end_interrupts - PHYSICAL_START;
+
+
+ start = _ALIGN_UP(start, PAGE_SIZE);
+ vstart = (unsigned long)__va(start);
+ vend = (unsigned long)__va(end);
+
+ while (vstart < vend) {
+ if ((PHYSICAL_START > MEMORY_START) &&
+ (overlaps_interrupt_vector_text(vstart, vend))) {
+ /*
+ * Is there a gap between start and start of interrupts.
+ * We need to care for PHYSICAL_START here since we need
+ * to nail down __start_interrupts..__end_interrupts as
+ * physical offsets from 0.
+ */
+ gap = _ALIGN_DOWN(si, PAGE_SIZE) - vstart;
+ if (gap > PAGE_SIZE) {
+ prot = PAGE_KERNEL;
+ rc = __create_physical_mapping(vstart, si, prot,
+ &vstart);
+ if (rc)
+ return rc;
+ }
+
+ prot = PAGE_KERNEL_X;
+ rc = __create_physical_mapping(vstart, ei, prot,
+ &vstart);
+ if (rc)
+ return rc;
}
- vaddr = (unsigned long)__va(addr);
+ if (overlaps_kernel_text(vstart, vend)) {
+
+ gap = _ALIGN_DOWN(st, PAGE_SIZE) - vstart;
+ if (gap > PAGE_SIZE) {
+ prot = PAGE_KERNEL;
+ rc = __create_physical_mapping(vstart, st,
+ prot, &vstart);
+ if (rc)
+ return rc;
+ }
- if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
- overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
+ /*
+ * __init_begin/end are special,they are marked
+ * executable but we'll turn rw off until __init_begin
+ * and if the mapping is not split here, it will spill
+ * over up to * __init_end and allocations from that
+ * region will find read-only permissions
+ */
+ prot = PAGE_KERNEL_X;
+ rc = __create_physical_mapping(vstart, ib, prot,
+ &vstart);
+ if (rc)
+ return rc;
+
+ rc = __create_physical_mapping(vstart, ie, prot,
+ &vstart);
+ if (rc)
+ return rc;
+ }
+
+ prot = PAGE_KERNEL;
+ rc = __create_physical_mapping(vstart, vend, prot, &vstart);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+#else /* !CONFIG_STRICT_KERNEL_RWX */
+
+static int __meminit create_physical_mapping(unsigned long start,
+ unsigned long end)
+{
+ pgprot_t prot;
+ unsigned long rc;
+ unsigned long vstart, vend;
+ unsigned long mapping_size;
+
+
+ start = _ALIGN_UP(start, PAGE_SIZE);
+ vstart = (unsigned long)__va(start);
+ vend = (unsigned long)__va(end);
+
+ while (vstart < vend) {
+ /*
+ * STRICT_KERNEL_RWX is off, but we can't map all of
+ * vstart--vend as * executable, lets split vend into
+ * mapping_size and try
+ */
+ mapping_size = min(vend - vstart, PUD_SIZE);
+
+ if (overlaps_kernel_text(vstart, vstart + mapping_size) ||
+ overlaps_interrupt_vector_text(vstart,
+ vstart + mapping_size))
prot = PAGE_KERNEL_X;
else
prot = PAGE_KERNEL;
- rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
+ rc = __create_physical_mapping(vstart, vstart + mapping_size,
+ prot, &vstart);
if (rc)
return rc;
}
- print_mapping(start, addr, mapping_size);
return 0;
}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
static void __init radix_init_pgtable(void)
{
unsigned long rts_field;
--
2.9.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH v2 2/3] powerpc/mm/hash: WARN if relocation is enabled and CONFIG_STRICT_KERNEL_RWX
2017-07-31 12:11 [PATCH v2 0/3] Have CONFIG_STRICT_KERNEL_RWX work with CONFIG_RELOCATABLE Balbir Singh
2017-07-31 12:11 ` [PATCH v2 1/3] powerpc/mm/radix: Fix relocatable radix mappings for STRICT_RWX Balbir Singh
@ 2017-07-31 12:11 ` Balbir Singh
2017-07-31 12:11 ` [PATCH v2 3/3] powerpc/strict_kernel_rwx: Don't depend on !RELOCATABLE Balbir Singh
2 siblings, 0 replies; 4+ messages in thread
From: Balbir Singh @ 2017-07-31 12:11 UTC (permalink / raw)
To: linuxppc-dev
For radix we split the mapping into smaller page sizes (at the cost of
additional TLB overhead), but for hash it's best to print a warning. In
the case of hash and no-relocation, the kernel should be well aligned
to provide the least overhead with the current linear mapping size (16M)
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
---
arch/powerpc/mm/pgtable-hash64.c | 28 ++++++++++++++++++++++++++--
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 443a2c6..656f7f3 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -434,8 +434,26 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
shift = mmu_psize_defs[mmu_linear_psize].shift;
step = 1 << shift;
- start = ALIGN_DOWN(start, step);
- end = ALIGN(end, step); // aligns up
+ if (!IS_ALIGNED(PHYSICAL_START, step)) {
+ /*
+ * For the relocatable case we might have
+ * a case where _stext shares the page
+ * with rw memory or __init_begin might
+ * share the page with executable text.
+ * This breaks strict RWX, but allows the
+ * kernel to boot. If PHYSICAL_START is mmu_linear_psize
+ * aligned, then we can continue to make the same
+ * assumptions as the non-relocatable case.
+ *
+ * TODO: If we really care about the relocatable
+ * case, we can align __init_begin/end better.
+ */
+ start = ALIGN(start, step);
+ end = ALIGN_DOWN(end, step);
+ } else {
+ start = ALIGN_DOWN(start, step);
+ end = ALIGN(end, step); /* Aligns up */
+ }
if (start >= end)
return false;
@@ -455,6 +473,12 @@ void hash__mark_rodata_ro(void)
{
unsigned long start, end;
+ if (PHYSICAL_START > MEMORY_START)
+ pr_warn("Detected relocation and CONFIG_STRICT_KERNEL_RWX "
+ "permissions are best effort, some non-text area "
+ "might still be left as executable");
+
+
start = (unsigned long)_stext;
end = (unsigned long)__init_begin;
--
2.9.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH v2 3/3] powerpc/strict_kernel_rwx: Don't depend on !RELOCATABLE
2017-07-31 12:11 [PATCH v2 0/3] Have CONFIG_STRICT_KERNEL_RWX work with CONFIG_RELOCATABLE Balbir Singh
2017-07-31 12:11 ` [PATCH v2 1/3] powerpc/mm/radix: Fix relocatable radix mappings for STRICT_RWX Balbir Singh
2017-07-31 12:11 ` [PATCH v2 2/3] powerpc/mm/hash: WARN if relocation is enabled and CONFIG_STRICT_KERNEL_RWX Balbir Singh
@ 2017-07-31 12:11 ` Balbir Singh
2 siblings, 0 replies; 4+ messages in thread
From: Balbir Singh @ 2017-07-31 12:11 UTC (permalink / raw)
To: linuxppc-dev
The concerns with extra permissions and overlap have been
addressed, so remove the dependency on !RELOCATABLE
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
---
arch/powerpc/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 36f858c..b5b8ba8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -165,7 +165,7 @@ config PPC
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S_64 && !RELOCATABLE && !HIBERNATION)
+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S_64 && !HIBERNATION)
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select HAVE_CBPF_JIT if !PPC64
select HAVE_CONTEXT_TRACKING if PPC64
--
2.9.4
^ permalink raw reply related [flat|nested] 4+ messages in thread
end of thread, other threads:[~2017-07-31 12:12 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-07-31 12:11 [PATCH v2 0/3] Have CONFIG_STRICT_KERNEL_RWX work with CONFIG_RELOCATABLE Balbir Singh
2017-07-31 12:11 ` [PATCH v2 1/3] powerpc/mm/radix: Fix relocatable radix mappings for STRICT_RWX Balbir Singh
2017-07-31 12:11 ` [PATCH v2 2/3] powerpc/mm/hash: WARN if relocation is enabled and CONFIG_STRICT_KERNEL_RWX Balbir Singh
2017-07-31 12:11 ` [PATCH v2 3/3] powerpc/strict_kernel_rwx: Don't depend on !RELOCATABLE Balbir Singh
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).