From: Christophe Leroy <christophe.leroy@csgroup.eu>
To: Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>
Cc: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH v1 12/30] powerpc/vdso: Replace vdso_base by vdso
Date: Sun, 27 Sep 2020 09:16:29 +0000 (UTC)	[thread overview]
Message-ID: <8e6cefe474aa4ceba028abb729485cd46c140990.1601197618.git.christophe.leroy@csgroup.eu> (raw)
In-Reply-To: <cover.1601197618.git.christophe.leroy@csgroup.eu>

All other architectures except s390 use a void pointer named 'vdso'
to reference the VDSO mapping.

In a following patch, the VDSO data page will be placed in front of the
VDSO text, so vdso_base will no longer point to the VDSO text.

To avoid confusion between vdso_base and the VDSO text, rename vdso_base
to vdso and make it a void __user *.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
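A minimal standalone sketch of the resulting usage pattern, for illustration
only (not an extra hunk): the mm_context_t below is simplified, the address
and offset values are made up, and __user is stubbed out because it is only
a sparse annotation outside the kernel.

#include <stdio.h>

#define __user			/* kernel-only sparse annotation, no-op here */

/* simplified stand-in for the powerpc mm_context_t */
typedef struct {
	void __user *vdso;	/* was: unsigned long vdso_base */
} mm_context_t;

/* callers that need an address now cast the pointer back to unsigned long */
static unsigned long sigtramp_address(const mm_context_t *ctx,
				      unsigned long sigtramp_offset)
{
	/* before: return ctx->vdso_base + sigtramp_offset; */
	return (unsigned long)ctx->vdso + sigtramp_offset;
}

int main(void)
{
	mm_context_t ctx = { .vdso = (void __user *)0x7fff1000UL };

	/* NULL now plays the role that 0 played for vdso_base */
	if (ctx.vdso)
		printf("trampoline at 0x%lx\n", sigtramp_address(&ctx, 0x30));

	return 0;
}

This mirrors what the hunks below do in arch_unmap(), the signal code and
the perf callchain code: test the pointer against NULL instead of 0, and
cast it to unsigned long where address arithmetic is needed.
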
 arch/powerpc/include/asm/book3s/32/mmu-hash.h | 2 +-
 arch/powerpc/include/asm/book3s/64/mmu.h      | 2 +-
 arch/powerpc/include/asm/elf.h                | 2 +-
 arch/powerpc/include/asm/mmu_context.h        | 6 ++++--
 arch/powerpc/include/asm/nohash/32/mmu-40x.h  | 2 +-
 arch/powerpc/include/asm/nohash/32/mmu-44x.h  | 2 +-
 arch/powerpc/include/asm/nohash/32/mmu-8xx.h  | 2 +-
 arch/powerpc/include/asm/nohash/mmu-book3e.h  | 2 +-
 arch/powerpc/kernel/signal_32.c               | 8 ++++----
 arch/powerpc/kernel/signal_64.c               | 4 ++--
 arch/powerpc/kernel/vdso.c                    | 8 ++++----
 arch/powerpc/perf/callchain_32.c              | 8 ++++----
 arch/powerpc/perf/callchain_64.c              | 4 ++--
 13 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 2e277ca0170f..331187661236 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -90,7 +90,7 @@ struct hash_pte {
 
 typedef struct {
 	unsigned long id;
-	unsigned long vdso_base;
+	void __user *vdso;
 } mm_context_t;
 
 void update_bats(void);
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index ddc414ab3c4d..fc6cb6a712c7 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -111,7 +111,7 @@ typedef struct {
 
 	struct hash_mm_context *hash_context;
 
-	unsigned long vdso_base;
+	void __user *vdso;
 	/*
 	 * pagetable fragment support
 	 */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 53ed2ca40151..4ecc372c408e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -169,7 +169,7 @@ do {									\
 	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize);			\
 	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize);			\
 	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize);			\
-	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base);	\
+	VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
 	ARCH_DLINFO_CACHE_GEOMETRY;					\
 } while (0)
 
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index e02aa793420b..d54358cb5be1 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -262,8 +262,10 @@ extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
 			      unsigned long start, unsigned long end)
 {
-	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
-		mm->context.vdso_base = 0;
+	unsigned long vdso_base = (unsigned long)mm->context.vdso;
+
+	if (start <= vdso_base && vdso_base < end)
+		mm->context.vdso = NULL;
 }
 
 #ifdef CONFIG_PPC_MEM_KEYS
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
index 74f4edb5916e..8a8f13a22cf4 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
@@ -57,7 +57,7 @@
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
-	unsigned long	vdso_base;
+	void __user	*vdso;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 28aa3b339c5e..2d92a39d8f2e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index;
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
-	unsigned long	vdso_base;
+	void __user	*vdso;
 } mm_context_t;
 
 /* patch sites */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 1d9ac0f9c794..f0bd7f20c1e3 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -198,7 +198,7 @@ void mmu_pin_tlb(unsigned long top, bool readonly);
 typedef struct {
 	unsigned int id;
 	unsigned int active;
-	unsigned long vdso_base;
+	void __user *vdso;
 	void *pte_frag;
 } mm_context_t;
 
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index b41004664312..e43a418d3ccd 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -238,7 +238,7 @@ extern unsigned int tlbcam_index;
 typedef struct {
 	unsigned int	id;
 	unsigned int	active;
-	unsigned long	vdso_base;
+	void __user	*vdso;
 } mm_context_t;
 
 /* Page size definitions, common between 32 and 64-bit
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 96950f189b5a..4dcc5e2659ce 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -785,9 +785,9 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
 	/* Save user registers on the stack */
 	frame = &rt_sf->uc.uc_mcontext;
 	addr = frame;
-	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
+	if (vdso32_rt_sigtramp && tsk->mm->context.vdso) {
 		sigret = 0;
-		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
+		tramp = (unsigned long)tsk->mm->context.vdso + vdso32_rt_sigtramp;
 	} else {
 		sigret = __NR_rt_sigreturn;
 		tramp = (unsigned long) frame->tramp;
@@ -1247,9 +1247,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
 	    || __put_user(ksig->sig, &sc->signal))
 		goto badframe;
 
-	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
+	if (vdso32_sigtramp && tsk->mm->context.vdso) {
 		sigret = 0;
-		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
+		tramp = (unsigned long)tsk->mm->context.vdso + vdso32_sigtramp;
 	} else {
 		sigret = __NR_sigreturn;
 		tramp = (unsigned long) frame->mctx.tramp;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bfc939360bad..80ad09c8bc14 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -864,8 +864,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
 	tsk->thread.fp_state.fpscr = 0;
 
 	/* Set up to return from userspace. */
-	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
-		regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
+	if (vdso64_rt_sigtramp && tsk->mm->context.vdso) {
+		regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp;
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
 		if (err)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 971764d5b85b..87b77b793029 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -123,7 +123,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
 	if (new_size != text_size + PAGE_SIZE)
 		return -EINVAL;
 
-	current->mm->context.vdso_base = new_vma->vm_start;
+	current->mm->context.vdso = (void __user *)new_vma->vm_start;
 
 	return 0;
 }
@@ -198,7 +198,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
 	 * install_special_mapping or the perf counter mmap tracking code
 	 * will fail to recognise it as a vDSO.
 	 */
-	current->mm->context.vdso_base = vdso_base;
+	mm->context.vdso = (void __user *)vdso_base;
 
 	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
@@ -224,7 +224,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct mm_struct *mm = current->mm;
 	int rc;
 
-	mm->context.vdso_base = 0;
+	mm->context.vdso = NULL;
 
 	if (!vdso_ready)
 		return 0;
@@ -234,7 +234,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
 	rc = __arch_setup_additional_pages(bprm, uses_interp);
 	if (rc)
-		mm->context.vdso_base = 0;
+		mm->context.vdso = NULL;
 
 	mmap_write_unlock(mm);
 	return rc;
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index 64e4013d8060..b32e94047fb9 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -59,8 +59,8 @@ static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
 {
 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
 		return 1;
-	if (vdso32_sigtramp && current->mm->context.vdso_base &&
-	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
+	if (vdso32_sigtramp && current->mm->context.vdso &&
+	    nip == (unsigned long)current->mm->context.vdso + vdso32_sigtramp)
 		return 1;
 	return 0;
 }
@@ -70,8 +70,8 @@ static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
 	if (nip == fp + offsetof(struct rt_signal_frame_32,
 				 uc.uc_mcontext.mc_pad))
 		return 1;
-	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
-	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
+	if (vdso32_rt_sigtramp && current->mm->context.vdso &&
+	    nip == (unsigned long)current->mm->context.vdso + vdso32_rt_sigtramp)
 		return 1;
 	return 0;
 }
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index fed90e827f3a..c4dfe3c2702b 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -67,8 +67,8 @@ static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
 {
 	if (nip == fp + offsetof(struct signal_frame_64, tramp))
 		return 1;
-	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
-	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
+	if (vdso64_rt_sigtramp && current->mm->context.vdso &&
+	    nip == (unsigned long)current->mm->context.vdso + vdso64_rt_sigtramp)
 		return 1;
 	return 0;
 }
-- 
2.25.0


Thread overview:

2020-09-27  9:16 [PATCH v1 00/30] Modernise VDSO setup Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 01/30] powerpc/vdso: Stripped VDSO is not needed, don't build it Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 02/30] powerpc/vdso: Add missing includes and clean vdso_setup_syscall_map() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 03/30] powerpc/vdso: Rename syscall_map_32/64 to simplify vdso_setup_syscall_map() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 04/30] powerpc/vdso: Remove get_page() in vdso_pagelist initialization Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 05/30] powerpc/vdso: Remove NULL termination element in vdso_pagelist Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 06/30] powerpc/vdso: Refactor 32 bits and 64 bits pages setup Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 07/30] powerpc/vdso: Remove unnecessary ifdefs in vdso_pagelist initialization Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 08/30] powerpc/vdso: Use VDSO size in arch_setup_additional_pages() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 09/30] powerpc/vdso: Simplify arch_setup_additional_pages() exit Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 10/30] powerpc/vdso: Move to _install_special_mapping() and remove arch_vma_name() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 11/30] powerpc/vdso: Provide vdso_remap() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 12/30] powerpc/vdso: Replace vdso_base by vdso Christophe Leroy [this message]
2020-09-27  9:16 ` [PATCH v1 13/30] powerpc/vdso: Move vdso datapage up front Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 14/30] powerpc/vdso: Simplify __get_datapage() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 15/30] powerpc/vdso: Remove unused \tmp param in __get_datapage() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 16/30] powerpc/vdso: Retrieve sigtramp offsets at buildtime Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 17/30] powerpc/vdso: Use builtin symbols to locate fixup section Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 18/30] powerpc/vdso: Merge __kernel_sync_dicache_p5() into __kernel_sync_dicache() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 19/30] powerpc/vdso: Remove vdso32_pages and vdso64_pages Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 20/30] powerpc/vdso: Remove __kernel_datapage_offset Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 21/30] powerpc/vdso: Remove runtime generated sigtramp offsets Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 22/30] powerpc/vdso: Remove vdso_patches[] and associated functions Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 23/30] powerpc/vdso: Remove unused text member in struct lib32/64_elfinfo Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 24/30] powerpc/vdso: Remove symbol section information " Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 25/30] powerpc/vdso: Remove lib32_elfinfo and lib64_elfinfo Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 26/30] powerpc/vdso: Remove vdso_setup() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 27/30] powerpc/vdso: Remove vdso_ready Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 28/30] powerpc/vdso: Remove DBG() Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 29/30] powerpc/vdso: Remove VDSO32_LBASE and VDSO64_LBASE Christophe Leroy
2020-09-27  9:16 ` [PATCH v1 30/30] powerpc/vdso: Cleanup vdso.h Christophe Leroy
2020-12-10 11:29 ` [PATCH v1 00/30] Modernise VDSO setup Michael Ellerman
