All of lore.kernel.org
 help / color / mirror / Atom feed
From: Anton Blanchard <anton@samba.org>
To: eparis@redhat.com, viro@zeniv.linux.org.uk,
	benh@kernel.crashing.org, paulus@samba.org
Cc: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH 4/4] powerpc: Optimise 64bit syscall auditing exit path
Date: Wed, 9 Jan 2013 10:48:59 +1100	[thread overview]
Message-ID: <20130109104859.0f475fa0@kryten> (raw)
In-Reply-To: <20130109104617.74e995a5@kryten>


Add an assembly fast path for the syscall audit exit path on
64bit. Some distros enable auditing by default which forces us
through the syscall auditing path even if there are no rules.

With syscall auditing enabled we currently disable interrupts,
check the threadinfo flags then immediately re-enable interrupts
and call audit_syscall_exit. This patch splits the threadinfo
flag check into two so we can avoid the disable/re-enable of
interrupts when handling trace flags. We must do the user work
flag check with interrupts off to avoid returning to userspace
without handling them.

The other big gain is that we don't have to save and restore
the non-volatile registers or exit via the slow ret_from_except
path.

I wrote some test cases to validate the patch:

http://ozlabs.org/~anton/junkcode/audit_tests.tar.gz

And to test the performance I ran a simple null syscall
microbenchmark on a POWER7 box:

http://ozlabs.org/~anton/junkcode/null_syscall.c

Baseline: 920.6 cycles
Patched:  719.6 cycles

An improvement of 22%.

Signed-off-by: Anton Blanchard <anton@samba.org>
---

Index: b/arch/powerpc/kernel/entry_64.S
===================================================================
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -195,6 +195,19 @@ syscall_exit:
 	andi.	r10,r8,MSR_RI
 	beq-	unrecov_restore
 #endif
+
+	/* We can handle some thread info flags with interrupts on */
+	ld	r9,TI_FLAGS(r12)
+	li	r11,-_LAST_ERRNO
+	andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_PERSYSCALL_MASK)
+	bne	syscall_exit_work
+
+	cmpld	r3,r11
+	ld	r5,_CCR(r1)
+	bge-	syscall_error
+
+.Lsyscall_exit_work_cont:
+
 	/*
 	 * Disable interrupts so current_thread_info()->flags can't change,
 	 * and so that we don't get interrupted after loading SRR0/1.
@@ -208,21 +221,19 @@ syscall_exit:
 	 * clear EE. We only need to clear RI just before we restore r13
 	 * below, but batching it with EE saves us one expensive mtmsrd call.
 	 * We have to be careful to restore RI if we branch anywhere from
-	 * here (eg syscall_exit_work).
+	 * here (eg syscall_exit_user_work).
 	 */
 	li	r9,MSR_RI
 	andc	r11,r10,r9
 	mtmsrd	r11,1
 #endif /* CONFIG_PPC_BOOK3E */
 
+	/* Recheck thread info flags with interrupts off */
 	ld	r9,TI_FLAGS(r12)
-	li	r11,-_LAST_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
-	bne-	syscall_exit_work
-	cmpld	r3,r11
-	ld	r5,_CCR(r1)
-	bge-	syscall_error
-.Lsyscall_error_cont:
+
+	andi.   r0,r9,_TIF_USER_WORK_MASK
+	bne-	syscall_exit_user_work
+
 	ld	r7,_NIP(r1)
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
@@ -246,7 +257,7 @@ syscall_error:
 	oris	r5,r5,0x1000	/* Set SO bit in CR */
 	neg	r3,r3
 	std	r5,_CCR(r1)
-	b	.Lsyscall_error_cont
+	b	.Lsyscall_exit_work_cont
 	
 /* Traced system call support */
 syscall_dotrace:
@@ -306,58 +317,79 @@ audit_entry:
 syscall_enosys:
 	li	r3,-ENOSYS
 	b	syscall_exit
-	
+
 syscall_exit_work:
-#ifdef CONFIG_PPC_BOOK3S
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
-	 If TIF_NOERROR is set, just save r3 as it is. */
+	li	r6,1		/* r6 contains syscall success */
+	mr	r7,r3
+	ld	r5,_CCR(r1)
 
+	/*
+	 * If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
+	 * If TIF_NOERROR is set, just save r3 as it is.
+	 */
 	andi.	r0,r9,_TIF_RESTOREALL
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
-0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
+0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
-	ld	r5,_CCR(r1)
+	li	r6,0		/* syscall failed */
 	neg	r3,r3
 	oris	r5,r5,0x1000	/* Set SO bit in CR */
 	std	r5,_CCR(r1)
 1:	std	r3,GPR3(r1)
-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
+
+2:	andi.	r0,r9,_TIF_SYSCALL_AUDIT
 	beq	4f
 
-	/* Clear per-syscall TIF flags if any are set.  */
+	mr	r3,r6
+	mr	r4,r7
+	bl	.__audit_syscall_exit
+	CURRENT_THREAD_INFO(r12, r1)
+	ld	r9,TI_FLAGS(r12)
+	ld	r3,GPR3(r1)
+	ld	r5,_CCR(r1)
+	ld	r8,_MSR(r1)
+
+4:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
+	beq	6f
 
+	/* Clear per-syscall TIF flags if any are set.  */
 	li	r11,_TIF_PERSYSCALL_MASK
 	addi	r12,r12,TI_FLAGS
-3:	ldarx	r10,0,r12
+5:	ldarx	r10,0,r12
 	andc	r10,r10,r11
 	stdcx.	r10,0,r12
-	bne-	3b
+	bne-	5b
 	subi	r12,r12,TI_FLAGS
 
-4:	/* Anything else left to do? */
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	.ret_from_except_lite
+	/*
+	 * We can use the fast path if no other trace flags are on and
+	 * _TIF_RESTOREALL wasn't set.
+	 */
+6:      andi.   r0,r9,((_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_RESTOREALL) & ~_TIF_SYSCALL_AUDIT)
+	mr	r9,r10
+	beq	.Lsyscall_exit_work_cont
 
-	/* Re-enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ld	r10,PACAKMSR(r13)
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
+	andi.	r0,r9,((_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) & ~_TIF_SYSCALL_AUDIT)
+	beq	7f
 
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_syscall_trace_leave
 	b	.ret_from_except
 
+7:	b	.ret_from_except_lite
+
+syscall_exit_user_work:
+#ifdef CONFIG_PPC_BOOK3S
+	mtmsrd	r10,1		/* Restore RI */
+#endif
+	std	r3,GPR3(r1)
+	b	.ret_from_except_lite
+
 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
 	ld	r11,_TRAP(r1)
Index: b/arch/powerpc/kernel/ptrace.c
===================================================================
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1781,7 +1781,9 @@ void do_syscall_trace_leave(struct pt_re
 {
 	int step;
 
+#ifdef CONFIG_PPC32
 	audit_syscall_exit(regs);
+#endif
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_exit(regs, regs->result);

WARNING: multiple messages have this Message-ID (diff)
From: Anton Blanchard <anton@samba.org>
To: eparis@redhat.com, viro@zeniv.linux.org.uk,
	benh@kernel.crashing.org, paulus@samba.org
Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org
Subject: [PATCH 4/4] powerpc: Optimise 64bit syscall auditing exit path
Date: Wed, 9 Jan 2013 10:48:59 +1100	[thread overview]
Message-ID: <20130109104859.0f475fa0@kryten> (raw)
In-Reply-To: <20130109104617.74e995a5@kryten>


Add an assembly fast path for the syscall audit exit path on
64bit. Some distros enable auditing by default which forces us
through the syscall auditing path even if there are no rules.

With syscall auditing enabled we currently disable interrupts,
check the threadinfo flags then immediately re-enable interrupts
and call audit_syscall_exit. This patch splits the threadinfo
flag check into two so we can avoid the disable/re-enable of
interrupts when handling trace flags. We must do the user work
flag check with interrupts off to avoid returning to userspace
without handling them.

The other big gain is that we don't have to save and restore
the non-volatile registers or exit via the slow ret_from_except
path.

I wrote some test cases to validate the patch:

http://ozlabs.org/~anton/junkcode/audit_tests.tar.gz

And to test the performance I ran a simple null syscall
microbenchmark on a POWER7 box:

http://ozlabs.org/~anton/junkcode/null_syscall.c

Baseline: 920.6 cycles
Patched:  719.6 cycles

An improvement of 22%.

Signed-off-by: Anton Blanchard <anton@samba.org>
---

Index: b/arch/powerpc/kernel/entry_64.S
===================================================================
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -195,6 +195,19 @@ syscall_exit:
 	andi.	r10,r8,MSR_RI
 	beq-	unrecov_restore
 #endif
+
+	/* We can handle some thread info flags with interrupts on */
+	ld	r9,TI_FLAGS(r12)
+	li	r11,-_LAST_ERRNO
+	andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_PERSYSCALL_MASK)
+	bne	syscall_exit_work
+
+	cmpld	r3,r11
+	ld	r5,_CCR(r1)
+	bge-	syscall_error
+
+.Lsyscall_exit_work_cont:
+
 	/*
 	 * Disable interrupts so current_thread_info()->flags can't change,
 	 * and so that we don't get interrupted after loading SRR0/1.
@@ -208,21 +221,19 @@ syscall_exit:
 	 * clear EE. We only need to clear RI just before we restore r13
 	 * below, but batching it with EE saves us one expensive mtmsrd call.
 	 * We have to be careful to restore RI if we branch anywhere from
-	 * here (eg syscall_exit_work).
+	 * here (eg syscall_exit_user_work).
 	 */
 	li	r9,MSR_RI
 	andc	r11,r10,r9
 	mtmsrd	r11,1
 #endif /* CONFIG_PPC_BOOK3E */
 
+	/* Recheck thread info flags with interrupts off */
 	ld	r9,TI_FLAGS(r12)
-	li	r11,-_LAST_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
-	bne-	syscall_exit_work
-	cmpld	r3,r11
-	ld	r5,_CCR(r1)
-	bge-	syscall_error
-.Lsyscall_error_cont:
+
+	andi.   r0,r9,_TIF_USER_WORK_MASK
+	bne-	syscall_exit_user_work
+
 	ld	r7,_NIP(r1)
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
@@ -246,7 +257,7 @@ syscall_error:
 	oris	r5,r5,0x1000	/* Set SO bit in CR */
 	neg	r3,r3
 	std	r5,_CCR(r1)
-	b	.Lsyscall_error_cont
+	b	.Lsyscall_exit_work_cont
 	
 /* Traced system call support */
 syscall_dotrace:
@@ -306,58 +317,79 @@ audit_entry:
 syscall_enosys:
 	li	r3,-ENOSYS
 	b	syscall_exit
-	
+
 syscall_exit_work:
-#ifdef CONFIG_PPC_BOOK3S
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
-	 If TIF_NOERROR is set, just save r3 as it is. */
+	li	r6,1		/* r6 contains syscall success */
+	mr	r7,r3
+	ld	r5,_CCR(r1)
 
+	/*
+	 * If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
+	 * If TIF_NOERROR is set, just save r3 as it is.
+	 */
 	andi.	r0,r9,_TIF_RESTOREALL
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
-0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
+0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
-	ld	r5,_CCR(r1)
+	li	r6,0		/* syscall failed */
 	neg	r3,r3
 	oris	r5,r5,0x1000	/* Set SO bit in CR */
 	std	r5,_CCR(r1)
 1:	std	r3,GPR3(r1)
-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
+
+2:	andi.	r0,r9,_TIF_SYSCALL_AUDIT
 	beq	4f
 
-	/* Clear per-syscall TIF flags if any are set.  */
+	mr	r3,r6
+	mr	r4,r7
+	bl	.__audit_syscall_exit
+	CURRENT_THREAD_INFO(r12, r1)
+	ld	r9,TI_FLAGS(r12)
+	ld	r3,GPR3(r1)
+	ld	r5,_CCR(r1)
+	ld	r8,_MSR(r1)
+
+4:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
+	beq	6f
 
+	/* Clear per-syscall TIF flags if any are set.  */
 	li	r11,_TIF_PERSYSCALL_MASK
 	addi	r12,r12,TI_FLAGS
-3:	ldarx	r10,0,r12
+5:	ldarx	r10,0,r12
 	andc	r10,r10,r11
 	stdcx.	r10,0,r12
-	bne-	3b
+	bne-	5b
 	subi	r12,r12,TI_FLAGS
 
-4:	/* Anything else left to do? */
-	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	.ret_from_except_lite
+	/*
+	 * We can use the fast path if no other trace flags are on and
+	 * _TIF_RESTOREALL wasn't set.
+	 */
+6:      andi.   r0,r9,((_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_RESTOREALL) & ~_TIF_SYSCALL_AUDIT)
+	mr	r9,r10
+	beq	.Lsyscall_exit_work_cont
 
-	/* Re-enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ld	r10,PACAKMSR(r13)
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
+	andi.	r0,r9,((_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) & ~_TIF_SYSCALL_AUDIT)
+	beq	7f
 
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.do_syscall_trace_leave
 	b	.ret_from_except
 
+7:	b	.ret_from_except_lite
+
+syscall_exit_user_work:
+#ifdef CONFIG_PPC_BOOK3S
+	mtmsrd	r10,1		/* Restore RI */
+#endif
+	std	r3,GPR3(r1)
+	b	.ret_from_except_lite
+
 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
 	ld	r11,_TRAP(r1)
Index: b/arch/powerpc/kernel/ptrace.c
===================================================================
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1781,7 +1781,9 @@ void do_syscall_trace_leave(struct pt_re
 {
 	int step;
 
+#ifdef CONFIG_PPC32
 	audit_syscall_exit(regs);
+#endif
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_exit(regs, regs->result);

  parent reply	other threads:[~2013-01-08 23:49 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-01-08 23:46 [PATCH 1/4] audit: Syscall rules are not applied to existing processes on non-x86 Anton Blanchard
2013-01-08 23:46 ` Anton Blanchard
2013-01-08 23:47 ` [PATCH 2/4] powerpc: Remove static branch prediction in 64bit traced syscall path Anton Blanchard
2013-01-08 23:47   ` Anton Blanchard
2013-01-08 23:48 ` [PATCH 3/4] powerpc: Optimise 64bit syscall auditing entry path Anton Blanchard
2013-01-08 23:48   ` Anton Blanchard
2013-04-10 16:56   ` Eric Paris
2013-04-10 16:56     ` Eric Paris
2013-01-08 23:48 ` Anton Blanchard [this message]
2013-01-08 23:48   ` [PATCH 4/4] powerpc: Optimise 64bit syscall auditing exit path Anton Blanchard
2013-02-07  4:13 ` [PATCH 1/4] audit: Syscall rules are not applied to existing processes on non-x86 Anton Blanchard
2013-02-07  4:13   ` Anton Blanchard

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20130109104859.0f475fa0@kryten \
    --to=anton@samba.org \
    --cc=benh@kernel.crashing.org \
    --cc=eparis@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=paulus@samba.org \
    --cc=viro@zeniv.linux.org.uk \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.