linux-kernel.vger.kernel.org archive mirror
* [PATCH] x86-64: fix unwind info for incomplete frames
@ 2015-05-28  8:20 Jan Beulich
  2015-05-28  9:01 ` Ingo Molnar
  0 siblings, 1 reply; 17+ messages in thread
From: Jan Beulich @ 2015-05-28  8:20 UTC (permalink / raw)
  To: mingo, tglx, hpa; +Cc: Andy Lutomirski, Denys Vlasenko, linux-kernel

Commit 76f5df43ca ('x86/asm/entry/64: Always allocate a complete
"struct pt_regs" on the kernel stack') deleted PARTIAL_FRAME without
considering that while a full frame is now being allocated, not all
registers get always saved into it. Instead of restoring that macro,
simply make DEFAULT_FRAME capable of expressing both.
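
To make the gating concrete, here is a minimal standalone sketch (an
illustration only, not part of the patch; macro and label names are made
up) using the pt_regs offsets from calling.h - note that the CFI
directives only describe where registers were saved, they emit no code:

	.macro ANNOTATE_FRAME offset=0 extra=1
	.cfi_rel_offset rdi, 112+\offset	# C-clobbered regs: always saved
	.cfi_rel_offset rsi, 104+\offset
	.if \extra				# callee-saved regs: annotated
	.cfi_rel_offset rbx, 40+\offset		# only if SAVE_EXTRA_REGS ran
	.cfi_rel_offset rbp, 32+\offset
	.endif
	.endm

	.text
stub_sketch:
	.cfi_startproc
	ANNOTATE_FRAME 8, 0	# offset 8: return address; incomplete frame
	nop
	.cfi_endproc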

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
---
 arch/x86/kernel/entry_64.S |   18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

--- 4.1-rc5/arch/x86/kernel/entry_64.S
+++ 4.1-rc5-x86_64-unwind-info/arch/x86/kernel/entry_64.S
@@ -148,7 +148,7 @@ ENDPROC(native_usergs_sysret64)
 /*
  * frame that enables passing a complete pt_regs to a C function.
  */
-	.macro DEFAULT_FRAME start=1 offset=0
+	.macro DEFAULT_FRAME start=1 offset=0 extra=1
 	XCPT_FRAME \start, ORIG_RAX+\offset
 	CFI_REL_OFFSET rdi, RDI+\offset
 	CFI_REL_OFFSET rsi, RSI+\offset
@@ -159,12 +159,14 @@ ENDPROC(native_usergs_sysret64)
 	CFI_REL_OFFSET r9, R9+\offset
 	CFI_REL_OFFSET r10, R10+\offset
 	CFI_REL_OFFSET r11, R11+\offset
+	.if \extra
 	CFI_REL_OFFSET rbx, RBX+\offset
 	CFI_REL_OFFSET rbp, RBP+\offset
 	CFI_REL_OFFSET r12, R12+\offset
 	CFI_REL_OFFSET r13, R13+\offset
 	CFI_REL_OFFSET r14, R14+\offset
 	CFI_REL_OFFSET r15, R15+\offset
+	.endif
 	.endm
 
 /*
@@ -491,7 +493,7 @@ END(system_call)
 	.macro FORK_LIKE func
 ENTRY(stub_\func)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8		/* offset 8: return address */
+	DEFAULT_FRAME 0, 8, 0		/* offset 8: return address */
 	SAVE_EXTRA_REGS 8
 	jmp sys_\func
 	CFI_ENDPROC
@@ -504,7 +506,7 @@ END(stub_\func)
 
 ENTRY(stub_execve)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
+	DEFAULT_FRAME 0, 8, 0
 	call	sys_execve
 return_from_execve:
 	testl	%eax, %eax
@@ -527,7 +529,7 @@ END(stub_execve)
 	.align	8
 GLOBAL(stub_execveat)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
+	DEFAULT_FRAME 0, 8, 0
 	call	sys_execveat
 	jmp	return_from_execve
 	CFI_ENDPROC
@@ -537,7 +539,7 @@ END(stub_execveat)
 	.align	8
 GLOBAL(stub_x32_execve)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
+	DEFAULT_FRAME 0, 8, 0
 	call	compat_sys_execve
 	jmp	return_from_execve
 	CFI_ENDPROC
@@ -545,7 +547,7 @@ END(stub_x32_execve)
 	.align	8
 GLOBAL(stub_x32_execveat)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
+	DEFAULT_FRAME 0, 8, 0
 	call	compat_sys_execveat
 	jmp	return_from_execve
 	CFI_ENDPROC
@@ -575,7 +577,7 @@ END(stub32_execveat)
  */
 ENTRY(stub_rt_sigreturn)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
+	DEFAULT_FRAME 0, 8, 0
 	/*
 	 * SAVE_EXTRA_REGS result is not normally needed:
 	 * sigreturn overwrites all pt_regs->GPREGS.
@@ -597,7 +599,7 @@ END(stub_rt_sigreturn)
 #ifdef CONFIG_X86_X32_ABI
 ENTRY(stub_x32_rt_sigreturn)
 	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
+	DEFAULT_FRAME 0, 8, 0
 	SAVE_EXTRA_REGS 8
 	call sys32_x32_rt_sigreturn
 	jmp  return_from_stub





* Re: [PATCH] x86-64: fix unwind info for incomplete frames
  2015-05-28  8:20 [PATCH] x86-64: fix unwind info for incomplete frames Jan Beulich
@ 2015-05-28  9:01 ` Ingo Molnar
  2015-05-28  9:45   ` Jan Beulich
  0 siblings, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2015-05-28  9:01 UTC (permalink / raw)
  To: Jan Beulich
  Cc: mingo, tglx, hpa, Andy Lutomirski, Denys Vlasenko, linux-kernel,
	Linus Torvalds, Brian Gerst


* Jan Beulich <JBeulich@suse.com> wrote:

> Commit 76f5df43ca ('x86/asm/entry/64: Always allocate a complete
> "struct pt_regs" on the kernel stack') deleted PARTIAL_FRAME without
> considering that while a full frame is now being allocated, not all
> registers get always saved into it. Instead of restoring that macro,
> simply make DEFAULT_FRAME capable of expressing both.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Cc: Denys Vlasenko <dvlasenk@redhat.com>
> Cc: Andy Lutomirski <luto@amacapital.net>
> ---
>  arch/x86/kernel/entry_64.S |   18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
> 
> --- 4.1-rc5/arch/x86/kernel/entry_64.S
> +++ 4.1-rc5-x86_64-unwind-info/arch/x86/kernel/entry_64.S
> @@ -148,7 +148,7 @@ ENDPROC(native_usergs_sysret64)
>  /*
>   * frame that enables passing a complete pt_regs to a C function.
>   */
> -	.macro DEFAULT_FRAME start=1 offset=0
> +	.macro DEFAULT_FRAME start=1 offset=0 extra=1
>  	XCPT_FRAME \start, ORIG_RAX+\offset
>  	CFI_REL_OFFSET rdi, RDI+\offset
>  	CFI_REL_OFFSET rsi, RSI+\offset
> @@ -159,12 +159,14 @@ ENDPROC(native_usergs_sysret64)
>  	CFI_REL_OFFSET r9, R9+\offset
>  	CFI_REL_OFFSET r10, R10+\offset
>  	CFI_REL_OFFSET r11, R11+\offset
> +	.if \extra
>  	CFI_REL_OFFSET rbx, RBX+\offset
>  	CFI_REL_OFFSET rbp, RBP+\offset
>  	CFI_REL_OFFSET r12, R12+\offset
>  	CFI_REL_OFFSET r13, R13+\offset
>  	CFI_REL_OFFSET r14, R14+\offset
>  	CFI_REL_OFFSET r15, R15+\offset
> +	.endif
>  	.endm

I have a couple of code cleanliness complaints:

 - So 'extra' isn't very expressive, I'd name it 'full' to signal a full frame, 
   and full=0 denotes an incomplete frame.

 - So I had to go into the source and double check various nested macros to see 
   that DEFAULT_FRAME is only defining debug information; it's not emitting any 
   actual code. This should have been glaringly obvious from the macro name!

 - So I hate these 'default values' vararg-ish assembly macros:

arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8              /* offset 8: return address */
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0

    because unlike C functions they make the actual arguments a guessing game: 
    you always have to double check the macro definition itself - while the
    'savings' in terms of less code written are minuscule. So it actually obscures 
    macros.

    So these should be flattened, with clear, fixed length parameter signatures, 
    to make them as similar to regular C code as syntactically possible.
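
Something like this hypothetical gas sketch (names made up) shows the 
difference - the ':req' qualifier even turns a forgotten argument into a 
hard assembler error:

	.macro FRAME_DEFAULTED start=1 offset=0 extra=1
	# ... annotations using \start, \offset, \extra ...
	.endm

	.macro FRAME_EXPLICIT start:req, offset:req, extra:req
	# ... same body, but every argument must be spelled out ...
	.endm

	FRAME_DEFAULTED			# reader has to look up: 1, 0, 1
	FRAME_EXPLICIT	1, 0, 1		# arguments visible at the call site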

Thanks,

	Ingo


* Re: [PATCH] x86-64: fix unwind info for incomplete frames
  2015-05-28  9:01 ` Ingo Molnar
@ 2015-05-28  9:45   ` Jan Beulich
  2015-05-28 11:20     ` [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations Ingo Molnar
  0 siblings, 1 reply; 17+ messages in thread
From: Jan Beulich @ 2015-05-28  9:45 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Andy Lutomirski, mingo, Brian Gerst, tglx, Linus Torvalds,
	Denys Vlasenko, linux-kernel, hpa

>>> On 28.05.15 at 11:01, <mingo@kernel.org> wrote:
> * Jan Beulich <JBeulich@suse.com> wrote:
>> --- 4.1-rc5/arch/x86/kernel/entry_64.S
>> +++ 4.1-rc5-x86_64-unwind-info/arch/x86/kernel/entry_64.S
>> @@ -148,7 +148,7 @@ ENDPROC(native_usergs_sysret64)
>>  /*
>>   * frame that enables passing a complete pt_regs to a C function.
>>   */
>> -	.macro DEFAULT_FRAME start=1 offset=0
>> +	.macro DEFAULT_FRAME start=1 offset=0 extra=1
>>  	XCPT_FRAME \start, ORIG_RAX+\offset
>>  	CFI_REL_OFFSET rdi, RDI+\offset
>>  	CFI_REL_OFFSET rsi, RSI+\offset
>> @@ -159,12 +159,14 @@ ENDPROC(native_usergs_sysret64)
>>  	CFI_REL_OFFSET r9, R9+\offset
>>  	CFI_REL_OFFSET r10, R10+\offset
>>  	CFI_REL_OFFSET r11, R11+\offset
>> +	.if \extra
>>  	CFI_REL_OFFSET rbx, RBX+\offset
>>  	CFI_REL_OFFSET rbp, RBP+\offset
>>  	CFI_REL_OFFSET r12, R12+\offset
>>  	CFI_REL_OFFSET r13, R13+\offset
>>  	CFI_REL_OFFSET r14, R14+\offset
>>  	CFI_REL_OFFSET r15, R15+\offset
>> +	.endif
>>  	.endm
> 
> I have a couple of code cleanliness complaints:
> 
>  - So 'extra' isn't very expressive, I'd name it 'full' to signal a full frame, 
>    and full=0 denotes an incomplete frame.

I can certainly do this; as easy as "sed s/extra/full/g" on the patch -
perhaps you could even do this while committing?

>  - So I had to go into the source and double check various nested macros to see 
>    that DEFAULT_FRAME is only defining debug information; it's not emitting any 
>    actual code. This should have been glaringly obvious from the macro name!

CFI_DEFAULT_FRAME? Anyway - clearly not in this patch.

>  - So I hate these 'default values' vararg-ish assembly macros:
> 
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8              /* offset 8: return address */
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
> 
>     because unlike C functions they make the actual arguments a guessing game: 
>     you always have to double check the macro definition itself - while the
>     'savings' in terms of less code written are minuscule. So it actually obscures 
>     macros.
> 
>     So these should be flattened, with clear, fixed length parameter signatures, 
>     to make them as similar to regular C code as syntactically possible.

Not sure why assembly code should look like C code. It's a matter
of taste perhaps, and I can see your point, but I'm also not really
eager to make changes just to match other people's taste. And just
like above - certainly not something for this patch, I would think.

Jan



* [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-28  9:45   ` Jan Beulich
@ 2015-05-28 11:20     ` Ingo Molnar
  2015-05-28 11:39       ` [PATCH v2] " Ingo Molnar
  2015-05-28 11:51       ` [PATCH] " Jan Beulich
  0 siblings, 2 replies; 17+ messages in thread
From: Ingo Molnar @ 2015-05-28 11:20 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Andy Lutomirski, mingo, Brian Gerst, tglx, Linus Torvalds,
	Denys Vlasenko, linux-kernel, hpa, Josh Poimboeuf,
	Borislav Petkov, Peter Zijlstra, Frédéric Weisbecker


* Jan Beulich <JBeulich@suse.com> wrote:

> >  - So I hate these 'default values' vararg-ish assembly macros:
> > 
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8              /* offset 8: return address */
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0, 8
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME
> > arch/x86/kernel/entry_64.S:     DEFAULT_FRAME 0
> > 
> >     because unlike C functions they make the actual arguments a guessing game: 
> >     you always have to double check the macro definition itself - while the
> >     'savings' in terms of less code written are minuscule. So it actually obscures 
> >     macros.
> > 
> >     So these should be flattened, with clear, fixed length parameter signatures, 
> >     to make them as similar to regular C code as syntactically possible.
> 
> Not sure why assembly code should look like C code. It's a matter of taste 
> perhaps, and I can see your point, but I'm also not really eager to make changes 
> just to match other people's taste. And just like above - certainly not 
> something for this patch, I would think.

Yeah, no, so this isn't going to work that way.

On one hand you want dwarf annotations mostly for the out-of-tree dwarf-unwinding 
stack backtrace patch on SUSE kernels, while for the upstream kernel they are mostly 
just unreadable gunk in some of the most security-sensitive code paths of the 
kernel, which only gets in the way of readability.

But on the other hand you are unwilling to (or don't have the time to) do a proper 
job of making this palatable for upstream.

That's unacceptable from the upstream kernel's POV, so instead of limping forward 
I'll do the attached patch: it gets rid of the unmaintainable dwarf mess from 
low-level x86 assembly code. This isn't a new concern; a couple of years ago we 
almost did this.

Someone who has the willingness and time to do this properly can reintroduce dwarf 
debuginfo in x86 assembly code from first principles, with the following 
conditions:

 - keep CFI annotations out of the primary .S files. Having
   them in .h files where we define helper macros to set up
   symbols is probably fine, but not in .S files.  Assembly
   code is hard to read already, without any annotations.

 - find a build time method to insert dwarf annotations
   automatically in the most common cases, for example for
   pop/push instructions that manipulate the stack pointer.
   This could be done for example via a preprocessing step
   that just looks for common patterns. We have hundreds
   of CFI annotations, so automating most of that makes sense.

 - the new dwarf code should come with build tooling checks
   that ensure that CFI annotations are sensible and that we
   haven't missed any symbol. We've seen such build time
   sanity checking efforts from the framepointer side, and
   there's no reason it couldn't be done on the dwarf side.

and meanwhile you can keep a revert of this patch ported to SUSE kernels in 
whatever fashion you prefer.

Thanks,

	Ingo

=============================>
From: Ingo Molnar <mingo@kernel.org>
Date: Thu, 28 May 2015 12:21:47 +0200
Subject: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations

So the dwarf2 annotations in low-level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security-sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf-driven stack unwinding has caused
problems in the past, so it's out of tree, and the upstream
kernel only uses the much more robust framepointer-based
stack unwinding method.
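
As a minimal sketch (illustration only, not from this
patch), the framepointer scheme needs no side tables at
all, because every function links its frame into a chain
the unwinder can simply walk:

	.text
sketch_func:
	pushq	%rbp		# save caller's %rbp next to retaddr
	movq	%rsp, %rbp	# %rbp now heads the frame chain
	# unwinder: rip = 8(%rbp), caller's rbp = 0(%rbp), repeat
	popq	%rbp
	ret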

In addition to that there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps them correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
with the following conditions:

 - keep CFI annotations out of the primary .S files. Having
   them in .h files where we define helper macros to set up
   symbols is probably fine, but not in .S files.  Assembly
   code is hard to read already, without any annotations.

 - find a build time method to insert dwarf annotations
   automatically in the most common cases, for example for
   pop/push instructions that manipulate the stack pointer.
   This could be done for example via a preprocessing step
   that just looks for common patterns. We have hundreds
   of CFI annotations, so automating most of that makes sense.

 - the new dwarf code should come with build tooling checks
   that ensure that CFI annotations are sensible and that we
   haven't missed any symbol. We've seen such build time
   sanity checking efforts from the framepointer side, and
   there's no reason it couldn't be done on the dwarf side.
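
For the second condition, a hypothetical sketch of what
such a preprocessing step could emit for a push/pop pair -
the same expansion the pushq_cfi_reg/popq_cfi_reg helpers
removed here produced by hand, generated mechanically
instead:

	.text
sketch:
	.cfi_startproc
	pushq	%rbx			# tool matches the push ...
	.cfi_adjust_cfa_offset 8	# ... and inserts the CFA bump
	.cfi_rel_offset rbx, 0
	popq	%rbx
	.cfi_adjust_cfa_offset -8
	.cfi_restore rbx
	ret
	.cfi_endproc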

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/Makefile              |  10 +-
 arch/x86/ia32/ia32entry.S      | 133 ++++-----------
 arch/x86/include/asm/calling.h |  94 +++++------
 arch/x86/include/asm/dwarf2.h  | 170 -------------------
 arch/x86/include/asm/frame.h   |   7 +-
 arch/x86/kernel/entry_32.S     | 368 ++++++++++++-----------------------------
 arch/x86/kernel/entry_64.S     | 288 ++++++--------------------------
 arch/x86/lib/atomic64_386_32.S |   7 +-
 arch/x86/lib/atomic64_cx8_32.S |  61 +++----
 arch/x86/lib/checksum_32.S     |  52 +++---
 arch/x86/lib/clear_page_64.S   |   7 -
 arch/x86/lib/cmpxchg16b_emu.S  |  12 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  11 +-
 arch/x86/lib/copy_page_64.S    |  11 --
 arch/x86/lib/copy_user_64.S    |  15 --
 arch/x86/lib/csum-copy_64.S    |  17 --
 arch/x86/lib/getuser.S         |  13 --
 arch/x86/lib/iomap_copy_64.S   |   3 -
 arch/x86/lib/memcpy_64.S       |   3 -
 arch/x86/lib/memmove_64.S      |   3 -
 arch/x86/lib/memset_64.S       |   5 -
 arch/x86/lib/msr-reg.S         |  44 ++---
 arch/x86/lib/putuser.S         |   8 +-
 arch/x86/lib/rwsem.S           |  49 +++---
 arch/x86/lib/thunk_32.S        |  15 +-
 arch/x86/lib/thunk_64.S        |  44 +++--
 arch/x86/net/bpf_jit.S         |   1 -
 27 files changed, 350 insertions(+), 1101 deletions(-)

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 57996ee840dd..43e8328a23e4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
 
-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 63450a596800..2be23c734db5 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -4,7 +4,6 @@
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */		 
 
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
@@ -60,17 +59,6 @@
 	movl %eax,%eax			/* zero extension */
 	.endm
 	
-	.macro CFI_STARTPROC32 simple
-	CFI_STARTPROC	\simple
-	CFI_UNDEFINED	r8
-	CFI_UNDEFINED	r9
-	CFI_UNDEFINED	r10
-	CFI_UNDEFINED	r11
-	CFI_UNDEFINED	r12
-	CFI_UNDEFINED	r13
-	CFI_UNDEFINED	r14
-	CFI_UNDEFINED	r15
-	.endm
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret32)
@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32)
  * with the int 0x80 path.
  */
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rsp,rbp
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target)
 	movl	%eax, %eax
 
 	movl	ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-	CFI_REGISTER rip,r10
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi	$__USER32_DS		/* pt_regs->ss */
-	pushq_cfi	%rbp			/* pt_regs->sp */
-	CFI_REL_OFFSET	rsp,0
-	pushfq_cfi				/* pt_regs->flags */
-	pushq_cfi	$__USER32_CS		/* pt_regs->cs */
-	pushq_cfi	%r10 /* pt_regs->ip = thread_info->sysenter_return */
-	CFI_REL_OFFSET	rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
+	pushq	$__USER32_DS		/* pt_regs->ss */
+	pushq	%rbp			/* pt_regs->sp */
+	pushfq				/* pt_regs->flags */
+	pushq	$__USER32_CS		/* pt_regs->cs */
+	pushq	%r10 /* pt_regs->ip = thread_info->sysenter_return */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rcx			/* pt_regs->cx */
+	pushq	$-ENOSYS		/* pt_regs->ax */
 	cld
 	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	/*
 	 * no need to do an access_ok check here because rbp has been
@@ -161,8 +140,8 @@ ENTRY(ia32_sysenter_target)
 
 	orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
+
 sysenter_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl	%edi,%r8d	/* arg5 */
@@ -193,14 +172,12 @@ ENTRY(ia32_sysenter_target)
 	 */
 	andl    $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl	RIP(%rsp),%ecx		/* User %eip */
-	CFI_REGISTER rip,rcx
 	RESTORE_RSI_RDI
 	xorl	%edx,%edx		/* avoid info leaks */
 	xorq	%r8,%r8
 	xorq	%r9,%r9
 	xorq	%r10,%r10
 	movl	EFLAGS(%rsp),%r11d	/* User eflags */
-	/*CFI_RESTORE rflags*/
 	TRACE_IRQS_ON
 
 	/*
@@ -231,8 +208,6 @@ ENTRY(ia32_sysenter_target)
 	 */
 	USERGS_SYSRET32
 
-	CFI_RESTORE_STATE
-
 #ifdef CONFIG_AUDITSYSCALL
 	.macro auditsys_entry_common
 	movl %esi,%r8d			/* 5th arg: 4th syscall arg */
@@ -282,8 +257,8 @@ ENTRY(ia32_sysenter_target)
 #endif
 
 sysenter_fix_flags:
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-	popfq_cfi
+	pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+	popfq
 	jmp sysenter_flags_fixed
 
 sysenter_tracesys:
@@ -298,7 +273,6 @@ ENTRY(ia32_sysenter_target)
 	LOAD_ARGS32  /* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp	sysenter_do_call
-	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
 
 /*
@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target)
  * with the int 0x80 path.
  */
 ENTRY(ia32_cstar_target)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rip,rcx
-	/*CFI_REGISTER	rflags,r11*/
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target)
 	 */
 	SWAPGS_UNSAFE_STACK
 	movl	%esp,%r8d
-	CFI_REGISTER	rsp,r8
 	movq	PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target)
 	movl	%eax,%eax
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi	$__USER32_DS		/* pt_regs->ss */
-	pushq_cfi	%r8			/* pt_regs->sp */
-	CFI_REL_OFFSET rsp,0
-	pushq_cfi	%r11			/* pt_regs->flags */
-	pushq_cfi	$__USER32_CS		/* pt_regs->cs */
-	pushq_cfi	%rcx			/* pt_regs->ip */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rbp			/* pt_regs->cx */
+	pushq	$__USER32_DS		/* pt_regs->ss */
+	pushq	%r8			/* pt_regs->sp */
+	pushq	%r11			/* pt_regs->flags */
+	pushq	$__USER32_CS		/* pt_regs->cs */
+	pushq	%rcx			/* pt_regs->ip */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rbp			/* pt_regs->cx */
 	movl	%ebp,%ecx
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
+	pushq	$-ENOSYS		/* pt_regs->ax */
 	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	/*
 	 * no need to do an access_ok check here because r8 has been
@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target)
 	ASM_CLAC
 	orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
+
 cstar_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl	%edi,%r8d	/* arg5 */
@@ -403,15 +367,12 @@ ENTRY(ia32_cstar_target)
 	andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	RESTORE_RSI_RDI_RDX
 	movl RIP(%rsp),%ecx
-	CFI_REGISTER rip,rcx
 	movl EFLAGS(%rsp),%r11d
-	/*CFI_REGISTER rflags,r11*/
 	xorq	%r10,%r10
 	xorq	%r9,%r9
 	xorq	%r8,%r8
 	TRACE_IRQS_ON
 	movl RSP(%rsp),%esp
-	CFI_RESTORE rsp
 	/*
 	 * 64bit->32bit SYSRET restores eip from ecx,
 	 * eflags from r11 (but RF and VM bits are forced to 0),
@@ -430,7 +391,6 @@ ENTRY(ia32_cstar_target)
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
-	CFI_RESTORE_STATE
 	movl %r9d,R9(%rsp)	/* register to be clobbered by call */
 	auditsys_entry_common
 	movl R9(%rsp),%r9d	/* reload 6th syscall arg */
@@ -460,7 +420,6 @@ END(ia32_cstar_target)
 	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
-	CFI_ENDPROC
 
 /*
  * Emulated IA32 system calls via int 0x80.
@@ -484,15 +443,6 @@ END(ia32_cstar_target)
  */
 
 ENTRY(ia32_syscall)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,5*8
-	/*CFI_REL_OFFSET	ss,4*8 */
-	CFI_REL_OFFSET	rsp,3*8
-	/*CFI_REL_OFFSET	rflags,2*8 */
-	/*CFI_REL_OFFSET	cs,1*8 */
-	CFI_REL_OFFSET	rip,0*8
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -506,15 +456,14 @@ ENTRY(ia32_syscall)
 	movl	%eax,%eax
 
 	/* Construct struct pt_regs on stack (iret frame is already on stack) */
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rcx			/* pt_regs->cx */
+	pushq	$-ENOSYS		/* pt_regs->ax */
 	cld
 	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -544,7 +493,6 @@ ENTRY(ia32_syscall)
 	LOAD_ARGS32	/* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp ia32_do_call
-	CFI_ENDPROC
 END(ia32_syscall)
 
 	.macro PTREGSCALL label, func
@@ -554,8 +502,6 @@ GLOBAL(\label)
 	jmp  ia32_ptregs_common	
 	.endm
 
-	CFI_STARTPROC32
-
 	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
 	PTREGSCALL stub32_sigreturn, sys32_sigreturn
 	PTREGSCALL stub32_fork, sys_fork
@@ -569,23 +515,8 @@ GLOBAL(stub32_clone)
 
 	ALIGN
 ia32_ptregs_common:
-	CFI_ENDPROC
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,SIZEOF_PTREGS
-	CFI_REL_OFFSET	rax,RAX
-	CFI_REL_OFFSET	rcx,RCX
-	CFI_REL_OFFSET	rdx,RDX
-	CFI_REL_OFFSET	rsi,RSI
-	CFI_REL_OFFSET	rdi,RDI
-	CFI_REL_OFFSET	rip,RIP
-/*	CFI_REL_OFFSET	cs,CS*/
-/*	CFI_REL_OFFSET	rflags,EFLAGS*/
-	CFI_REL_OFFSET	rsp,RSP
-/*	CFI_REL_OFFSET	ss,SS*/
 	SAVE_EXTRA_REGS 8
 	call *%rax
 	RESTORE_EXTRA_REGS 8
 	ret
-	CFI_ENDPROC
 END(ia32_ptregs_common)
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 1c8b50edb2db..399fbcb18fae 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
-#include <asm/dwarf2.h>
-
 #ifdef CONFIG_X86_64
 
 /*
@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
 	subq	$15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET 15*8+\addskip
 	.endm
 
 	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
 	.if \r11
-	movq_cfi r11, 6*8+\offset
+	movq %r11, 6*8+\offset
 	.endif
 	.if \r8910
-	movq_cfi r10, 7*8+\offset
-	movq_cfi r9,  8*8+\offset
-	movq_cfi r8,  9*8+\offset
+	movq %r10, 7*8+\offset
+	movq %r9,  8*8+\offset
+	movq %r8,  9*8+\offset
 	.endif
 	.if \rax
-	movq_cfi rax, 10*8+\offset
+	movq %rax, 10*8+\offset
 	.endif
 	.if \rcx
-	movq_cfi rcx, 11*8+\offset
+	movq %rcx, 11*8+\offset
 	.endif
-	movq_cfi rdx, 12*8+\offset
-	movq_cfi rsi, 13*8+\offset
-	movq_cfi rdi, 14*8+\offset
+	movq %rdx, 12*8+\offset
+	movq %rsi, 13*8+\offset
+	movq %rdi, 14*8+\offset
 	.endm
 	.macro SAVE_C_REGS offset=0
 	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
 	.endm
 
 	.macro SAVE_EXTRA_REGS offset=0
-	movq_cfi r15, 0*8+\offset
-	movq_cfi r14, 1*8+\offset
-	movq_cfi r13, 2*8+\offset
-	movq_cfi r12, 3*8+\offset
-	movq_cfi rbp, 4*8+\offset
-	movq_cfi rbx, 5*8+\offset
+	movq %r15, 0*8+\offset
+	movq %r14, 1*8+\offset
+	movq %r13, 2*8+\offset
+	movq %r12, 3*8+\offset
+	movq %rbp, 4*8+\offset
+	movq %rbx, 5*8+\offset
 	.endm
 	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq_cfi rbp, 4*8+\offset
+	movq %rbp, 4*8+\offset
 	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
-	movq_cfi_restore 0*8+\offset, r15
-	movq_cfi_restore 1*8+\offset, r14
-	movq_cfi_restore 2*8+\offset, r13
-	movq_cfi_restore 3*8+\offset, r12
-	movq_cfi_restore 4*8+\offset, rbp
-	movq_cfi_restore 5*8+\offset, rbx
+	movq 0*8+\offset(%rsp), %r15
+	movq 1*8+\offset(%rsp), %r14
+	movq 2*8+\offset(%rsp), %r13
+	movq 3*8+\offset(%rsp), %r12
+	movq 4*8+\offset(%rsp), %rbp
+	movq 5*8+\offset(%rsp), %rbx
 	.endm
 
 	.macro ZERO_EXTRA_REGS
@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
 	.if \rstor_r11
-	movq_cfi_restore 6*8, r11
+	movq 6*8(%rsp), %r11
 	.endif
 	.if \rstor_r8910
-	movq_cfi_restore 7*8, r10
-	movq_cfi_restore 8*8, r9
-	movq_cfi_restore 9*8, r8
+	movq 7*8(%rsp), %r10
+	movq 8*8(%rsp), %r9
+	movq 9*8(%rsp), %r8
 	.endif
 	.if \rstor_rax
-	movq_cfi_restore 10*8, rax
+	movq 10*8(%rsp), %rax
 	.endif
 	.if \rstor_rcx
-	movq_cfi_restore 11*8, rcx
+	movq 11*8(%rsp), %rcx
 	.endif
 	.if \rstor_rdx
-	movq_cfi_restore 12*8, rdx
+	movq 12*8(%rsp), %rdx
 	.endif
-	movq_cfi_restore 13*8, rsi
-	movq_cfi_restore 14*8, rdi
+	movq 13*8(%rsp), %rsi
+	movq 14*8(%rsp), %rdi
 	.endm
 	.macro RESTORE_C_REGS
 	RESTORE_C_REGS_HELPER 1,1,1,1,1
@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
 	addq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
 	.endm
 
 	.macro icebp
@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
  */
 
 	.macro SAVE_ALL
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	.endm
 
 	.macro RESTORE_ALL
-	popl_cfi_reg ebx
-	popl_cfi_reg ecx
-	popl_cfi_reg edx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebp
-	popl_cfi_reg eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 	.endm
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
deleted file mode 100644
index de1cdaf4d743..000000000000
--- a/arch/x86/include/asm/dwarf2.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older version.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC		.cfi_startproc
-#define CFI_ENDPROC		.cfi_endproc
-#define CFI_DEF_CFA		.cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
-#define CFI_OFFSET		.cfi_offset
-#define CFI_REL_OFFSET		.cfi_rel_offset
-#define CFI_REGISTER		.cfi_register
-#define CFI_RESTORE		.cfi_restore
-#define CFI_REMEMBER_STATE	.cfi_remember_state
-#define CFI_RESTORE_STATE	.cfi_restore_state
-#define CFI_UNDEFINED		.cfi_undefined
-#define CFI_ESCAPE		.cfi_escape
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME	.cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-	/*
-	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
-	 * The latter we currently just discard since we don't do DWARF
-	 * unwinding at runtime.  So only the offline DWARF information is
-	 * useful to anyone.  Note we should not use this directive if this
-	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
-	 * changed so it doesn't discard .eh_frame.
-	 */
-	.cfi_sections .debug_frame
-#endif
-
-#else
-
-/*
- * Due to the structure of pre-exisiting code, don't use assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC		cfi_ignore
-#define CFI_ENDPROC		cfi_ignore
-#define CFI_DEF_CFA		cfi_ignore
-#define CFI_DEF_CFA_REGISTER	cfi_ignore
-#define CFI_DEF_CFA_OFFSET	cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET	cfi_ignore
-#define CFI_OFFSET		cfi_ignore
-#define CFI_REL_OFFSET		cfi_ignore
-#define CFI_REGISTER		cfi_ignore
-#define CFI_RESTORE		cfi_ignore
-#define CFI_REMEMBER_STATE	cfi_ignore
-#define CFI_RESTORE_STATE	cfi_ignore
-#define CFI_UNDEFINED		cfi_ignore
-#define CFI_ESCAPE		cfi_ignore
-#define CFI_SIGNAL_FRAME	cfi_ignore
-
-#endif
-
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-	.macro pushq_cfi reg
-	pushq \reg
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro pushq_cfi_reg reg
-	pushq %\reg
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popq_cfi reg
-	popq \reg
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro popq_cfi_reg reg
-	popq %\reg
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfq_cfi
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro popfq_cfi
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro movq_cfi reg offset=0
-	movq %\reg, \offset(%rsp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movq_cfi_restore offset reg
-	movq \offset(%rsp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#else /*!CONFIG_X86_64*/
-	.macro pushl_cfi reg
-	pushl \reg
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro pushl_cfi_reg reg
-	pushl %\reg
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popl_cfi reg
-	popl \reg
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro popl_cfi_reg reg
-	popl %\reg
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfl_cfi
-	pushfl
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro popfl_cfi
-	popfl
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro movl_cfi reg offset=0
-	movl %\reg, \offset(%esp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movl_cfi_restore offset reg
-	movl \offset(%esp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-
-#endif /* _ASM_X86_DWARF2_H */
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 3b629f47eb65..793179cf8e21 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* The annotation hides the frame from the unwinder and makes it look
    like a ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	__ASM_SIZE(push,_cfi)	%__ASM_REG(bp)
-	CFI_REL_OFFSET		__ASM_REG(bp), 0
+	__ASM_SIZE(push,)	%__ASM_REG(bp)
 	__ASM_SIZE(mov)		%__ASM_REG(sp), %__ASM_REG(bp)
 	.endm
 	.macro ENDFRAME
-	__ASM_SIZE(pop,_cfi)	%__ASM_REG(bp)
-	CFI_RESTORE		__ASM_REG(bp)
+	__ASM_SIZE(pop,)	%__ASM_REG(bp)
 	.endm
 #else
 	.macro FRAME
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 1c309763e321..0ac73de925d1 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -50,7 +50,6 @@
 #include <asm/smp.h>
 #include <asm/page_types.h>
 #include <asm/percpu.h>
-#include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
@@ -113,11 +112,10 @@
 
  /* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
-	pushl_cfi $0
+	pushl $0
 .endm
 .macro POP_GS pop=0
 	addl $(4 + \pop), %esp
-	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
 .endm
 .macro POP_GS_EX
 .endm
@@ -137,16 +135,13 @@
 #else	/* CONFIG_X86_32_LAZY_GS */
 
 .macro PUSH_GS
-	pushl_cfi %gs
-	/*CFI_REL_OFFSET gs, 0*/
+	pushl %gs
 .endm
 
 .macro POP_GS pop=0
-98:	popl_cfi %gs
-	/*CFI_RESTORE gs*/
+98:	popl %gs
   .if \pop <> 0
 	add $\pop, %esp
-	CFI_ADJUST_CFA_OFFSET -\pop
   .endif
 .endm
 .macro POP_GS_EX
@@ -170,11 +165,9 @@
 
 .macro GS_TO_REG reg
 	movl %gs, \reg
-	/*CFI_REGISTER gs, \reg*/
 .endm
 .macro REG_TO_PTGS reg
 	movl \reg, PT_GS(%esp)
-	/*CFI_REL_OFFSET gs, PT_GS*/
 .endm
 .macro SET_KERNEL_GS reg
 	movl $(__KERNEL_STACK_CANARY), \reg
@@ -186,26 +179,16 @@
 .macro SAVE_ALL
 	cld
 	PUSH_GS
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0;*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0;*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0;*/
-	pushl_cfi %eax
-	CFI_REL_OFFSET eax, 0
-	pushl_cfi %ebp
-	CFI_REL_OFFSET ebp, 0
-	pushl_cfi %edi
-	CFI_REL_OFFSET edi, 0
-	pushl_cfi %esi
-	CFI_REL_OFFSET esi, 0
-	pushl_cfi %edx
-	CFI_REL_OFFSET edx, 0
-	pushl_cfi %ecx
-	CFI_REL_OFFSET ecx, 0
-	pushl_cfi %ebx
-	CFI_REL_OFFSET ebx, 0
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	movl $(__USER_DS), %edx
 	movl %edx, %ds
 	movl %edx, %es
@@ -215,30 +198,20 @@
 .endm
 
 .macro RESTORE_INT_REGS
-	popl_cfi %ebx
-	CFI_RESTORE ebx
-	popl_cfi %ecx
-	CFI_RESTORE ecx
-	popl_cfi %edx
-	CFI_RESTORE edx
-	popl_cfi %esi
-	CFI_RESTORE esi
-	popl_cfi %edi
-	CFI_RESTORE edi
-	popl_cfi %ebp
-	CFI_RESTORE ebp
-	popl_cfi %eax
-	CFI_RESTORE eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 .endm
 
 .macro RESTORE_REGS pop=0
 	RESTORE_INT_REGS
-1:	popl_cfi %ds
-	/*CFI_RESTORE ds;*/
-2:	popl_cfi %es
-	/*CFI_RESTORE es;*/
-3:	popl_cfi %fs
-	/*CFI_RESTORE fs;*/
+1:	popl %ds
+2:	popl %es
+3:	popl %fs
 	POP_GS \pop
 .pushsection .fixup, "ax"
 4:	movl $0, (%esp)
@@ -254,64 +227,27 @@
 	POP_GS_EX
 .endm
 
-.macro RING0_INT_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 3*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_EC_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 4*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_PTREGS_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
-	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
-	CFI_OFFSET eip, PT_EIP-PT_OLDESP
-	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
-	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
-	CFI_OFFSET eax, PT_EAX-PT_OLDESP
-	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
-	CFI_OFFSET edi, PT_EDI-PT_OLDESP
-	CFI_OFFSET esi, PT_ESI-PT_OLDESP
-	CFI_OFFSET edx, PT_EDX-PT_OLDESP
-	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
-	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
-.endm
-
 ENTRY(ret_from_fork)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202		# Reset kernel eflags
+	popfl
 	jmp syscall_exit
-	CFI_ENDPROC
 END(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202		# Reset kernel eflags
+	popfl
 	movl PT_EBP(%esp),%eax
 	call *PT_EBX(%esp)
 	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-	CFI_ENDPROC
 ENDPROC(ret_from_kernel_thread)
 
 /*
@@ -323,7 +259,6 @@ ENDPROC(ret_from_kernel_thread)
 
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
-	RING0_PTREGS_FRAME
 ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
@@ -367,17 +302,12 @@ ENTRY(resume_kernel)
 	jmp need_resched
 END(resume_kernel)
 #endif
-	CFI_ENDPROC
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
 
 	# sysenter call handler stub
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 0
-	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_sp0(%esp),%esp
 sysenter_past_esp:
 	/*
@@ -385,14 +315,11 @@ ENTRY(ia32_sysenter_target)
 	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
 	 * we immediately enable interrupts at that point anyway.
 	 */
-	pushl_cfi $__USER_DS
-	/*CFI_REL_OFFSET ss, 0*/
-	pushl_cfi %ebp
-	CFI_REL_OFFSET esp, 0
-	pushfl_cfi
+	pushl $__USER_DS
+	pushl %ebp
+	pushfl
 	orl $X86_EFLAGS_IF, (%esp)
-	pushl_cfi $__USER_CS
-	/*CFI_REL_OFFSET cs, 0*/
+	pushl $__USER_CS
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
@@ -401,10 +328,9 @@ ENTRY(ia32_sysenter_target)
 	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
 	 * and THREAD_SIZE takes us to the bottom.
 	 */
-	pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-	CFI_REL_OFFSET eip, 0
+	pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
 
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -453,11 +379,11 @@ ENTRY(ia32_sysenter_target)
 	/* movl PT_EAX(%esp), %eax	already set, syscall number: 1st arg to audit */
 	movl PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
 	/* movl PT_ECX(%esp), %ecx	already set, a1: 3nd arg to audit */
-	pushl_cfi PT_ESI(%esp)		/* a3: 5th arg */
-	pushl_cfi PT_EDX+4(%esp)	/* a2: 4th arg */
+	pushl PT_ESI(%esp)		/* a3: 5th arg */
+	pushl PT_EDX+4(%esp)	/* a2: 4th arg */
 	call __audit_syscall_entry
-	popl_cfi %ecx /* get that remapped edx off the stack */
-	popl_cfi %ecx /* get that remapped esi off the stack */
+	popl %ecx /* get that remapped edx off the stack */
+	popl %ecx /* get that remapped esi off the stack */
 	movl PT_EAX(%esp),%eax		/* reload syscall number */
 	jmp sysenter_do_call
 
@@ -480,7 +406,6 @@ ENTRY(ia32_sysenter_target)
 	jmp sysenter_exit
 #endif
 
-	CFI_ENDPROC
 .pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
 	jmp 1b
@@ -491,9 +416,8 @@ ENDPROC(ia32_sysenter_target)
 
 	# system call handler stub
 ENTRY(system_call)
-	RING0_INT_FRAME			# can't unwind into user space anyway
 	ASM_CLAC
-	pushl_cfi %eax			# save orig_eax
+	pushl %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 					# system call tracing in operation / emulation
@@ -527,7 +451,6 @@ ENTRY(system_call)
 	movb PT_CS(%esp), %al
 	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 #endif
 restore_nocheck:
@@ -543,7 +466,6 @@ ENTRY(iret_exc)
 	_ASM_EXTABLE(irq_return,iret_exc)
 
 #ifdef CONFIG_X86_ESPFIX32
-	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
 	/*
@@ -577,22 +499,19 @@ ENTRY(iret_exc)
 	shr $16, %edx
 	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
 	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-	pushl_cfi $__ESPFIX_SS
-	pushl_cfi %eax			/* new kernel esp */
+	pushl $__ESPFIX_SS
+	pushl %eax			/* new kernel esp */
 	/* Disable interrupts, but do not irqtrace this section: we
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the iret */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss (%esp), %esp		/* switch to espfix segment */
-	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
 #endif
-	CFI_ENDPROC
 ENDPROC(system_call)
 
 	# perform work that needs to be done immediately before resumption
 	ALIGN
-	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
 	testb $_TIF_NEED_RESCHED, %cl
 	jz work_notifysig
@@ -634,9 +553,9 @@ work_notifysig:				# deal with pending signals and
 #ifdef CONFIG_VM86
 	ALIGN
 work_notifysig_v86:
-	pushl_cfi %ecx			# save ti_flags for do_notify_resume
+	pushl %ecx			# save ti_flags for do_notify_resume
 	call save_v86_state		# %eax contains pt_regs pointer
-	popl_cfi %ecx
+	popl %ecx
 	movl %eax, %esp
 	jmp 1b
 #endif
@@ -666,9 +585,7 @@ END(syscall_trace_entry)
 	call syscall_trace_leave
 	jmp resume_userspace
 END(syscall_exit_work)
-	CFI_ENDPROC
 
-	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
 	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
@@ -685,7 +602,6 @@ END(syscall_badsys)
 	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(sysenter_badsys)
-	CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -701,10 +617,9 @@ END(sysenter_badsys)
 	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax			/* the adjusted stack pointer */
-	pushl_cfi $__KERNEL_DS
-	pushl_cfi %eax
+	pushl $__KERNEL_DS
+	pushl %eax
 	lss (%esp), %esp		/* switch to the normal stack segment */
-	CFI_ADJUST_CFA_OFFSET -8
 #endif
 .endm
 .macro UNWIND_ESPFIX_STACK
@@ -728,13 +643,11 @@ END(sysenter_badsys)
  */
 	.align 8
 ENTRY(irq_entries_start)
-	RING0_INT_FRAME
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
+	pushl $(~vector+0x80)	/* Note: always in signed byte range */
     vector=vector+1
 	jmp	common_interrupt
-	CFI_ADJUST_CFA_OFFSET -4
 	.align	8
     .endr
 END(irq_entries_start)
@@ -753,19 +666,16 @@ END(irq_entries_start)
 	call do_IRQ
 	jmp ret_from_intr
 ENDPROC(common_interrupt)
-	CFI_ENDPROC
 
 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
-	RING0_INT_FRAME;		\
 	ASM_CLAC;			\
-	pushl_cfi $~(nr);		\
+	pushl $~(nr);		\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
 	movl %esp,%eax;			\
 	call fn;			\
 	jmp ret_from_intr;		\
-	CFI_ENDPROC;			\
 ENDPROC(name)
 
 
@@ -784,37 +694,31 @@ ENDPROC(name)
 #include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_error
+	pushl $0
+	pushl $do_coprocessor_error
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
+	pushl $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-	ALTERNATIVE "pushl_cfi $do_general_protection",	\
+	ALTERNATIVE "pushl $do_general_protection",	\
 		    "pushl $do_simd_coprocessor_error", \
 		    X86_FEATURE_XMM
 #else
-	pushl_cfi $do_simd_coprocessor_error
+	pushl $do_simd_coprocessor_error
 #endif
 	jmp error_code
-	CFI_ENDPROC
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1			# mark this as an int
-	pushl_cfi $do_device_not_available
+	pushl $-1			# mark this as an int
+	pushl $do_device_not_available
 	jmp error_code
-	CFI_ENDPROC
 END(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
@@ -830,115 +734,89 @@ END(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_overflow
+	pushl $0
+	pushl $do_overflow
 	jmp error_code
-	CFI_ENDPROC
 END(overflow)
 
 ENTRY(bounds)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_bounds
+	pushl $0
+	pushl $do_bounds
 	jmp error_code
-	CFI_ENDPROC
 END(bounds)
 
 ENTRY(invalid_op)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_invalid_op
+	pushl $0
+	pushl $do_invalid_op
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_segment_overrun
+	pushl $0
+	pushl $do_coprocessor_segment_overrun
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_invalid_TSS
+	pushl $do_invalid_TSS
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_TSS)
 
 ENTRY(segment_not_present)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_segment_not_present
+	pushl $do_segment_not_present
 	jmp error_code
-	CFI_ENDPROC
 END(segment_not_present)
 
 ENTRY(stack_segment)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_stack_segment
+	pushl $do_stack_segment
 	jmp error_code
-	CFI_ENDPROC
 END(stack_segment)
 
 ENTRY(alignment_check)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_alignment_check
+	pushl $do_alignment_check
 	jmp error_code
-	CFI_ENDPROC
 END(alignment_check)
 
 ENTRY(divide_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0			# no error code
-	pushl_cfi $do_divide_error
+	pushl $0			# no error code
+	pushl $do_divide_error
 	jmp error_code
-	CFI_ENDPROC
 END(divide_error)
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi machine_check_vector
+	pushl $0
+	pushl machine_check_vector
 	jmp error_code
-	CFI_ENDPROC
 END(machine_check)
 #endif
 
 ENTRY(spurious_interrupt_bug)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_spurious_interrupt_bug
+	pushl $0
+	pushl $do_spurious_interrupt_bug
 	jmp error_code
-	CFI_ENDPROC
 END(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
 ENTRY(xen_sysenter_target)
-	RING0_INT_FRAME
 	addl $5*4, %esp		/* remove xen-provided frame */
-	CFI_ADJUST_CFA_OFFSET -5*4
 	jmp sysenter_past_esp
-	CFI_ENDPROC
 
 ENTRY(xen_hypervisor_callback)
-	CFI_STARTPROC
-	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	TRACE_IRQS_OFF
 
@@ -962,7 +840,6 @@ ENTRY(xen_do_upcall)
 	call xen_maybe_preempt_hcall
 #endif
 	jmp  ret_from_intr
-	CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
 
 # Hypervisor uses this for application faults while it executes.
@@ -976,8 +853,7 @@ ENDPROC(xen_hypervisor_callback)
 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 # We distinguish between categories by maintaining a status value in EAX.
 ENTRY(xen_failsafe_callback)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	movl $1,%eax
 1:	mov 4(%esp),%ds
 2:	mov 8(%esp),%es
@@ -986,15 +862,13 @@ ENTRY(xen_failsafe_callback)
 	/* EAX == 0 => Category 1 (Bad segment)
 	   EAX != 0 => Category 2 (Bad IRET) */
 	testl %eax,%eax
-	popl_cfi %eax
+	popl %eax
 	lea 16(%esp),%esp
-	CFI_ADJUST_CFA_OFFSET -16
 	jz 5f
 	jmp iret_exc
-5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+5:	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	jmp ret_from_exception
-	CFI_ENDPROC
 
 .section .fixup,"ax"
 6:	xorl %eax,%eax
@@ -1195,34 +1069,28 @@ END(ftrace_graph_caller)
 
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $trace_do_page_fault
+	pushl $trace_do_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(trace_page_fault)
 #endif
 
 ENTRY(page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_page_fault
+	pushl $do_page_fault
 	ALIGN
 error_code:
 	/* the function address is in %gs's slot on the stack */
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0*/
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	cld
 	movl $(__KERNEL_PERCPU), %ecx
 	movl %ecx, %fs
@@ -1240,7 +1108,6 @@ ENTRY(page_fault)
 	movl %esp,%eax			# pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(page_fault)
 
 /*
@@ -1261,29 +1128,24 @@ END(page_fault)
 	jne \ok
 \label:
 	movl TSS_sysenter_sp0 + \offset(%esp), %esp
-	CFI_DEF_CFA esp, 0
-	CFI_UNDEFINED eip
-	pushfl_cfi
-	pushl_cfi $__KERNEL_CS
-	pushl_cfi $sysenter_past_esp
-	CFI_REL_OFFSET eip, 0
+	pushfl
+	pushl $__KERNEL_CS
+	pushl $sysenter_past_esp
 .endm
 
 ENTRY(debug)
-	RING0_INT_FRAME
 	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
-	pushl_cfi $-1			# mark this as an int
+	pushl $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx			# error code 0
 	movl %esp,%eax			# pt_regs pointer
 	call do_debug
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(debug)
 
 /*
@@ -1295,45 +1157,40 @@ END(debug)
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
-	RING0_INT_FRAME
 	ASM_CLAC
 #ifdef CONFIG_X86_ESPFIX32
-	pushl_cfi %eax
+	pushl %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
-	popl_cfi %eax
+	popl %eax
 	je nmi_espfix_stack
 #endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
-	pushl_cfi %eax
+	pushl %eax
 	movl %esp,%eax
 	/* Do not access memory above the end of our stack page,
 	 * it might not exist.
 	 */
 	andl $(THREAD_SIZE-1),%eax
 	cmpl $(THREAD_SIZE-20),%eax
-	popl_cfi %eax
+	popl %eax
 	jae nmi_stack_correct
 	cmpl $ia32_sysenter_target,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
-	/* We have a RING0_INT_FRAME here */
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
 	jmp restore_all_notrace
-	CFI_ENDPROC
 
 nmi_stack_fixup:
-	RING0_INT_FRAME
 	FIX_STACK 12, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_debug_stack_check:
-	/* We have a RING0_INT_FRAME here */
 	cmpw $__KERNEL_CS,16(%esp)
 	jne nmi_stack_correct
 	cmpl $debug,(%esp)
@@ -1345,57 +1202,48 @@ ENTRY(nmi)
 
 #ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
-	/* We have a RING0_INT_FRAME here.
-	 *
+	/*
 	 * create the pointer to lss back
 	 */
-	pushl_cfi %ss
-	pushl_cfi %esp
+	pushl %ss
+	pushl %esp
 	addl $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
-	pushl_cfi 16(%esp)
+	pushl 16(%esp)
 	.endr
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK		# %eax == %esp
 	xorl %edx,%edx			# zero error code
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to espfix stack
-	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
 #endif
-	CFI_ENDPROC
 END(nmi)
 
 ENTRY(int3)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1			# mark this as an int
+	pushl $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_int3
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(int3)
 
 ENTRY(general_protection)
-	RING0_EC_FRAME
-	pushl_cfi $do_general_protection
+	pushl $do_general_protection
 	jmp error_code
-	CFI_ENDPROC
 END(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_async_page_fault
+	pushl $do_async_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(async_page_fault)
 #endif
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0395a59f67c4..c21b4356aa8b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -19,8 +19,6 @@
  * at the top of the kernel process stack.
  *
  * Some macro usage:
- * - CFI macros are used to generate dwarf2 unwind information for better
- * backtraces. They don't change any code.
  * - ENTRY/END Define functions in the symbol table.
  * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
  * - idtentry - Define exception entry points.
@@ -30,7 +28,6 @@
 #include <asm/segment.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm-offsets.h>
 #include <asm/msr.h>
@@ -113,61 +110,6 @@ ENDPROC(native_usergs_sysret64)
 #endif
 
 /*
- * empty frame
- */
-	.macro EMPTY_FRAME start=1 offset=0
-	.if \start
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,8+\offset
-	.else
-	CFI_DEF_CFA_OFFSET 8+\offset
-	.endif
-	.endm
-
-/*
- * initial frame state for interrupts (and exceptions without error code)
- */
-	.macro INTR_FRAME start=1 offset=0
-	EMPTY_FRAME \start, 5*8+\offset
-	/*CFI_REL_OFFSET ss, 4*8+\offset*/
-	CFI_REL_OFFSET rsp, 3*8+\offset
-	/*CFI_REL_OFFSET rflags, 2*8+\offset*/
-	/*CFI_REL_OFFSET cs, 1*8+\offset*/
-	CFI_REL_OFFSET rip, 0*8+\offset
-	.endm
-
-/*
- * initial frame state for exceptions with error code (and interrupts
- * with vector already pushed)
- */
-	.macro XCPT_FRAME start=1 offset=0
-	INTR_FRAME \start, 1*8+\offset
-	.endm
-
-/*
- * frame that enables passing a complete pt_regs to a C function.
- */
-	.macro DEFAULT_FRAME start=1 offset=0
-	XCPT_FRAME \start, ORIG_RAX+\offset
-	CFI_REL_OFFSET rdi, RDI+\offset
-	CFI_REL_OFFSET rsi, RSI+\offset
-	CFI_REL_OFFSET rdx, RDX+\offset
-	CFI_REL_OFFSET rcx, RCX+\offset
-	CFI_REL_OFFSET rax, RAX+\offset
-	CFI_REL_OFFSET r8, R8+\offset
-	CFI_REL_OFFSET r9, R9+\offset
-	CFI_REL_OFFSET r10, R10+\offset
-	CFI_REL_OFFSET r11, R11+\offset
-	CFI_REL_OFFSET rbx, RBX+\offset
-	CFI_REL_OFFSET rbp, RBP+\offset
-	CFI_REL_OFFSET r12, R12+\offset
-	CFI_REL_OFFSET r13, R13+\offset
-	CFI_REL_OFFSET r14, R14+\offset
-	CFI_REL_OFFSET r15, R15+\offset
-	.endm
-
-/*
  * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
  *
  * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
@@ -196,12 +138,6 @@ ENDPROC(native_usergs_sysret64)
  */
 
 ENTRY(system_call)
-	CFI_STARTPROC	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rip,rcx
-	/*CFI_REGISTER	rflags,r11*/
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -219,8 +155,8 @@ GLOBAL(system_call_after_swapgs)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi $__USER_DS			/* pt_regs->ss */
-	pushq_cfi PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
+	pushq $__USER_DS			/* pt_regs->ss */
+	pushq PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
 	/*
 	 * Re-enable interrupts.
 	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
@@ -229,22 +165,20 @@ GLOBAL(system_call_after_swapgs)
 	 * with using rsp_scratch:
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq_cfi	%r11			/* pt_regs->flags */
-	pushq_cfi	$__USER_CS		/* pt_regs->cs */
-	pushq_cfi	%rcx			/* pt_regs->ip */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
-	pushq_cfi_reg	r8			/* pt_regs->r8 */
-	pushq_cfi_reg	r9			/* pt_regs->r9 */
-	pushq_cfi_reg	r10			/* pt_regs->r10 */
-	pushq_cfi_reg	r11			/* pt_regs->r11 */
+	pushq	%r11			/* pt_regs->flags */
+	pushq	$__USER_CS		/* pt_regs->cs */
+	pushq	%rcx			/* pt_regs->ip */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rcx			/* pt_regs->cx */
+	pushq	$-ENOSYS		/* pt_regs->ax */
+	pushq	%r8			/* pt_regs->r8 */
+	pushq	%r9			/* pt_regs->r9 */
+	pushq	%r10			/* pt_regs->r10 */
+	pushq	%r11			/* pt_regs->r11 */
 	sub	$(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 6*8
 
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 	jnz tracesys
@@ -282,13 +216,9 @@ GLOBAL(system_call_after_swapgs)
 	testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 	jnz int_ret_from_sys_call_irqs_off	/* Go to the slow path */
 
-	CFI_REMEMBER_STATE
-
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq	RIP(%rsp),%rcx
-	CFI_REGISTER	rip,rcx
 	movq	EFLAGS(%rsp),%r11
-	/*CFI_REGISTER	rflags,r11*/
 	movq	RSP(%rsp),%rsp
 	/*
 	 * 64bit SYSRET restores rip from rcx,
@@ -307,8 +237,6 @@ GLOBAL(system_call_after_swapgs)
 	 */
 	USERGS_SYSRET64
 
-	CFI_RESTORE_STATE
-
 	/* Do syscall entry tracing */
 tracesys:
 	movq %rsp, %rdi
@@ -374,9 +302,9 @@ GLOBAL(int_with_check)
 	jnc  int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq_cfi %rdi
+	pushq %rdi
 	SCHEDULE_USER
-	popq_cfi %rdi
+	popq %rdi
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
@@ -389,10 +317,10 @@ GLOBAL(int_with_check)
 	/* Check for syscall exit trace */
 	testl $_TIF_WORK_SYSCALL_EXIT,%edx
 	jz int_signal
-	pushq_cfi %rdi
+	pushq %rdi
 	leaq 8(%rsp),%rdi	# &ptregs -> arg1
 	call syscall_trace_leave
-	popq_cfi %rdi
+	popq %rdi
 	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
 	jmp int_restore_rest
 
@@ -475,27 +403,21 @@ GLOBAL(int_with_check)
 	 * perf profiles.  Nothing jumps here.
 	 */
 syscall_return_via_sysret:
-	CFI_REMEMBER_STATE
 	/* rcx and r11 are already restored (see code above) */
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq RSP(%rsp),%rsp
 	USERGS_SYSRET64
-	CFI_RESTORE_STATE
 
 opportunistic_sysret_failed:
 	SWAPGS
 	jmp	restore_c_regs_and_iret
-	CFI_ENDPROC
 END(system_call)
 
 
 	.macro FORK_LIKE func
 ENTRY(stub_\func)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8		/* offset 8: return address */
 	SAVE_EXTRA_REGS 8
 	jmp sys_\func
-	CFI_ENDPROC
 END(stub_\func)
 	.endm
 
@@ -504,8 +426,6 @@ END(stub_\func)
 	FORK_LIKE  vfork
 
 ENTRY(stub_execve)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	sys_execve
 return_from_execve:
 	testl	%eax, %eax
@@ -515,11 +435,9 @@ ENTRY(stub_execve)
 1:
 	/* must use IRET code path (pt_regs->cs may have changed) */
 	addq	$8, %rsp
-	CFI_ADJUST_CFA_OFFSET -8
 	ZERO_EXTRA_REGS
 	movq	%rax,RAX(%rsp)
 	jmp	int_ret_from_sys_call
-	CFI_ENDPROC
 END(stub_execve)
 /*
  * Remaining execve stubs are only 7 bytes long.
@@ -527,32 +445,23 @@ END(stub_execve)
  */
 	.align	8
 GLOBAL(stub_execveat)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	sys_execveat
 	jmp	return_from_execve
-	CFI_ENDPROC
 END(stub_execveat)
 
 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
 	.align	8
 GLOBAL(stub_x32_execve)
 GLOBAL(stub32_execve)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	compat_sys_execve
 	jmp	return_from_execve
-	CFI_ENDPROC
 END(stub32_execve)
 END(stub_x32_execve)
 	.align	8
 GLOBAL(stub_x32_execveat)
 GLOBAL(stub32_execveat)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	compat_sys_execveat
 	jmp	return_from_execve
-	CFI_ENDPROC
 END(stub32_execveat)
 END(stub_x32_execveat)
 #endif
@@ -562,8 +471,6 @@ END(stub_x32_execveat)
  * This cannot be done with SYSRET, so use the IRET return path instead.
  */
 ENTRY(stub_rt_sigreturn)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	/*
 	 * SAVE_EXTRA_REGS result is not normally needed:
 	 * sigreturn overwrites all pt_regs->GPREGS.
@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
 	call sys_rt_sigreturn
 return_from_stub:
 	addq	$8, %rsp
-	CFI_ADJUST_CFA_OFFSET -8
 	RESTORE_EXTRA_REGS
 	movq %rax,RAX(%rsp)
 	jmp int_ret_from_sys_call
-	CFI_ENDPROC
 END(stub_rt_sigreturn)
 
 #ifdef CONFIG_X86_X32_ABI
 ENTRY(stub_x32_rt_sigreturn)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	SAVE_EXTRA_REGS 8
 	call sys32_x32_rt_sigreturn
 	jmp  return_from_stub
-	CFI_ENDPROC
 END(stub_x32_rt_sigreturn)
 #endif
 
@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
  * rdi: prev task we switched from
  */
 ENTRY(ret_from_fork)
-	DEFAULT_FRAME
 
 	LOCK ; btr $TIF_FORK,TI_flags(%r8)
 
-	pushq_cfi $0x0002
-	popfq_cfi				# reset kernel eflags
+	pushq $0x0002
+	popfq				# reset kernel eflags
 
 	call schedule_tail			# rdi: 'prev' task parameter
 
@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
 	movl $0, RAX(%rsp)
 	RESTORE_EXTRA_REGS
 	jmp int_ret_from_sys_call
-	CFI_ENDPROC
 END(ret_from_fork)
 
 /*
@@ -637,16 +537,13 @@ END(ret_from_fork)
  */
 	.align 8
 ENTRY(irq_entries_start)
-	INTR_FRAME
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
+	pushq $(~vector+0x80)	/* Note: always in signed byte range */
     vector=vector+1
 	jmp	common_interrupt
-	CFI_ADJUST_CFA_OFFSET -8
 	.align	8
     .endr
-	CFI_ENDPROC
 END(irq_entries_start)
 
 /*
@@ -688,17 +585,7 @@ END(irq_entries_start)
 	movq %rsp, %rsi
 	incl PER_CPU_VAR(irq_count)
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-	CFI_DEF_CFA_REGISTER	rsi
 	pushq %rsi
-	/*
-	 * For debugger:
-	 * "CFA (Current Frame Address) is the value on stack + offset"
-	 */
-	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
-			0x77 /* DW_OP_breg7 (rsp) */, 0, \
-			0x06 /* DW_OP_deref */, \
-			0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
-			0x22 /* DW_OP_plus */
 	/* We entered an interrupt context - irqs are off: */
 	TRACE_IRQS_OFF
 
@@ -711,7 +598,6 @@ END(irq_entries_start)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-	XCPT_FRAME
 	ASM_CLAC
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -723,11 +609,8 @@ END(irq_entries_start)
 
 	/* Restore saved previous stack */
 	popq %rsi
-	CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
 	/* return code expects complete pt_regs - adjust rsp accordingly: */
 	leaq -RBP(%rsi),%rsp
-	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET	RBP
 
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
@@ -743,7 +626,6 @@ END(irq_entries_start)
 	LOCKDEP_SYS_EXIT_IRQ
 	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
-	CFI_REMEMBER_STATE
 	jnz  retint_careful
 
 retint_swapgs:		/* return to user-space */
@@ -807,8 +689,8 @@ ENTRY(native_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
-	pushq_cfi %rax
-	pushq_cfi %rdi
+	pushq %rax
+	pushq %rdi
 	SWAPGS
 	movq PER_CPU_VAR(espfix_waddr),%rdi
 	movq %rax,(0*8)(%rdi)	/* RAX */
@@ -823,24 +705,23 @@ ENTRY(native_iret)
 	movq (5*8)(%rsp),%rax	/* RSP */
 	movq %rax,(4*8)(%rdi)
 	andl $0xffff0000,%eax
-	popq_cfi %rdi
+	popq %rdi
 	orq PER_CPU_VAR(espfix_stack),%rax
 	SWAPGS
 	movq %rax,%rsp
-	popq_cfi %rax
+	popq %rax
 	jmp native_irq_return_iret
 #endif
 
 	/* edi: workmask, edx: work */
 retint_careful:
-	CFI_RESTORE_STATE
 	bt    $TIF_NEED_RESCHED,%edx
 	jnc   retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq_cfi %rdi
+	pushq %rdi
 	SCHEDULE_USER
-	popq_cfi %rdi
+	popq %rdi
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
@@ -862,7 +743,6 @@ ENTRY(native_iret)
 	GET_THREAD_INFO(%rcx)
 	jmp retint_with_reschedule
 
-	CFI_ENDPROC
 END(common_interrupt)
 
 /*
@@ -870,13 +750,11 @@ END(common_interrupt)
  */
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
-	INTR_FRAME
 	ASM_CLAC
-	pushq_cfi $~(\num)
+	pushq $~(\num)
 .Lcommon_\sym:
 	interrupt \do_sym
 	jmp ret_from_intr
-	CFI_ENDPROC
 END(\sym)
 .endm
 
@@ -966,24 +844,17 @@ ENTRY(\sym)
 	.error "using shift_ist requires paranoid=1"
 	.endif
 
-	.if \has_error_code
-	XCPT_FRAME
-	.else
-	INTR_FRAME
-	.endif
-
 	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 
 	.ifeq \has_error_code
-	pushq_cfi $-1			/* ORIG_RAX: no syscall to restart */
+	pushq $-1			/* ORIG_RAX: no syscall to restart */
 	.endif
 
 	ALLOC_PT_GPREGS_ON_STACK
 
 	.if \paranoid
 	.if \paranoid == 1
-	CFI_REMEMBER_STATE
 	testb	$3, CS(%rsp)		/* If coming from userspace, switch */
 	jnz 1f				/* stacks. */
 	.endif
@@ -993,8 +864,6 @@ ENTRY(\sym)
 	.endif
 	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
 
-	DEFAULT_FRAME 0
-
 	.if \paranoid
 	.if \shift_ist != -1
 	TRACE_IRQS_OFF_DEBUG		/* reload IDT in case of recursion */
@@ -1030,7 +899,6 @@ ENTRY(\sym)
 	.endif
 
 	.if \paranoid == 1
-	CFI_RESTORE_STATE
 	/*
 	 * Paranoid entry from userspace.  Switch stacks and treat it
 	 * as a normal entry.  This means that paranoid handlers
@@ -1039,7 +907,6 @@ ENTRY(\sym)
 1:
 	call error_entry
 
-	DEFAULT_FRAME 0
 
 	movq %rsp,%rdi			/* pt_regs pointer */
 	call sync_regs
@@ -1058,8 +925,6 @@ ENTRY(\sym)
 
 	jmp error_exit			/* %ebx: no swapgs flag */
 	.endif
-
-	CFI_ENDPROC
 END(\sym)
 .endm
 
@@ -1092,17 +957,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
 	/* Reload gs selector with exception handling */
 	/* edi:  new selector */
 ENTRY(native_load_gs_index)
-	CFI_STARTPROC
-	pushfq_cfi
+	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
 gs_change:
 	movl %edi,%gs
 2:	mfence		/* workaround */
 	SWAPGS
-	popfq_cfi
+	popfq
 	ret
-	CFI_ENDPROC
 END(native_load_gs_index)
 
 	_ASM_EXTABLE(gs_change,bad_gs)
@@ -1117,22 +980,15 @@ END(native_load_gs_index)
 
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(do_softirq_own_stack)
-	CFI_STARTPROC
-	pushq_cfi %rbp
-	CFI_REL_OFFSET rbp,0
+	pushq %rbp
 	mov  %rsp,%rbp
-	CFI_DEF_CFA_REGISTER rbp
 	incl PER_CPU_VAR(irq_count)
 	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
-	CFI_RESTORE		rbp
-	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET   -8
 	decl PER_CPU_VAR(irq_count)
 	ret
-	CFI_ENDPROC
 END(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
@@ -1152,28 +1008,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
  * activation and restart the handler using the previous one.
  */
 ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
-	CFI_STARTPROC
 /*
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
  * see the correct pointer to the pt_regs
  */
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
-	CFI_ENDPROC
-	DEFAULT_FRAME
 11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
-	CFI_DEF_CFA_REGISTER rbp
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp			# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
-	CFI_DEF_CFA_REGISTER rsp
 	decl PER_CPU_VAR(irq_count)
 #ifndef CONFIG_PREEMPT
 	call xen_maybe_preempt_hcall
 #endif
 	jmp  error_exit
-	CFI_ENDPROC
 END(xen_do_hypervisor_callback)
 
 /*
@@ -1190,16 +1040,8 @@ END(xen_do_hypervisor_callback)
  * with its current contents: any discrepancy means we in category 1.
  */
 ENTRY(xen_failsafe_callback)
-	INTR_FRAME 1 (6*8)
-	/*CFI_REL_OFFSET gs,GS*/
-	/*CFI_REL_OFFSET fs,FS*/
-	/*CFI_REL_OFFSET es,ES*/
-	/*CFI_REL_OFFSET ds,DS*/
-	CFI_REL_OFFSET r11,8
-	CFI_REL_OFFSET rcx,0
 	movl %ds,%ecx
 	cmpw %cx,0x10(%rsp)
-	CFI_REMEMBER_STATE
 	jne 1f
 	movl %es,%ecx
 	cmpw %cx,0x18(%rsp)
@@ -1212,29 +1054,21 @@ ENTRY(xen_failsafe_callback)
 	jne 1f
 	/* All segments match their saved values => Category 2 (Bad IRET). */
 	movq (%rsp),%rcx
-	CFI_RESTORE rcx
 	movq 8(%rsp),%r11
-	CFI_RESTORE r11
 	addq $0x30,%rsp
-	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq_cfi $0	/* RIP */
-	pushq_cfi %r11
-	pushq_cfi %rcx
+	pushq $0	/* RIP */
+	pushq %r11
+	pushq %rcx
 	jmp general_protection
-	CFI_RESTORE_STATE
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 	movq (%rsp),%rcx
-	CFI_RESTORE rcx
 	movq 8(%rsp),%r11
-	CFI_RESTORE r11
 	addq $0x30,%rsp
-	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushq $-1 /* orig_ax = -1 => not a system call */
 	ALLOC_PT_GPREGS_ON_STACK
 	SAVE_C_REGS
 	SAVE_EXTRA_REGS
 	jmp error_exit
-	CFI_ENDPROC
 END(xen_failsafe_callback)
 
 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
@@ -1270,7 +1104,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
-	XCPT_FRAME 1 15*8
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1282,7 +1115,6 @@ ENTRY(paranoid_entry)
 	SWAPGS
 	xorl %ebx,%ebx
 1:	ret
-	CFI_ENDPROC
 END(paranoid_entry)
 
 /*
@@ -1297,7 +1129,6 @@ END(paranoid_entry)
  */
 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
 ENTRY(paranoid_exit)
-	DEFAULT_FRAME
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF_DEBUG
 	testl %ebx,%ebx				/* swapgs needed? */
@@ -1312,7 +1143,6 @@ ENTRY(paranoid_exit)
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
 	INTERRUPT_RETURN
-	CFI_ENDPROC
 END(paranoid_exit)
 
 /*
@@ -1320,7 +1150,6 @@ END(paranoid_exit)
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(error_entry)
-	XCPT_FRAME 1 15*8
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1340,7 +1169,6 @@ ENTRY(error_entry)
 	 * for these here too.
 	 */
 error_kernelspace:
-	CFI_REL_OFFSET rcx, RCX+8
 	incl %ebx
 	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
@@ -1364,13 +1192,11 @@ ENTRY(error_entry)
 	mov %rax,%rsp
 	decl %ebx	/* Return to usergs */
 	jmp error_sti
-	CFI_ENDPROC
 END(error_entry)
 
 
 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
 ENTRY(error_exit)
-	DEFAULT_FRAME
 	movl %ebx,%eax
 	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
@@ -1384,12 +1210,10 @@ ENTRY(error_exit)
 	andl %edi,%edx
 	jnz retint_careful
 	jmp retint_swapgs
-	CFI_ENDPROC
 END(error_exit)
 
 /* Runs on exception stack */
 ENTRY(nmi)
-	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1424,8 +1248,7 @@ ENTRY(nmi)
 	 */
 
 	/* Use %rdx as our temp variable throughout */
-	pushq_cfi %rdx
-	CFI_REL_OFFSET rdx, 0
+	pushq %rdx
 
 	/*
 	 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1459,8 +1282,6 @@ ENTRY(nmi)
 	jb	first_nmi
 	/* Ah, it is within the NMI stack, treat it as nested */
 
-	CFI_REMEMBER_STATE
-
 nested_nmi:
 	/*
 	 * Do nothing if we interrupted the fixup in repeat_nmi.
@@ -1478,26 +1299,22 @@ ENTRY(nmi)
 	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
 	leaq -1*8(%rsp), %rdx
 	movq %rdx, %rsp
-	CFI_ADJUST_CFA_OFFSET 1*8
 	leaq -10*8(%rsp), %rdx
-	pushq_cfi $__KERNEL_DS
-	pushq_cfi %rdx
-	pushfq_cfi
-	pushq_cfi $__KERNEL_CS
-	pushq_cfi $repeat_nmi
+	pushq $__KERNEL_DS
+	pushq %rdx
+	pushfq
+	pushq $__KERNEL_CS
+	pushq $repeat_nmi
 
 	/* Put stack back */
 	addq $(6*8), %rsp
-	CFI_ADJUST_CFA_OFFSET -6*8
 
 nested_nmi_out:
-	popq_cfi %rdx
-	CFI_RESTORE rdx
+	popq %rdx
 
 	/* No need to check faults here */
 	INTERRUPT_RETURN
 
-	CFI_RESTORE_STATE
 first_nmi:
 	/*
 	 * Because nested NMIs will use the pushed location that we
@@ -1536,22 +1353,19 @@ ENTRY(nmi)
 	 */
 	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
 	movq (%rsp), %rdx
-	CFI_RESTORE rdx
 
 	/* Set the NMI executing variable on the stack. */
-	pushq_cfi $1
+	pushq $1
 
 	/*
 	 * Leave room for the "copied" frame
 	 */
 	subq $(5*8), %rsp
-	CFI_ADJUST_CFA_OFFSET 5*8
 
 	/* Copy the stack frame to the Saved frame */
 	.rept 5
-	pushq_cfi 11*8(%rsp)
+	pushq 11*8(%rsp)
 	.endr
-	CFI_DEF_CFA_OFFSET 5*8
 
 	/* Everything up to here is safe from nested NMIs */
 
@@ -1574,12 +1388,10 @@ ENTRY(nmi)
 
 	/* Make another copy, this one may be modified by nested NMIs */
 	addq $(10*8), %rsp
-	CFI_ADJUST_CFA_OFFSET -10*8
 	.rept 5
-	pushq_cfi -6*8(%rsp)
+	pushq -6*8(%rsp)
 	.endr
 	subq $(5*8), %rsp
-	CFI_DEF_CFA_OFFSET 5*8
 end_repeat_nmi:
 
 	/*
@@ -1587,7 +1399,7 @@ ENTRY(nmi)
 	 * NMI if the first NMI took an exception and reset our iret stack
 	 * so that we repeat another NMI.
 	 */
-	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
+	pushq $-1		/* ORIG_RAX: no syscall to restart */
 	ALLOC_PT_GPREGS_ON_STACK
 
 	/*
@@ -1598,7 +1410,6 @@ ENTRY(nmi)
 	 * exceptions might do.
 	 */
 	call paranoid_entry
-	DEFAULT_FRAME 0
 
 	/*
 	 * Save off the CR2 register. If we take a page fault in the NMI then
@@ -1635,13 +1446,10 @@ ENTRY(nmi)
 	/* Clear the NMI executing stack variable */
 	movq $0, 5*8(%rsp)
 	jmp irq_return
-	CFI_ENDPROC
 END(nmi)
 
 ENTRY(ignore_sysret)
-	CFI_STARTPROC
 	mov $-ENOSYS,%eax
 	sysret
-	CFI_ENDPROC
 END(ignore_sysret)
 
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 00933d5e992f..9b0ca8fe80fc 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -11,26 +11,23 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
-	pushfl_cfi
+	pushfl
 	cli
 .endm
 
 .macro UNLOCK reg
-	popfl_cfi
+	popfl
 .endm
 
 #define BEGIN(op) \
 .macro endp; \
-	CFI_ENDPROC; \
 ENDPROC(atomic64_##op##_386); \
 .purgem endp; \
 .endm; \
 ENTRY(atomic64_##op##_386); \
-	CFI_STARTPROC; \
 	LOCK v;
 
 #define ENDP endp
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 082a85167a5b..db3ae85440ff 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -11,7 +11,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 .macro read64 reg
 	movl %ebx, %eax
@@ -22,16 +21,11 @@
 .endm
 
 ENTRY(atomic64_read_cx8)
-	CFI_STARTPROC
-
 	read64 %ecx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_read_cx8)
 
 ENTRY(atomic64_set_cx8)
-	CFI_STARTPROC
-
 1:
 /* we don't need LOCK_PREFIX since aligned 64-bit writes
  * are atomic on 586 and newer */
@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
 	jne 1b
 
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_set_cx8)
 
 ENTRY(atomic64_xchg_cx8)
-	CFI_STARTPROC
-
 1:
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
 
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_xchg_cx8)
 
 .macro addsub_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebp
-	pushl_cfi_reg ebx
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
+	pushl %ebp
+	pushl %ebx
+	pushl %esi
+	pushl %edi
 
 	movl %eax, %esi
 	movl %edx, %edi
@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
 	movl %ebx, %eax
 	movl %ecx, %edx
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebx
-	popl_cfi_reg ebp
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm
 
@@ -93,8 +81,7 @@ addsub_return sub sub sbb
 
 .macro incdec_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx
 
 	read64 %esi
 1:
@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
 	movl %ebx, %eax
 	movl %ecx, %edx
-	popl_cfi_reg ebx
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm
 
@@ -119,8 +105,7 @@ incdec_return inc add adc
 incdec_return dec sub sbb
 
 ENTRY(atomic64_dec_if_positive_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx
 
 	read64 %esi
 1:
@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
 2:
 	movl %ebx, %eax
 	movl %ecx, %edx
-	popl_cfi_reg ebx
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)
 
 ENTRY(atomic64_add_unless_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebp
-	pushl_cfi_reg ebx
+	pushl %ebp
+	pushl %ebx
 /* these just push these two parameters on the stack */
-	pushl_cfi_reg edi
-	pushl_cfi_reg ecx
+	pushl %edi
+	pushl %ecx
 
 	movl %eax, %ebp
 	movl %edx, %edi
@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
 	movl $1, %eax
 3:
 	addl $8, %esp
-	CFI_ADJUST_CFA_OFFSET -8
-	popl_cfi_reg ebx
-	popl_cfi_reg ebp
+	popl %ebx
+	popl %ebp
 	ret
 4:
 	cmpl %edx, 4(%esp)
 	jne 2b
 	xorl %eax, %eax
 	jmp 3b
-	CFI_ENDPROC
 ENDPROC(atomic64_add_unless_cx8)
 
 ENTRY(atomic64_inc_not_zero_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx
 
 	read64 %esi
 1:
@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
 
 	movl $1, %eax
 3:
-	popl_cfi_reg ebx
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 9bc944a91274..c1e623209853 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
 				
@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 	   * alignment for the unrolled loop.
 	   */		
 ENTRY(csum_partial)
-	CFI_STARTPROC
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %esi
+	pushl %ebx
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
 	movl 16(%esp),%ecx	# Function arg: int len
 	movl 12(%esp),%esi	# Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
 	jz 8f
 	roll $8, %eax
 8:
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
+	popl %ebx
+	popl %esi
 	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial)
 
 #else
@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
 /* Version for PentiumII/PPro */
 
 ENTRY(csum_partial)
-	CFI_STARTPROC
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %esi
+	pushl %ebx
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
 	movl 16(%esp),%ecx	# Function arg: int len
 	movl 12(%esp),%esi	# Function arg:	const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
 	jz 90f
 	roll $8, %eax
 90: 
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
+	popl %ebx
+	popl %esi
 	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial)
 				
 #endif
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 #define FP		12
 		
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
 	subl  $4,%esp	
-	CFI_ADJUST_CFA_OFFSET 4
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %edi
+	pushl %esi
+	pushl %ebx
 	movl ARGBASE+16(%esp),%eax	# sum
 	movl ARGBASE+12(%esp),%ecx	# len
 	movl ARGBASE+4(%esp),%esi	# src
@@ -401,12 +394,11 @@ DST(	movb %cl, (%edi)	)
 
 .previous
 
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi %ecx			# equivalent to addl $4,%esp
+	popl %ebx
+	popl %esi
+	popl %edi
+	popl %ecx			# equivalent to addl $4,%esp
 	ret	
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
 
 #else
@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
 #define ARGBASE 12
 		
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
+	pushl %ebx
+	pushl %edi
+	pushl %esi
 	movl ARGBASE+4(%esp),%esi	#src
 	movl ARGBASE+8(%esp),%edi	#dst	
 	movl ARGBASE+12(%esp),%ecx	#len
@@ -489,11 +480,10 @@ DST(	movb %dl, (%edi)         )
 	jmp  7b			
 .previous				
 
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebx
+	popl %esi
+	popl %edi
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
 				
 #undef ROUND
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index e67e579c93bd..a2fe51b00cce 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,4 @@
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -15,7 +14,6 @@
  * %rdi	- page
  */
 ENTRY(clear_page)
-	CFI_STARTPROC
 
 	ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp clear_page_c_e", X86_FEATURE_ERMS
@@ -24,11 +22,9 @@ ENTRY(clear_page)
 	xorl %eax,%eax
 	rep stosq
 	ret
-	CFI_ENDPROC
 ENDPROC(clear_page)
 
 ENTRY(clear_page_orig)
-	CFI_STARTPROC
 
 	xorl   %eax,%eax
 	movl   $4096/64,%ecx
@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
 	jnz	.Lloop
 	nop
 	ret
-	CFI_ENDPROC
 ENDPROC(clear_page_orig)
 
 ENTRY(clear_page_c_e)
-	CFI_STARTPROC
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
 	ret
-	CFI_ENDPROC
 ENDPROC(clear_page_c_e)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 40a172541ee2..9b330242e740 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -6,7 +6,6 @@
  *
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/percpu.h>
 
 .text
@@ -21,7 +20,6 @@
  * %al  : Operation successful
  */
 ENTRY(this_cpu_cmpxchg16b_emu)
-CFI_STARTPROC
 
 #
 # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -32,7 +30,7 @@ CFI_STARTPROC
 # *atomic* on a single cpu (as provided by the this_cpu_xx class of
 # macros).
 #
-	pushfq_cfi
+	pushfq
 	cli
 
 	cmpq PER_CPU_VAR((%rsi)), %rax
@@ -43,17 +41,13 @@ CFI_STARTPROC
 	movq %rbx, PER_CPU_VAR((%rsi))
 	movq %rcx, PER_CPU_VAR(8(%rsi))
 
-	CFI_REMEMBER_STATE
-	popfq_cfi
+	popfq
 	mov $1, %al
 	ret
 
-	CFI_RESTORE_STATE
 .Lnot_same:
-	popfq_cfi
+	popfq
 	xor %al,%al
 	ret
 
-CFI_ENDPROC
-
 ENDPROC(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index b4807fce5177..ad5349778490 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 .text
 
@@ -20,14 +19,13 @@
  * %ecx : high 32 bits of new value
  */
 ENTRY(cmpxchg8b_emu)
-CFI_STARTPROC
 
 #
 # Emulate 'cmpxchg8b (%esi)' on UP except we don't
 # set the whole ZF thing (caller will just compare
 # eax:edx with the expected value)
 #
-	pushfl_cfi
+	pushfl
 	cli
 
 	cmpl  (%esi), %eax
@@ -38,18 +36,15 @@ CFI_STARTPROC
 	movl %ebx,  (%esi)
 	movl %ecx, 4(%esi)
 
-	CFI_REMEMBER_STATE
-	popfl_cfi
+	popfl
 	ret
 
-	CFI_RESTORE_STATE
 .Lnot_same:
 	movl  (%esi), %eax
 .Lhalf_same:
 	movl 4(%esi), %edx
 
-	popfl_cfi
+	popfl
 	ret
 
-CFI_ENDPROC
 ENDPROC(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 8239dbcbf984..009f98216b7e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,6 @@
 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -13,22 +12,16 @@
  */
 	ALIGN
 ENTRY(copy_page)
-	CFI_STARTPROC
 	ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
 	movl	$4096/8, %ecx
 	rep	movsq
 	ret
-	CFI_ENDPROC
 ENDPROC(copy_page)
 
 ENTRY(copy_page_regs)
-	CFI_STARTPROC
 	subq	$2*8,	%rsp
-	CFI_ADJUST_CFA_OFFSET 2*8
 	movq	%rbx,	(%rsp)
-	CFI_REL_OFFSET rbx, 0
 	movq	%r12,	1*8(%rsp)
-	CFI_REL_OFFSET r12, 1*8
 
 	movl	$(4096/64)-5,	%ecx
 	.p2align 4
@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
 	jnz	.Loop2
 
 	movq	(%rsp), %rbx
-	CFI_RESTORE rbx
 	movq	1*8(%rsp), %r12
-	CFI_RESTORE r12
 	addq	$2*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -2*8
 	ret
-	CFI_ENDPROC
 ENDPROC(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index e4b3beee83bd..982ce34f4a9b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
@@ -18,7 +17,6 @@
 
 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%rax)
 	movq %rdi,%rcx
 	addq %rdx,%rcx
@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
 		      X86_FEATURE_REP_GOOD,			\
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
-	CFI_ENDPROC
 ENDPROC(_copy_to_user)
 
 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%rax)
 	movq %rsi,%rcx
 	addq %rdx,%rcx
@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
 		      X86_FEATURE_REP_GOOD,			\
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
-	CFI_ENDPROC
 ENDPROC(_copy_from_user)
 
 	.section .fixup,"ax"
 	/* must zero dest */
 ENTRY(bad_from_user)
 bad_from_user:
-	CFI_STARTPROC
 	movl %edx,%ecx
 	xorl %eax,%eax
 	rep
@@ -62,7 +56,6 @@ ENTRY(bad_from_user)
 bad_to_user:
 	movl %edx,%eax
 	ret
-	CFI_ENDPROC
 ENDPROC(bad_from_user)
 	.previous
 
@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_generic_unrolled)
-	CFI_STARTPROC
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE(19b,40b)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
-	CFI_ENDPROC
 ENDPROC(copy_user_generic_unrolled)
 
 /* Some CPUs run faster using the string copy instructions.
@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_generic_string)
-	CFI_STARTPROC
 	ASM_STAC
 	cmpl $8,%edx
 	jb 2f		/* less than 8 bytes, go to byte copy loop */
@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
 
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
-	CFI_ENDPROC
 ENDPROC(copy_user_generic_string)
 
 /*
@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_enhanced_fast_string)
-	CFI_STARTPROC
 	ASM_STAC
 	movl %edx,%ecx
 1:	rep
@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
 	.previous
 
 	_ASM_EXTABLE(1b,12b)
-	CFI_ENDPROC
 ENDPROC(copy_user_enhanced_fast_string)
 
 /*
@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
  * This will force destination/source out of cache for more performance.
  */
 ENTRY(__copy_user_nocache)
-	CFI_STARTPROC
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE(19b,40b)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
-	CFI_ENDPROC
 ENDPROC(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 9734182966f3..7e48807b2fa1 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -6,7 +6,6 @@
  * for more details. No warranty for anything given at all.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
 
@@ -47,23 +46,16 @@
 
 
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
 	cmpl	$3*64, %edx
 	jle	.Lignore
 
 .Lignore:
 	subq  $7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET 7*8
 	movq  %rbx, 2*8(%rsp)
-	CFI_REL_OFFSET rbx, 2*8
 	movq  %r12, 3*8(%rsp)
-	CFI_REL_OFFSET r12, 3*8
 	movq  %r14, 4*8(%rsp)
-	CFI_REL_OFFSET r14, 4*8
 	movq  %r13, 5*8(%rsp)
-	CFI_REL_OFFSET r13, 5*8
 	movq  %rbp, 6*8(%rsp)
-	CFI_REL_OFFSET rbp, 6*8
 
 	movq  %r8, (%rsp)
 	movq  %r9, 1*8(%rsp)
@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
 	addl %ebx, %eax
 	adcl %r9d, %eax		/* carry */
 
-	CFI_REMEMBER_STATE
 .Lende:
 	movq 2*8(%rsp), %rbx
-	CFI_RESTORE rbx
 	movq 3*8(%rsp), %r12
-	CFI_RESTORE r12
 	movq 4*8(%rsp), %r14
-	CFI_RESTORE r14
 	movq 5*8(%rsp), %r13
-	CFI_RESTORE r13
 	movq 6*8(%rsp), %rbp
-	CFI_RESTORE rbp
 	addq $7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -7*8
 	ret
-	CFI_RESTORE_STATE
 
 	/* Exception handlers. Very simple, zeroing is done in the wrappers */
 .Lbad_source:
@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
 	jz   .Lende
 	movl $-EFAULT, (%rax)
 	jmp .Lende
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index a4512359656a..46668cda4ffd 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
@@ -36,7 +35,6 @@
 
 	.text
 ENTRY(__get_user_1)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-	CFI_STARTPROC
 	add $1,%_ASM_AX
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
-	CFI_STARTPROC
 	add $3,%_ASM_AX
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-	CFI_STARTPROC
 #ifdef CONFIG_X86_64
 	add $7,%_ASM_AX
 	jc bad_get_user
@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
 	ASM_CLAC
 	ret
 #endif
-	CFI_ENDPROC
 ENDPROC(__get_user_8)
 
 
 bad_get_user:
-	CFI_STARTPROC
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 END(bad_get_user)
 
 #ifdef CONFIG_X86_32
 bad_get_user_8:
-	CFI_STARTPROC
 	xor %edx,%edx
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 END(bad_get_user_8)
 #endif
 
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index 05a95e713da8..33147fef3452 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -16,15 +16,12 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * override generic version in lib/iomap_copy.c
  */
 ENTRY(__iowrite32_copy)
-	CFI_STARTPROC
 	movl %edx,%ecx
 	rep movsd
 	ret
-	CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index b046664f5a1c..16698bba87de 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -2,7 +2,6 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeature.h>
-#include <asm/dwarf2.h>
 #include <asm/alternative-asm.h>
 
 /*
@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
 ENDPROC(memcpy_erms)
 
 ENTRY(memcpy_orig)
-	CFI_STARTPROC
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
 
 .Lend:
 	retq
-	CFI_ENDPROC
 ENDPROC(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 0f8a0d0331b9..ca2afdd6d98e 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,6 @@
  *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -27,7 +26,6 @@
 
 ENTRY(memmove)
 ENTRY(__memmove)
-	CFI_STARTPROC
 
 	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
@@ -207,6 +205,5 @@ ENTRY(__memmove)
 	movb %r11b, (%rdi)
 13:
 	retq
-	CFI_ENDPROC
 ENDPROC(__memmove)
 ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 93118fb23976..2661fad05827 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,6 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -66,7 +65,6 @@ ENTRY(memset_erms)
 ENDPROC(memset_erms)
 
 ENTRY(memset_orig)
-	CFI_STARTPROC
 	movq %rdi,%r10
 
 	/* expand byte value  */
@@ -78,7 +76,6 @@ ENTRY(memset_orig)
 	movl  %edi,%r9d
 	andl  $7,%r9d
 	jnz  .Lbad_alignment
-	CFI_REMEMBER_STATE
 .Lafter_bad_alignment:
 
 	movq  %rdx,%rcx
@@ -128,7 +125,6 @@ ENTRY(memset_orig)
 	movq	%r10,%rax
 	ret
 
-	CFI_RESTORE_STATE
 .Lbad_alignment:
 	cmpq $7,%rdx
 	jbe	.Lhandle_7
@@ -139,5 +135,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-	CFI_ENDPROC
 ENDPROC(memset_orig)
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 3ca5218fbece..c81556409bbb 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,6 +1,5 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
 
@@ -13,9 +12,8 @@
  */
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushq_cfi_reg rbx
-	pushq_cfi_reg rbp
+	pushq %rbx
+	pushq %rbp
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
 	movl    20(%rdi), %ebp
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
-	CFI_REMEMBER_STATE
 1:	\op
 2:	movl    %eax, (%r10)
 	movl	%r11d, %eax	/* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
 	movl    %ebp, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq_cfi_reg rbp
-	popq_cfi_reg rbx
+	popq %rbp
+	popq %rbx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, %r11d
 	jmp     2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
 
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg ebp
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
-	pushl_cfi $0              /* Return value */
-	pushl_cfi %eax
+	pushl %ebx
+	pushl %ebp
+	pushl %esi
+	pushl %edi
+	pushl $0              /* Return value */
+	pushl %eax
 	movl    4(%eax), %ecx
 	movl    8(%eax), %edx
 	movl    12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
 	movl    24(%eax), %esi
 	movl    28(%eax), %edi
 	movl    (%eax), %eax
-	CFI_REMEMBER_STATE
 1:	\op
-2:	pushl_cfi %eax
+2:	pushl %eax
 	movl    4(%esp), %eax
-	popl_cfi (%eax)
+	popl (%eax)
 	addl    $4, %esp
-	CFI_ADJUST_CFA_OFFSET -4
 	movl    %ecx, 4(%eax)
 	movl    %edx, 8(%eax)
 	movl    %ebx, 12(%eax)
 	movl    %ebp, 20(%eax)
 	movl    %esi, 24(%eax)
 	movl    %edi, 28(%eax)
-	popl_cfi %eax
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebp
-	popl_cfi_reg ebx
+	popl %eax
+	popl %edi
+	popl %esi
+	popl %ebp
+	popl %ebx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, 4(%esp)
 	jmp     2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index fc6ba17a7eec..e0817a12d323 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -11,7 +11,6 @@
  * return value.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
@@ -30,11 +29,9 @@
  * as they get called from within inline assembly.
  */
 
-#define ENTER	CFI_STARTPROC ; \
-		GET_THREAD_INFO(%_ASM_BX)
+#define ENTER	GET_THREAD_INFO(%_ASM_BX)
 #define EXIT	ASM_CLAC ;	\
-		ret ;		\
-		CFI_ENDPROC
+		ret
 
 .text
 ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 
 bad_put_user:
-	CFI_STARTPROC
 	movl $-EFAULT,%eax
 	EXIT
 END(bad_put_user)
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 2322abe4da3b..40027db99140 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -15,7 +15,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 #define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
 #define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
@@ -34,10 +33,10 @@
  */
 
 #define save_common_regs \
-	pushl_cfi_reg ecx
+	pushl %ecx
 
 #define restore_common_regs \
-	popl_cfi_reg ecx
+	popl %ecx
 
 	/* Avoid uglifying the argument copying x86-64 needs to do. */
 	.macro movq src, dst
@@ -64,50 +63,45 @@
  */
 
 #define save_common_regs \
-	pushq_cfi_reg rdi; \
-	pushq_cfi_reg rsi; \
-	pushq_cfi_reg rcx; \
-	pushq_cfi_reg r8;  \
-	pushq_cfi_reg r9;  \
-	pushq_cfi_reg r10; \
-	pushq_cfi_reg r11
+	pushq %rdi; \
+	pushq %rsi; \
+	pushq %rcx; \
+	pushq %r8;  \
+	pushq %r9;  \
+	pushq %r10; \
+	pushq %r11
 
 #define restore_common_regs \
-	popq_cfi_reg r11; \
-	popq_cfi_reg r10; \
-	popq_cfi_reg r9; \
-	popq_cfi_reg r8; \
-	popq_cfi_reg rcx; \
-	popq_cfi_reg rsi; \
-	popq_cfi_reg rdi
+	popq %r11; \
+	popq %r10; \
+	popq %r9; \
+	popq %r8; \
+	popq %rcx; \
+	popq %rsi; \
+	popq %rdi
 
 #endif
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_down_read_failed
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
-	CFI_STARTPROC
 	save_common_regs
 	movq %rax,%rdi
 	call rwsem_down_write_failed
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
-	CFI_STARTPROC
 	/* do nothing if still outstanding active readers */
 	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
 	jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
 	call rwsem_wake
 	restore_common_regs
 1:	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
 ENTRY(call_rwsem_downgrade_wake)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_downgrade_wake
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 5eb715087b80..e9acf5f4fc92 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -6,16 +6,14 @@
  */
 	#include <linux/linkage.h>
 	#include <asm/asm.h>
-	#include <asm/dwarf2.h>
 
 	/* put return address in eax (arg1) */
 	.macro THUNK name, func, put_ret_addr_in_eax=0
 	.globl \name
 \name:
-	CFI_STARTPROC
-	pushl_cfi_reg eax
-	pushl_cfi_reg ecx
-	pushl_cfi_reg edx
+	pushl %eax
+	pushl %ecx
+	pushl %edx
 
 	.if \put_ret_addr_in_eax
 	/* Place EIP in the arg1 */
@@ -23,11 +21,10 @@
 	.endif
 
 	call \func
-	popl_cfi_reg edx
-	popl_cfi_reg ecx
-	popl_cfi_reg eax
+	popl %edx
+	popl %ecx
+	popl %eax
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
 
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index f89ba4e93025..10f555e435e1 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -6,7 +6,6 @@
  * Subject to the GNU public license, v.2. No warranty of any kind.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm.h>
 
@@ -14,27 +13,25 @@
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
 	.globl \name
 \name:
-	CFI_STARTPROC
 
 	/* this one pushes 9 elems, the next one would be %rIP */
-	pushq_cfi_reg rdi
-	pushq_cfi_reg rsi
-	pushq_cfi_reg rdx
-	pushq_cfi_reg rcx
-	pushq_cfi_reg rax
-	pushq_cfi_reg r8
-	pushq_cfi_reg r9
-	pushq_cfi_reg r10
-	pushq_cfi_reg r11
+	pushq %rdi
+	pushq %rsi
+	pushq %rdx
+	pushq %rcx
+	pushq %rax
+	pushq %r8
+	pushq %r9
+	pushq %r10
+	pushq %r11
 
 	.if \put_ret_addr_in_rdi
 	/* 9*8(%rsp) is return addr on stack */
-	movq_cfi_restore 9*8, rdi
+	movq 9*8(%rsp), %rdi
 	.endif
 
 	call \func
 	jmp  restore
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
 
@@ -57,19 +54,16 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
  || defined(CONFIG_PREEMPT)
-	CFI_STARTPROC
-	CFI_ADJUST_CFA_OFFSET 9*8
 restore:
-	popq_cfi_reg r11
-	popq_cfi_reg r10
-	popq_cfi_reg r9
-	popq_cfi_reg r8
-	popq_cfi_reg rax
-	popq_cfi_reg rcx
-	popq_cfi_reg rdx
-	popq_cfi_reg rsi
-	popq_cfi_reg rdi
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rax
+	popq %rcx
+	popq %rdx
+	popq %rsi
+	popq %rdi
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(restore)
 #endif
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 6440221ced0d..4093216b3791 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -8,7 +8,6 @@
  * of the License.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * Calling convention :

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH v2] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-28 11:20     ` [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations Ingo Molnar
@ 2015-05-28 11:39       ` Ingo Molnar
  2015-05-28 11:51       ` [PATCH] " Jan Beulich
  1 sibling, 0 replies; 17+ messages in thread
From: Ingo Molnar @ 2015-05-28 11:39 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Andy Lutomirski, mingo, Brian Gerst, tglx, Linus Torvalds,
	Denys Vlasenko, linux-kernel, hpa, Josh Poimboeuf,
	Borislav Petkov, Peter Zijlstra, Frédéric Weisbecker


* Ingo Molnar <mingo@kernel.org> wrote:

> I'll do the attached patch: it gets rid of the unmaintainable dwarf mess from 
> low level x86 assembly code. [...]

The patch below fixes a bug in the removal and is also minimally build and boot 
tested.

Thanks,

	Ingo

===================================>
From: Ingo Molnar <mingo@kernel.org>
Date: Thu, 28 May 2015 12:21:47 +0200
Subject: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations

So the dwarf2 annotations in low-level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security-sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf-driven stack unwinding has caused
problems in the past, so it's out of tree, and the upstream
kernel only uses the much more robust framepointer-based
stack unwinding method.
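
For illustration, a minimal sketch of why the framepointer
method needs no side tables: every function built with frame
pointers links its frame to the caller's through the same
two-instruction prologue, so an unwinder can simply walk the
%rbp chain (this assumes CONFIG_FRAME_POINTER, i.e. no
-fomit-frame-pointer, and the label is made up for the example):

	example_frame:				# hypothetical label, illustration only
		pushq	%rbp			# (%rsp) now holds the caller's %rbp - the chain link
		movq	%rsp, %rbp		# 0(%rbp) = caller's %rbp, 8(%rbp) = return address
						# ... function body: the unwinder needs nothing else ...
		popq	%rbp			# epilogue: unlink this frame
		ret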

In addition to that, there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps them correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
under the following conditions:

 - it should be maximally readable, and maximally low-key to
   'ordinary' code reading and maintenance.

 - find a build-time method to insert dwarf annotations
   automatically in the most common cases, for the push/pop
   instructions that manipulate the stack pointer. This could
   be done, for example, via a preprocessing step that just
   looks for common patterns - plus special annotations for
   the few cases where we want to depart from the default.
   We have hundreds of CFI annotations, so automating most of
   that makes sense (a sketch of what such a step might emit
   follows this list).

 - it should come with build tooling checks that ensure that
   CFI annotations are sensible. We've seen such efforts from
   the framepointer side, and there's no reason it couldn't be
   done on the dwarf side.
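
To make the build-time idea above concrete, here is a sketch of
what such a preprocessing step might emit for the common push/pop
pattern. The directives below are exactly what the deleted
pushq_cfi_reg/popq_cfi_reg macros used to expand to, so this
mirrors the old behaviour rather than inventing new semantics
(in real output they would of course have to sit between
.cfi_startproc and .cfi_endproc):

	pushq	%rbx			# input line, matched by the pattern
	.cfi_adjust_cfa_offset 8	# emitted: CFA is one more word away from %rsp
	.cfi_rel_offset rbx, 0		# emitted: %rbx is saved at the new top of stack

	popq	%rbx			# input line, matched by the pattern
	.cfi_adjust_cfa_offset -8	# emitted: undo the CFA adjustment
	.cfi_restore rbx		# emitted: %rbx again holds its own value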

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/Makefile              |  10 +-
 arch/x86/ia32/ia32entry.S      | 133 ++++-----------
 arch/x86/include/asm/calling.h |  94 +++++------
 arch/x86/include/asm/dwarf2.h  | 170 -------------------
 arch/x86/include/asm/frame.h   |   7 +-
 arch/x86/kernel/entry_32.S     | 368 ++++++++++++-----------------------------
 arch/x86/kernel/entry_64.S     | 288 ++++++--------------------------
 arch/x86/lib/atomic64_386_32.S |   7 +-
 arch/x86/lib/atomic64_cx8_32.S |  61 +++----
 arch/x86/lib/checksum_32.S     |  52 +++---
 arch/x86/lib/clear_page_64.S   |   7 -
 arch/x86/lib/cmpxchg16b_emu.S  |  12 +-
 arch/x86/lib/cmpxchg8b_emu.S   |  11 +-
 arch/x86/lib/copy_page_64.S    |  11 --
 arch/x86/lib/copy_user_64.S    |  15 --
 arch/x86/lib/csum-copy_64.S    |  17 --
 arch/x86/lib/getuser.S         |  13 --
 arch/x86/lib/iomap_copy_64.S   |   3 -
 arch/x86/lib/memcpy_64.S       |   3 -
 arch/x86/lib/memmove_64.S      |   3 -
 arch/x86/lib/memset_64.S       |   5 -
 arch/x86/lib/msr-reg.S         |  44 ++---
 arch/x86/lib/putuser.S         |   8 +-
 arch/x86/lib/rwsem.S           |  49 +++---
 arch/x86/lib/thunk_32.S        |  15 +-
 arch/x86/lib/thunk_64.S        |  44 +++--
 arch/x86/net/bpf_jit.S         |   1 -
 27 files changed, 350 insertions(+), 1101 deletions(-)

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 57996ee840dd..43e8328a23e4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
 
-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 63450a596800..2be23c734db5 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -4,7 +4,6 @@
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */		 
 
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
@@ -60,17 +59,6 @@
 	movl %eax,%eax			/* zero extension */
 	.endm
 	
-	.macro CFI_STARTPROC32 simple
-	CFI_STARTPROC	\simple
-	CFI_UNDEFINED	r8
-	CFI_UNDEFINED	r9
-	CFI_UNDEFINED	r10
-	CFI_UNDEFINED	r11
-	CFI_UNDEFINED	r12
-	CFI_UNDEFINED	r13
-	CFI_UNDEFINED	r14
-	CFI_UNDEFINED	r15
-	.endm
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret32)
@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32)
  * with the int 0x80 path.
  */
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rsp,rbp
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target)
 	movl	%eax, %eax
 
 	movl	ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-	CFI_REGISTER rip,r10
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi	$__USER32_DS		/* pt_regs->ss */
-	pushq_cfi	%rbp			/* pt_regs->sp */
-	CFI_REL_OFFSET	rsp,0
-	pushfq_cfi				/* pt_regs->flags */
-	pushq_cfi	$__USER32_CS		/* pt_regs->cs */
-	pushq_cfi	%r10 /* pt_regs->ip = thread_info->sysenter_return */
-	CFI_REL_OFFSET	rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
+	pushq	$__USER32_DS		/* pt_regs->ss */
+	pushq	%rbp			/* pt_regs->sp */
+	pushfq				/* pt_regs->flags */
+	pushq	$__USER32_CS		/* pt_regs->cs */
+	pushq	%r10 /* pt_regs->ip = thread_info->sysenter_return */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rcx			/* pt_regs->cx */
+	pushq	$-ENOSYS		/* pt_regs->ax */
 	cld
 	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	/*
 	 * no need to do an access_ok check here because rbp has been
@@ -161,8 +140,8 @@ ENTRY(ia32_sysenter_target)
 
 	orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
+
 sysenter_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl	%edi,%r8d	/* arg5 */
@@ -193,14 +172,12 @@ ENTRY(ia32_sysenter_target)
 	 */
 	andl    $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl	RIP(%rsp),%ecx		/* User %eip */
-	CFI_REGISTER rip,rcx
 	RESTORE_RSI_RDI
 	xorl	%edx,%edx		/* avoid info leaks */
 	xorq	%r8,%r8
 	xorq	%r9,%r9
 	xorq	%r10,%r10
 	movl	EFLAGS(%rsp),%r11d	/* User eflags */
-	/*CFI_RESTORE rflags*/
 	TRACE_IRQS_ON
 
 	/*
@@ -231,8 +208,6 @@ ENTRY(ia32_sysenter_target)
 	 */
 	USERGS_SYSRET32
 
-	CFI_RESTORE_STATE
-
 #ifdef CONFIG_AUDITSYSCALL
 	.macro auditsys_entry_common
 	movl %esi,%r8d			/* 5th arg: 4th syscall arg */
@@ -282,8 +257,8 @@ ENTRY(ia32_sysenter_target)
 #endif
 
 sysenter_fix_flags:
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-	popfq_cfi
+	pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+	popfq
 	jmp sysenter_flags_fixed
 
 sysenter_tracesys:
@@ -298,7 +273,6 @@ ENTRY(ia32_sysenter_target)
 	LOAD_ARGS32  /* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp	sysenter_do_call
-	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
 
 /*
@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target)
  * with the int 0x80 path.
  */
 ENTRY(ia32_cstar_target)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rip,rcx
-	/*CFI_REGISTER	rflags,r11*/
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target)
 	 */
 	SWAPGS_UNSAFE_STACK
 	movl	%esp,%r8d
-	CFI_REGISTER	rsp,r8
 	movq	PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target)
 	movl	%eax,%eax
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi	$__USER32_DS		/* pt_regs->ss */
-	pushq_cfi	%r8			/* pt_regs->sp */
-	CFI_REL_OFFSET rsp,0
-	pushq_cfi	%r11			/* pt_regs->flags */
-	pushq_cfi	$__USER32_CS		/* pt_regs->cs */
-	pushq_cfi	%rcx			/* pt_regs->ip */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rbp			/* pt_regs->cx */
+	pushq	$__USER32_DS		/* pt_regs->ss */
+	pushq	%r8			/* pt_regs->sp */
+	pushq	%r11			/* pt_regs->flags */
+	pushq	$__USER32_CS		/* pt_regs->cs */
+	pushq	%rcx			/* pt_regs->ip */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rbp			/* pt_regs->cx */
 	movl	%ebp,%ecx
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
+	pushq	$-ENOSYS		/* pt_regs->ax */
 	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	/*
 	 * no need to do an access_ok check here because r8 has been
@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target)
 	ASM_CLAC
 	orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
+
 cstar_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl	%edi,%r8d	/* arg5 */
@@ -403,15 +367,12 @@ ENTRY(ia32_cstar_target)
 	andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	RESTORE_RSI_RDI_RDX
 	movl RIP(%rsp),%ecx
-	CFI_REGISTER rip,rcx
 	movl EFLAGS(%rsp),%r11d
-	/*CFI_REGISTER rflags,r11*/
 	xorq	%r10,%r10
 	xorq	%r9,%r9
 	xorq	%r8,%r8
 	TRACE_IRQS_ON
 	movl RSP(%rsp),%esp
-	CFI_RESTORE rsp
 	/*
 	 * 64bit->32bit SYSRET restores eip from ecx,
 	 * eflags from r11 (but RF and VM bits are forced to 0),
@@ -430,7 +391,6 @@ ENTRY(ia32_cstar_target)
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
-	CFI_RESTORE_STATE
 	movl %r9d,R9(%rsp)	/* register to be clobbered by call */
 	auditsys_entry_common
 	movl R9(%rsp),%r9d	/* reload 6th syscall arg */
@@ -460,7 +420,6 @@ END(ia32_cstar_target)
 	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
-	CFI_ENDPROC
 
 /*
  * Emulated IA32 system calls via int 0x80.
@@ -484,15 +443,6 @@ END(ia32_cstar_target)
  */
 
 ENTRY(ia32_syscall)
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,5*8
-	/*CFI_REL_OFFSET	ss,4*8 */
-	CFI_REL_OFFSET	rsp,3*8
-	/*CFI_REL_OFFSET	rflags,2*8 */
-	/*CFI_REL_OFFSET	cs,1*8 */
-	CFI_REL_OFFSET	rip,0*8
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -506,15 +456,14 @@ ENTRY(ia32_syscall)
 	movl	%eax,%eax
 
 	/* Construct struct pt_regs on stack (iret frame is already on stack) */
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rcx			/* pt_regs->cx */
+	pushq	$-ENOSYS		/* pt_regs->ax */
 	cld
 	sub	$(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -544,7 +493,6 @@ ENTRY(ia32_syscall)
 	LOAD_ARGS32	/* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp ia32_do_call
-	CFI_ENDPROC
 END(ia32_syscall)
 
 	.macro PTREGSCALL label, func
@@ -554,8 +502,6 @@ GLOBAL(\label)
 	jmp  ia32_ptregs_common	
 	.endm
 
-	CFI_STARTPROC32
-
 	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
 	PTREGSCALL stub32_sigreturn, sys32_sigreturn
 	PTREGSCALL stub32_fork, sys_fork
@@ -569,23 +515,8 @@ GLOBAL(stub32_clone)
 
 	ALIGN
 ia32_ptregs_common:
-	CFI_ENDPROC
-	CFI_STARTPROC32	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,SIZEOF_PTREGS
-	CFI_REL_OFFSET	rax,RAX
-	CFI_REL_OFFSET	rcx,RCX
-	CFI_REL_OFFSET	rdx,RDX
-	CFI_REL_OFFSET	rsi,RSI
-	CFI_REL_OFFSET	rdi,RDI
-	CFI_REL_OFFSET	rip,RIP
-/*	CFI_REL_OFFSET	cs,CS*/
-/*	CFI_REL_OFFSET	rflags,EFLAGS*/
-	CFI_REL_OFFSET	rsp,RSP
-/*	CFI_REL_OFFSET	ss,SS*/
 	SAVE_EXTRA_REGS 8
 	call *%rax
 	RESTORE_EXTRA_REGS 8
 	ret
-	CFI_ENDPROC
 END(ia32_ptregs_common)
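
For reference, hand-expanding the old and new forms shows what each of the
conversions above drops. A minimal standalone sketch, assuming CONFIG_AS_CFI
is enabled so the CFI_* names map to real .cfi_* directives (cfi_demo is a
made-up label, not anything in the tree):

	.text
	.globl	cfi_demo
cfi_demo:
	.cfi_startproc
	pushq	%rdi			/* what "pushq_cfi_reg rdi" emitted, plus: */
	.cfi_adjust_cfa_offset 8	/* the CFA moved down one slot */
	.cfi_rel_offset rdi, 0		/* %rdi now saved at offset 0 from the CFA */
	popq	%rdi			/* "popq_cfi_reg rdi", correspondingly: */
	.cfi_adjust_cfa_offset -8
	.cfi_restore rdi
	ret
	.cfi_endproc

Every push and pop in the entry code paid that two-directive tax, which is
the bulk of what this patch deletes.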
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 1c8b50edb2db..0d76accde45b 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
-#include <asm/dwarf2.h>
-
 #ifdef CONFIG_X86_64
 
 /*
@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
 	subq	$15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET 15*8+\addskip
 	.endm
 
 	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
 	.if \r11
-	movq_cfi r11, 6*8+\offset
+	movq %r11, 6*8+\offset(%rsp)
 	.endif
 	.if \r8910
-	movq_cfi r10, 7*8+\offset
-	movq_cfi r9,  8*8+\offset
-	movq_cfi r8,  9*8+\offset
+	movq %r10, 7*8+\offset(%rsp)
+	movq %r9,  8*8+\offset(%rsp)
+	movq %r8,  9*8+\offset(%rsp)
 	.endif
 	.if \rax
-	movq_cfi rax, 10*8+\offset
+	movq %rax, 10*8+\offset(%rsp)
 	.endif
 	.if \rcx
-	movq_cfi rcx, 11*8+\offset
+	movq %rcx, 11*8+\offset(%rsp)
 	.endif
-	movq_cfi rdx, 12*8+\offset
-	movq_cfi rsi, 13*8+\offset
-	movq_cfi rdi, 14*8+\offset
+	movq %rdx, 12*8+\offset(%rsp)
+	movq %rsi, 13*8+\offset(%rsp)
+	movq %rdi, 14*8+\offset(%rsp)
 	.endm
 	.macro SAVE_C_REGS offset=0
 	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
 	.endm
 
 	.macro SAVE_EXTRA_REGS offset=0
-	movq_cfi r15, 0*8+\offset
-	movq_cfi r14, 1*8+\offset
-	movq_cfi r13, 2*8+\offset
-	movq_cfi r12, 3*8+\offset
-	movq_cfi rbp, 4*8+\offset
-	movq_cfi rbx, 5*8+\offset
+	movq %r15, 0*8+\offset(%rsp)
+	movq %r14, 1*8+\offset(%rsp)
+	movq %r13, 2*8+\offset(%rsp)
+	movq %r12, 3*8+\offset(%rsp)
+	movq %rbp, 4*8+\offset(%rsp)
+	movq %rbx, 5*8+\offset(%rsp)
 	.endm
 	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq_cfi rbp, 4*8+\offset
+	movq %rbp, 4*8+\offset(%rsp)
 	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
-	movq_cfi_restore 0*8+\offset, r15
-	movq_cfi_restore 1*8+\offset, r14
-	movq_cfi_restore 2*8+\offset, r13
-	movq_cfi_restore 3*8+\offset, r12
-	movq_cfi_restore 4*8+\offset, rbp
-	movq_cfi_restore 5*8+\offset, rbx
+	movq 0*8+\offset(%rsp), %r15
+	movq 1*8+\offset(%rsp), %r14
+	movq 2*8+\offset(%rsp), %r13
+	movq 3*8+\offset(%rsp), %r12
+	movq 4*8+\offset(%rsp), %rbp
+	movq 5*8+\offset(%rsp), %rbx
 	.endm
 
 	.macro ZERO_EXTRA_REGS
@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
 	.if \rstor_r11
-	movq_cfi_restore 6*8, r11
+	movq 6*8(%rsp), %r11
 	.endif
 	.if \rstor_r8910
-	movq_cfi_restore 7*8, r10
-	movq_cfi_restore 8*8, r9
-	movq_cfi_restore 9*8, r8
+	movq 7*8(%rsp), %r10
+	movq 8*8(%rsp), %r9
+	movq 9*8(%rsp), %r8
 	.endif
 	.if \rstor_rax
-	movq_cfi_restore 10*8, rax
+	movq 10*8(%rsp), %rax
 	.endif
 	.if \rstor_rcx
-	movq_cfi_restore 11*8, rcx
+	movq 11*8(%rsp), %rcx
 	.endif
 	.if \rstor_rdx
-	movq_cfi_restore 12*8, rdx
+	movq 12*8(%rsp), %rdx
 	.endif
-	movq_cfi_restore 13*8, rsi
-	movq_cfi_restore 14*8, rdi
+	movq 13*8(%rsp), %rsi
+	movq 14*8(%rsp), %rdi
 	.endm
 	.macro RESTORE_C_REGS
 	RESTORE_C_REGS_HELPER 1,1,1,1,1
@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
 	addq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
 	.endm
 
 	.macro icebp
@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
  */
 
 	.macro SAVE_ALL
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	.endm
 
 	.macro RESTORE_ALL
-	popl_cfi_reg ebx
-	popl_cfi_reg ecx
-	popl_cfi_reg edx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebp
-	popl_cfi_reg eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 	.endm
 
 #endif /* CONFIG_X86_64 */
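
The fixed offsets in these helpers mirror the pt_regs slot layout: r15 lives
at slot 0 and rdi at slot 14, with ALLOC_PT_GPREGS_ON_STACK reserving all 15
slots at once. A stripped-down sketch of the save/restore round trip, with a
hypothetical label and only a few of the registers shown:

	.text
	.globl	regs_roundtrip
regs_roundtrip:
	subq	$15*8, %rsp		/* ALLOC_PT_GPREGS_ON_STACK */
	movq	%rdi, 14*8(%rsp)	/* SAVE_C_REGS: rdi is the top slot */
	movq	%rsi, 13*8(%rsp)
	movq	%r15, 0*8(%rsp)		/* SAVE_EXTRA_REGS starts at slot 0 */
	movq	0*8(%rsp), %r15		/* RESTORE_EXTRA_REGS */
	movq	13*8(%rsp), %rsi
	movq	14*8(%rsp), %rdi
	addq	$15*8, %rsp		/* REMOVE_PT_GPREGS_FROM_STACK */
	ret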
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
deleted file mode 100644
index de1cdaf4d743..000000000000
--- a/arch/x86/include/asm/dwarf2.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older versions.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC		.cfi_startproc
-#define CFI_ENDPROC		.cfi_endproc
-#define CFI_DEF_CFA		.cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
-#define CFI_OFFSET		.cfi_offset
-#define CFI_REL_OFFSET		.cfi_rel_offset
-#define CFI_REGISTER		.cfi_register
-#define CFI_RESTORE		.cfi_restore
-#define CFI_REMEMBER_STATE	.cfi_remember_state
-#define CFI_RESTORE_STATE	.cfi_restore_state
-#define CFI_UNDEFINED		.cfi_undefined
-#define CFI_ESCAPE		.cfi_escape
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME	.cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-	/*
-	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
-	 * The latter we currently just discard since we don't do DWARF
-	 * unwinding at runtime.  So only the offline DWARF information is
-	 * useful to anyone.  Note we should not use this directive if this
-	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
-	 * changed so it doesn't discard .eh_frame.
-	 */
-	.cfi_sections .debug_frame
-#endif
-
-#else
-
-/*
- * Due to the structure of pre-existing code, don't use assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC		cfi_ignore
-#define CFI_ENDPROC		cfi_ignore
-#define CFI_DEF_CFA		cfi_ignore
-#define CFI_DEF_CFA_REGISTER	cfi_ignore
-#define CFI_DEF_CFA_OFFSET	cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET	cfi_ignore
-#define CFI_OFFSET		cfi_ignore
-#define CFI_REL_OFFSET		cfi_ignore
-#define CFI_REGISTER		cfi_ignore
-#define CFI_RESTORE		cfi_ignore
-#define CFI_REMEMBER_STATE	cfi_ignore
-#define CFI_RESTORE_STATE	cfi_ignore
-#define CFI_UNDEFINED		cfi_ignore
-#define CFI_ESCAPE		cfi_ignore
-#define CFI_SIGNAL_FRAME	cfi_ignore
-
-#endif
-
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-	.macro pushq_cfi reg
-	pushq \reg
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro pushq_cfi_reg reg
-	pushq %\reg
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popq_cfi reg
-	popq \reg
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro popq_cfi_reg reg
-	popq %\reg
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfq_cfi
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro popfq_cfi
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro movq_cfi reg offset=0
-	movq %\reg, \offset(%rsp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movq_cfi_restore offset reg
-	movq \offset(%rsp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#else /*!CONFIG_X86_64*/
-	.macro pushl_cfi reg
-	pushl \reg
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro pushl_cfi_reg reg
-	pushl %\reg
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popl_cfi reg
-	popl \reg
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro popl_cfi_reg reg
-	popl %\reg
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfl_cfi
-	pushfl
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro popfl_cfi
-	popfl
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro movl_cfi reg offset=0
-	movl %\reg, \offset(%esp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movl_cfi_restore offset reg
-	movl \offset(%esp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-
-#endif /* _ASM_X86_DWARF2_H */
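
The deleted header boiled down to one trick: map each CFI_* name to the
corresponding .cfi_* directive when the assembler supports them, and to an
argument-swallowing no-op macro otherwise. A self-contained sketch of that
pattern (HAVE_ASM_CFI is a stand-in for CONFIG_AS_CFI):

#ifdef HAVE_ASM_CFI
# define CFI_STARTPROC	.cfi_startproc
# define CFI_ENDPROC	.cfi_endproc
#else
/* gas's '#' comment character cannot reliably swallow the arguments,
 * hence the dummy macro trick from the deleted header: */
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
# define CFI_STARTPROC	cfi_ignore
# define CFI_ENDPROC	cfi_ignore
#endif

	.text
	.globl	annotated_func
annotated_func:
	CFI_STARTPROC
	ret
	CFI_ENDPROC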
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 3b629f47eb65..793179cf8e21 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* The annotation hides the frame from the unwinder and makes it look
    like an ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	__ASM_SIZE(push,_cfi)	%__ASM_REG(bp)
-	CFI_REL_OFFSET		__ASM_REG(bp), 0
+	__ASM_SIZE(push,)	%__ASM_REG(bp)
 	__ASM_SIZE(mov)		%__ASM_REG(sp), %__ASM_REG(bp)
 	.endm
 	.macro ENDFRAME
-	__ASM_SIZE(pop,_cfi)	%__ASM_REG(bp)
-	CFI_RESTORE		__ASM_REG(bp)
+	__ASM_SIZE(pop,)	%__ASM_REG(bp)
 	.endm
 #else
 	.macro FRAME
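
With the annotations gone, FRAME/ENDFRAME reduce to the classic frame-pointer
prologue and epilogue. Hand-expanded for 64-bit (a sketch; framed_func is a
made-up symbol):

	.text
	.globl	framed_func
framed_func:
	pushq	%rbp		/* FRAME: save the caller's frame pointer */
	movq	%rsp, %rbp	/* ...and establish our own */
	nop			/* function body goes here */
	popq	%rbp		/* ENDFRAME: restore the caller's frame pointer */
	ret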
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 1c309763e321..0ac73de925d1 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -50,7 +50,6 @@
 #include <asm/smp.h>
 #include <asm/page_types.h>
 #include <asm/percpu.h>
-#include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
@@ -113,11 +112,10 @@
 
  /* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
-	pushl_cfi $0
+	pushl $0
 .endm
 .macro POP_GS pop=0
 	addl $(4 + \pop), %esp
-	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
 .endm
 .macro POP_GS_EX
 .endm
@@ -137,16 +135,13 @@
 #else	/* CONFIG_X86_32_LAZY_GS */
 
 .macro PUSH_GS
-	pushl_cfi %gs
-	/*CFI_REL_OFFSET gs, 0*/
+	pushl %gs
 .endm
 
 .macro POP_GS pop=0
-98:	popl_cfi %gs
-	/*CFI_RESTORE gs*/
+98:	popl %gs
   .if \pop <> 0
 	add $\pop, %esp
-	CFI_ADJUST_CFA_OFFSET -\pop
   .endif
 .endm
 .macro POP_GS_EX
@@ -170,11 +165,9 @@
 
 .macro GS_TO_REG reg
 	movl %gs, \reg
-	/*CFI_REGISTER gs, \reg*/
 .endm
 .macro REG_TO_PTGS reg
 	movl \reg, PT_GS(%esp)
-	/*CFI_REL_OFFSET gs, PT_GS*/
 .endm
 .macro SET_KERNEL_GS reg
 	movl $(__KERNEL_STACK_CANARY), \reg
@@ -186,26 +179,16 @@
 .macro SAVE_ALL
 	cld
 	PUSH_GS
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0;*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0;*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0;*/
-	pushl_cfi %eax
-	CFI_REL_OFFSET eax, 0
-	pushl_cfi %ebp
-	CFI_REL_OFFSET ebp, 0
-	pushl_cfi %edi
-	CFI_REL_OFFSET edi, 0
-	pushl_cfi %esi
-	CFI_REL_OFFSET esi, 0
-	pushl_cfi %edx
-	CFI_REL_OFFSET edx, 0
-	pushl_cfi %ecx
-	CFI_REL_OFFSET ecx, 0
-	pushl_cfi %ebx
-	CFI_REL_OFFSET ebx, 0
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	movl $(__USER_DS), %edx
 	movl %edx, %ds
 	movl %edx, %es
@@ -215,30 +198,20 @@
 .endm
 
 .macro RESTORE_INT_REGS
-	popl_cfi %ebx
-	CFI_RESTORE ebx
-	popl_cfi %ecx
-	CFI_RESTORE ecx
-	popl_cfi %edx
-	CFI_RESTORE edx
-	popl_cfi %esi
-	CFI_RESTORE esi
-	popl_cfi %edi
-	CFI_RESTORE edi
-	popl_cfi %ebp
-	CFI_RESTORE ebp
-	popl_cfi %eax
-	CFI_RESTORE eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 .endm
 
 .macro RESTORE_REGS pop=0
 	RESTORE_INT_REGS
-1:	popl_cfi %ds
-	/*CFI_RESTORE ds;*/
-2:	popl_cfi %es
-	/*CFI_RESTORE es;*/
-3:	popl_cfi %fs
-	/*CFI_RESTORE fs;*/
+1:	popl %ds
+2:	popl %es
+3:	popl %fs
 	POP_GS \pop
 .pushsection .fixup, "ax"
 4:	movl $0, (%esp)
@@ -254,64 +227,27 @@
 	POP_GS_EX
 .endm
 
-.macro RING0_INT_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 3*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_EC_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 4*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_PTREGS_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
-	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
-	CFI_OFFSET eip, PT_EIP-PT_OLDESP
-	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
-	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
-	CFI_OFFSET eax, PT_EAX-PT_OLDESP
-	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
-	CFI_OFFSET edi, PT_EDI-PT_OLDESP
-	CFI_OFFSET esi, PT_ESI-PT_OLDESP
-	CFI_OFFSET edx, PT_EDX-PT_OLDESP
-	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
-	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
-.endm
-
 ENTRY(ret_from_fork)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202		# Reset kernel eflags
+	popfl
 	jmp syscall_exit
-	CFI_ENDPROC
 END(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202		# Reset kernel eflags
+	popfl
 	movl PT_EBP(%esp),%eax
 	call *PT_EBX(%esp)
 	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-	CFI_ENDPROC
 ENDPROC(ret_from_kernel_thread)
 
 /*
@@ -323,7 +259,6 @@ ENDPROC(ret_from_kernel_thread)
 
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
-	RING0_PTREGS_FRAME
 ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
@@ -367,17 +302,12 @@ ENTRY(resume_kernel)
 	jmp need_resched
 END(resume_kernel)
 #endif
-	CFI_ENDPROC
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
 
 	# sysenter call handler stub
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 0
-	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_sp0(%esp),%esp
 sysenter_past_esp:
 	/*
@@ -385,14 +315,11 @@ ENTRY(ia32_sysenter_target)
 	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
 	 * we immediately enable interrupts at that point anyway.
 	 */
-	pushl_cfi $__USER_DS
-	/*CFI_REL_OFFSET ss, 0*/
-	pushl_cfi %ebp
-	CFI_REL_OFFSET esp, 0
-	pushfl_cfi
+	pushl $__USER_DS
+	pushl %ebp
+	pushfl
 	orl $X86_EFLAGS_IF, (%esp)
-	pushl_cfi $__USER_CS
-	/*CFI_REL_OFFSET cs, 0*/
+	pushl $__USER_CS
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
@@ -401,10 +328,9 @@ ENTRY(ia32_sysenter_target)
 	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
 	 * and THREAD_SIZE takes us to the bottom.
 	 */
-	pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-	CFI_REL_OFFSET eip, 0
+	pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
 
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -453,11 +379,11 @@ ENTRY(ia32_sysenter_target)
 	/* movl PT_EAX(%esp), %eax	already set, syscall number: 1st arg to audit */
 	movl PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
 	/* movl PT_ECX(%esp), %ecx	already set, a1: 3rd arg to audit */
-	pushl_cfi PT_ESI(%esp)		/* a3: 5th arg */
-	pushl_cfi PT_EDX+4(%esp)	/* a2: 4th arg */
+	pushl PT_ESI(%esp)		/* a3: 5th arg */
+	pushl PT_EDX+4(%esp)	/* a2: 4th arg */
 	call __audit_syscall_entry
-	popl_cfi %ecx /* get that remapped edx off the stack */
-	popl_cfi %ecx /* get that remapped esi off the stack */
+	popl %ecx /* get that remapped edx off the stack */
+	popl %ecx /* get that remapped esi off the stack */
 	movl PT_EAX(%esp),%eax		/* reload syscall number */
 	jmp sysenter_do_call
 
@@ -480,7 +406,6 @@ ENTRY(ia32_sysenter_target)
 	jmp sysenter_exit
 #endif
 
-	CFI_ENDPROC
 .pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
 	jmp 1b
@@ -491,9 +416,8 @@ ENDPROC(ia32_sysenter_target)
 
 	# system call handler stub
 ENTRY(system_call)
-	RING0_INT_FRAME			# can't unwind into user space anyway
 	ASM_CLAC
-	pushl_cfi %eax			# save orig_eax
+	pushl %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 					# system call tracing in operation / emulation
@@ -527,7 +451,6 @@ ENTRY(system_call)
 	movb PT_CS(%esp), %al
 	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 #endif
 restore_nocheck:
@@ -543,7 +466,6 @@ ENTRY(iret_exc)
 	_ASM_EXTABLE(irq_return,iret_exc)
 
 #ifdef CONFIG_X86_ESPFIX32
-	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
 	/*
@@ -577,22 +499,19 @@ ENTRY(iret_exc)
 	shr $16, %edx
 	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
 	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-	pushl_cfi $__ESPFIX_SS
-	pushl_cfi %eax			/* new kernel esp */
+	pushl $__ESPFIX_SS
+	pushl %eax			/* new kernel esp */
 	/* Disable interrupts, but do not irqtrace this section: we
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the iret */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss (%esp), %esp		/* switch to espfix segment */
-	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
 #endif
-	CFI_ENDPROC
 ENDPROC(system_call)
 
 	# perform work that needs to be done immediately before resumption
 	ALIGN
-	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
 	testb $_TIF_NEED_RESCHED, %cl
 	jz work_notifysig
@@ -634,9 +553,9 @@ work_notifysig:				# deal with pending signals and
 #ifdef CONFIG_VM86
 	ALIGN
 work_notifysig_v86:
-	pushl_cfi %ecx			# save ti_flags for do_notify_resume
+	pushl %ecx			# save ti_flags for do_notify_resume
 	call save_v86_state		# %eax contains pt_regs pointer
-	popl_cfi %ecx
+	popl %ecx
 	movl %eax, %esp
 	jmp 1b
 #endif
@@ -666,9 +585,7 @@ END(syscall_trace_entry)
 	call syscall_trace_leave
 	jmp resume_userspace
 END(syscall_exit_work)
-	CFI_ENDPROC
 
-	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
 	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
@@ -685,7 +602,6 @@ END(syscall_badsys)
 	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(sysenter_badsys)
-	CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -701,10 +617,9 @@ END(sysenter_badsys)
 	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax			/* the adjusted stack pointer */
-	pushl_cfi $__KERNEL_DS
-	pushl_cfi %eax
+	pushl $__KERNEL_DS
+	pushl %eax
 	lss (%esp), %esp		/* switch to the normal stack segment */
-	CFI_ADJUST_CFA_OFFSET -8
 #endif
 .endm
 .macro UNWIND_ESPFIX_STACK
@@ -728,13 +643,11 @@ END(sysenter_badsys)
  */
 	.align 8
 ENTRY(irq_entries_start)
-	RING0_INT_FRAME
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
+	pushl $(~vector+0x80)	/* Note: always in signed byte range */
     vector=vector+1
 	jmp	common_interrupt
-	CFI_ADJUST_CFA_OFFSET -4
 	.align	8
     .endr
 END(irq_entries_start)
@@ -753,19 +666,16 @@ END(irq_entries_start)
 	call do_IRQ
 	jmp ret_from_intr
 ENDPROC(common_interrupt)
-	CFI_ENDPROC
 
 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
-	RING0_INT_FRAME;		\
 	ASM_CLAC;			\
-	pushl_cfi $~(nr);		\
+	pushl $~(nr);		\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
 	movl %esp,%eax;			\
 	call fn;			\
 	jmp ret_from_intr;		\
-	CFI_ENDPROC;			\
 ENDPROC(name)
 
 
@@ -784,37 +694,31 @@ ENDPROC(name)
 #include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_error
+	pushl $0
+	pushl $do_coprocessor_error
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
+	pushl $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-	ALTERNATIVE "pushl_cfi $do_general_protection",	\
+	ALTERNATIVE "pushl $do_general_protection",	\
 		    "pushl $do_simd_coprocessor_error", \
 		    X86_FEATURE_XMM
 #else
-	pushl_cfi $do_simd_coprocessor_error
+	pushl $do_simd_coprocessor_error
 #endif
 	jmp error_code
-	CFI_ENDPROC
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1			# mark this as an int
-	pushl_cfi $do_device_not_available
+	pushl $-1			# mark this as an int
+	pushl $do_device_not_available
 	jmp error_code
-	CFI_ENDPROC
 END(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
@@ -830,115 +734,89 @@ END(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_overflow
+	pushl $0
+	pushl $do_overflow
 	jmp error_code
-	CFI_ENDPROC
 END(overflow)
 
 ENTRY(bounds)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_bounds
+	pushl $0
+	pushl $do_bounds
 	jmp error_code
-	CFI_ENDPROC
 END(bounds)
 
 ENTRY(invalid_op)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_invalid_op
+	pushl $0
+	pushl $do_invalid_op
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_segment_overrun
+	pushl $0
+	pushl $do_coprocessor_segment_overrun
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_invalid_TSS
+	pushl $do_invalid_TSS
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_TSS)
 
 ENTRY(segment_not_present)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_segment_not_present
+	pushl $do_segment_not_present
 	jmp error_code
-	CFI_ENDPROC
 END(segment_not_present)
 
 ENTRY(stack_segment)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_stack_segment
+	pushl $do_stack_segment
 	jmp error_code
-	CFI_ENDPROC
 END(stack_segment)
 
 ENTRY(alignment_check)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_alignment_check
+	pushl $do_alignment_check
 	jmp error_code
-	CFI_ENDPROC
 END(alignment_check)
 
 ENTRY(divide_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0			# no error code
-	pushl_cfi $do_divide_error
+	pushl $0			# no error code
+	pushl $do_divide_error
 	jmp error_code
-	CFI_ENDPROC
 END(divide_error)
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi machine_check_vector
+	pushl $0
+	pushl machine_check_vector
 	jmp error_code
-	CFI_ENDPROC
 END(machine_check)
 #endif
 
 ENTRY(spurious_interrupt_bug)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_spurious_interrupt_bug
+	pushl $0
+	pushl $do_spurious_interrupt_bug
 	jmp error_code
-	CFI_ENDPROC
 END(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
 ENTRY(xen_sysenter_target)
-	RING0_INT_FRAME
 	addl $5*4, %esp		/* remove xen-provided frame */
-	CFI_ADJUST_CFA_OFFSET -5*4
 	jmp sysenter_past_esp
-	CFI_ENDPROC
 
 ENTRY(xen_hypervisor_callback)
-	CFI_STARTPROC
-	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	TRACE_IRQS_OFF
 
@@ -962,7 +840,6 @@ ENTRY(xen_do_upcall)
 	call xen_maybe_preempt_hcall
 #endif
 	jmp  ret_from_intr
-	CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
 
 # Hypervisor uses this for application faults while it executes.
@@ -976,8 +853,7 @@ ENDPROC(xen_hypervisor_callback)
 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 # We distinguish between categories by maintaining a status value in EAX.
 ENTRY(xen_failsafe_callback)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	movl $1,%eax
 1:	mov 4(%esp),%ds
 2:	mov 8(%esp),%es
@@ -986,15 +862,13 @@ ENTRY(xen_failsafe_callback)
 	/* EAX == 0 => Category 1 (Bad segment)
 	   EAX != 0 => Category 2 (Bad IRET) */
 	testl %eax,%eax
-	popl_cfi %eax
+	popl %eax
 	lea 16(%esp),%esp
-	CFI_ADJUST_CFA_OFFSET -16
 	jz 5f
 	jmp iret_exc
-5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+5:	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	jmp ret_from_exception
-	CFI_ENDPROC
 
 .section .fixup,"ax"
 6:	xorl %eax,%eax
@@ -1195,34 +1069,28 @@ END(ftrace_graph_caller)
 
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $trace_do_page_fault
+	pushl $trace_do_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(trace_page_fault)
 #endif
 
 ENTRY(page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_page_fault
+	pushl $do_page_fault
 	ALIGN
 error_code:
 	/* the function address is in %gs's slot on the stack */
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0*/
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	cld
 	movl $(__KERNEL_PERCPU), %ecx
 	movl %ecx, %fs
@@ -1240,7 +1108,6 @@ ENTRY(page_fault)
 	movl %esp,%eax			# pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(page_fault)
 
 /*
@@ -1261,29 +1128,24 @@ END(page_fault)
 	jne \ok
 \label:
 	movl TSS_sysenter_sp0 + \offset(%esp), %esp
-	CFI_DEF_CFA esp, 0
-	CFI_UNDEFINED eip
-	pushfl_cfi
-	pushl_cfi $__KERNEL_CS
-	pushl_cfi $sysenter_past_esp
-	CFI_REL_OFFSET eip, 0
+	pushfl
+	pushl $__KERNEL_CS
+	pushl $sysenter_past_esp
 .endm
 
 ENTRY(debug)
-	RING0_INT_FRAME
 	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
-	pushl_cfi $-1			# mark this as an int
+	pushl $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx			# error code 0
 	movl %esp,%eax			# pt_regs pointer
 	call do_debug
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(debug)
 
 /*
@@ -1295,45 +1157,40 @@ END(debug)
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
-	RING0_INT_FRAME
 	ASM_CLAC
 #ifdef CONFIG_X86_ESPFIX32
-	pushl_cfi %eax
+	pushl %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
-	popl_cfi %eax
+	popl %eax
 	je nmi_espfix_stack
 #endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
-	pushl_cfi %eax
+	pushl %eax
 	movl %esp,%eax
 	/* Do not access memory above the end of our stack page,
 	 * it might not exist.
 	 */
 	andl $(THREAD_SIZE-1),%eax
 	cmpl $(THREAD_SIZE-20),%eax
-	popl_cfi %eax
+	popl %eax
 	jae nmi_stack_correct
 	cmpl $ia32_sysenter_target,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
-	/* We have a RING0_INT_FRAME here */
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
 	jmp restore_all_notrace
-	CFI_ENDPROC
 
 nmi_stack_fixup:
-	RING0_INT_FRAME
 	FIX_STACK 12, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_debug_stack_check:
-	/* We have a RING0_INT_FRAME here */
 	cmpw $__KERNEL_CS,16(%esp)
 	jne nmi_stack_correct
 	cmpl $debug,(%esp)
@@ -1345,57 +1202,48 @@ ENTRY(nmi)
 
 #ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
-	/* We have a RING0_INT_FRAME here.
-	 *
+	/*
 	 * create the pointer to lss back
 	 */
-	pushl_cfi %ss
-	pushl_cfi %esp
+	pushl %ss
+	pushl %esp
 	addl $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
-	pushl_cfi 16(%esp)
+	pushl 16(%esp)
 	.endr
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK		# %eax == %esp
 	xorl %edx,%edx			# zero error code
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to espfix stack
-	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
 #endif
-	CFI_ENDPROC
 END(nmi)
 
 ENTRY(int3)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1			# mark this as an int
+	pushl $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_int3
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(int3)
 
 ENTRY(general_protection)
-	RING0_EC_FRAME
-	pushl_cfi $do_general_protection
+	pushl $do_general_protection
 	jmp error_code
-	CFI_ENDPROC
 END(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_async_page_fault
+	pushl $do_async_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(async_page_fault)
 #endif
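
The 32-bit SAVE_ALL / RESTORE_INT_REGS pair above builds and tears down the
general-purpose half of struct pt_regs with nothing but plain pushes and
pops; the push order is simply the field order in reverse, so the last
register pushed (%ebx) ends up at the lowest address. A standalone sketch
(int_regs_demo is a made-up label):

	.text
	.globl	int_regs_demo
int_regs_demo:
	pushl	%eax		/* eax slot: highest of the GP slots */
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx		/* ebx slot: 0(%esp), lowest address */
	movl	%esp, %eax	/* the handlers take this as the pt_regs pointer */
	popl	%ebx		/* RESTORE_INT_REGS pops in the opposite order */
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	ret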
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0395a59f67c4..c21b4356aa8b 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -19,8 +19,6 @@
  * at the top of the kernel process stack.
  *
  * Some macro usage:
- * - CFI macros are used to generate dwarf2 unwind information for better
- * backtraces. They don't change any code.
  * - ENTRY/END Define functions in the symbol table.
  * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
  * - idtentry - Define exception entry points.
@@ -30,7 +28,6 @@
 #include <asm/segment.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm-offsets.h>
 #include <asm/msr.h>
@@ -113,61 +110,6 @@ ENDPROC(native_usergs_sysret64)
 #endif
 
 /*
- * empty frame
- */
-	.macro EMPTY_FRAME start=1 offset=0
-	.if \start
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,8+\offset
-	.else
-	CFI_DEF_CFA_OFFSET 8+\offset
-	.endif
-	.endm
-
-/*
- * initial frame state for interrupts (and exceptions without error code)
- */
-	.macro INTR_FRAME start=1 offset=0
-	EMPTY_FRAME \start, 5*8+\offset
-	/*CFI_REL_OFFSET ss, 4*8+\offset*/
-	CFI_REL_OFFSET rsp, 3*8+\offset
-	/*CFI_REL_OFFSET rflags, 2*8+\offset*/
-	/*CFI_REL_OFFSET cs, 1*8+\offset*/
-	CFI_REL_OFFSET rip, 0*8+\offset
-	.endm
-
-/*
- * initial frame state for exceptions with error code (and interrupts
- * with vector already pushed)
- */
-	.macro XCPT_FRAME start=1 offset=0
-	INTR_FRAME \start, 1*8+\offset
-	.endm
-
-/*
- * frame that enables passing a complete pt_regs to a C function.
- */
-	.macro DEFAULT_FRAME start=1 offset=0
-	XCPT_FRAME \start, ORIG_RAX+\offset
-	CFI_REL_OFFSET rdi, RDI+\offset
-	CFI_REL_OFFSET rsi, RSI+\offset
-	CFI_REL_OFFSET rdx, RDX+\offset
-	CFI_REL_OFFSET rcx, RCX+\offset
-	CFI_REL_OFFSET rax, RAX+\offset
-	CFI_REL_OFFSET r8, R8+\offset
-	CFI_REL_OFFSET r9, R9+\offset
-	CFI_REL_OFFSET r10, R10+\offset
-	CFI_REL_OFFSET r11, R11+\offset
-	CFI_REL_OFFSET rbx, RBX+\offset
-	CFI_REL_OFFSET rbp, RBP+\offset
-	CFI_REL_OFFSET r12, R12+\offset
-	CFI_REL_OFFSET r13, R13+\offset
-	CFI_REL_OFFSET r14, R14+\offset
-	CFI_REL_OFFSET r15, R15+\offset
-	.endm
-
-/*
  * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
  *
  * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
@@ -196,12 +138,6 @@ ENDPROC(native_usergs_sysret64)
  */
 
 ENTRY(system_call)
-	CFI_STARTPROC	simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,0
-	CFI_REGISTER	rip,rcx
-	/*CFI_REGISTER	rflags,r11*/
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -219,8 +155,8 @@ GLOBAL(system_call_after_swapgs)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi $__USER_DS			/* pt_regs->ss */
-	pushq_cfi PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
+	pushq $__USER_DS			/* pt_regs->ss */
+	pushq PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
 	/*
 	 * Re-enable interrupts.
 	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
@@ -229,22 +165,20 @@ GLOBAL(system_call_after_swapgs)
 	 * with using rsp_scratch:
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq_cfi	%r11			/* pt_regs->flags */
-	pushq_cfi	$__USER_CS		/* pt_regs->cs */
-	pushq_cfi	%rcx			/* pt_regs->ip */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
-	pushq_cfi_reg	rdi			/* pt_regs->di */
-	pushq_cfi_reg	rsi			/* pt_regs->si */
-	pushq_cfi_reg	rdx			/* pt_regs->dx */
-	pushq_cfi_reg	rcx			/* pt_regs->cx */
-	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
-	pushq_cfi_reg	r8			/* pt_regs->r8 */
-	pushq_cfi_reg	r9			/* pt_regs->r9 */
-	pushq_cfi_reg	r10			/* pt_regs->r10 */
-	pushq_cfi_reg	r11			/* pt_regs->r11 */
+	pushq	%r11			/* pt_regs->flags */
+	pushq	$__USER_CS		/* pt_regs->cs */
+	pushq	%rcx			/* pt_regs->ip */
+	pushq	%rax			/* pt_regs->orig_ax */
+	pushq	%rdi			/* pt_regs->di */
+	pushq	%rsi			/* pt_regs->si */
+	pushq	%rdx			/* pt_regs->dx */
+	pushq	%rcx			/* pt_regs->cx */
+	pushq	$-ENOSYS		/* pt_regs->ax */
+	pushq	%r8			/* pt_regs->r8 */
+	pushq	%r9			/* pt_regs->r9 */
+	pushq	%r10			/* pt_regs->r10 */
+	pushq	%r11			/* pt_regs->r11 */
 	sub	$(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 6*8
 
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 	jnz tracesys
@@ -282,13 +216,9 @@ GLOBAL(system_call_after_swapgs)
 	testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 	jnz int_ret_from_sys_call_irqs_off	/* Go to the slow path */
 
-	CFI_REMEMBER_STATE
-
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq	RIP(%rsp),%rcx
-	CFI_REGISTER	rip,rcx
 	movq	EFLAGS(%rsp),%r11
-	/*CFI_REGISTER	rflags,r11*/
 	movq	RSP(%rsp),%rsp
 	/*
 	 * 64bit SYSRET restores rip from rcx,
@@ -307,8 +237,6 @@ GLOBAL(system_call_after_swapgs)
 	 */
 	USERGS_SYSRET64
 
-	CFI_RESTORE_STATE
-
 	/* Do syscall entry tracing */
 tracesys:
 	movq %rsp, %rdi
@@ -374,9 +302,9 @@ GLOBAL(int_with_check)
 	jnc  int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq_cfi %rdi
+	pushq %rdi
 	SCHEDULE_USER
-	popq_cfi %rdi
+	popq %rdi
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
@@ -389,10 +317,10 @@ GLOBAL(int_with_check)
 	/* Check for syscall exit trace */
 	testl $_TIF_WORK_SYSCALL_EXIT,%edx
 	jz int_signal
-	pushq_cfi %rdi
+	pushq %rdi
 	leaq 8(%rsp),%rdi	# &ptregs -> arg1
 	call syscall_trace_leave
-	popq_cfi %rdi
+	popq %rdi
 	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
 	jmp int_restore_rest
 
@@ -475,27 +403,21 @@ GLOBAL(int_with_check)
 	 * perf profiles.  Nothing jumps here.
 	 */
 syscall_return_via_sysret:
-	CFI_REMEMBER_STATE
 	/* rcx and r11 are already restored (see code above) */
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq RSP(%rsp),%rsp
 	USERGS_SYSRET64
-	CFI_RESTORE_STATE
 
 opportunistic_sysret_failed:
 	SWAPGS
 	jmp	restore_c_regs_and_iret
-	CFI_ENDPROC
 END(system_call)
 
 
 	.macro FORK_LIKE func
 ENTRY(stub_\func)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8		/* offset 8: return address */
 	SAVE_EXTRA_REGS 8
 	jmp sys_\func
-	CFI_ENDPROC
 END(stub_\func)
 	.endm
 
@@ -504,8 +426,6 @@ END(stub_\func)
 	FORK_LIKE  vfork
 
 ENTRY(stub_execve)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	sys_execve
 return_from_execve:
 	testl	%eax, %eax
@@ -515,11 +435,9 @@ ENTRY(stub_execve)
 1:
 	/* must use IRET code path (pt_regs->cs may have changed) */
 	addq	$8, %rsp
-	CFI_ADJUST_CFA_OFFSET -8
 	ZERO_EXTRA_REGS
 	movq	%rax,RAX(%rsp)
 	jmp	int_ret_from_sys_call
-	CFI_ENDPROC
 END(stub_execve)
 /*
  * Remaining execve stubs are only 7 bytes long.
@@ -527,32 +445,23 @@ END(stub_execve)
  */
 	.align	8
 GLOBAL(stub_execveat)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	sys_execveat
 	jmp	return_from_execve
-	CFI_ENDPROC
 END(stub_execveat)
 
 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
 	.align	8
 GLOBAL(stub_x32_execve)
 GLOBAL(stub32_execve)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	compat_sys_execve
 	jmp	return_from_execve
-	CFI_ENDPROC
 END(stub32_execve)
 END(stub_x32_execve)
 	.align	8
 GLOBAL(stub_x32_execveat)
 GLOBAL(stub32_execveat)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	call	compat_sys_execveat
 	jmp	return_from_execve
-	CFI_ENDPROC
 END(stub32_execveat)
 END(stub_x32_execveat)
 #endif
@@ -562,8 +471,6 @@ END(stub_x32_execveat)
  * This cannot be done with SYSRET, so use the IRET return path instead.
  */
 ENTRY(stub_rt_sigreturn)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	/*
 	 * SAVE_EXTRA_REGS result is not normally needed:
 	 * sigreturn overwrites all pt_regs->GPREGS.
@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
 	call sys_rt_sigreturn
 return_from_stub:
 	addq	$8, %rsp
-	CFI_ADJUST_CFA_OFFSET -8
 	RESTORE_EXTRA_REGS
 	movq %rax,RAX(%rsp)
 	jmp int_ret_from_sys_call
-	CFI_ENDPROC
 END(stub_rt_sigreturn)
 
 #ifdef CONFIG_X86_X32_ABI
 ENTRY(stub_x32_rt_sigreturn)
-	CFI_STARTPROC
-	DEFAULT_FRAME 0, 8
 	SAVE_EXTRA_REGS 8
 	call sys32_x32_rt_sigreturn
 	jmp  return_from_stub
-	CFI_ENDPROC
 END(stub_x32_rt_sigreturn)
 #endif
 
@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
  * rdi: prev task we switched from
  */
 ENTRY(ret_from_fork)
-	DEFAULT_FRAME
 
 	LOCK ; btr $TIF_FORK,TI_flags(%r8)
 
-	pushq_cfi $0x0002
-	popfq_cfi				# reset kernel eflags
+	pushq $0x0002
+	popfq				# reset kernel eflags
 
 	call schedule_tail			# rdi: 'prev' task parameter
 
@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
 	movl $0, RAX(%rsp)
 	RESTORE_EXTRA_REGS
 	jmp int_ret_from_sys_call
-	CFI_ENDPROC
 END(ret_from_fork)
 
 /*
@@ -637,16 +537,13 @@ END(ret_from_fork)
  */
 	.align 8
 ENTRY(irq_entries_start)
-	INTR_FRAME
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
+	pushq $(~vector+0x80)	/* Note: always in signed byte range */
     vector=vector+1
 	jmp	common_interrupt
-	CFI_ADJUST_CFA_OFFSET -8
 	.align	8
     .endr
-	CFI_ENDPROC
 END(irq_entries_start)
 
 /*
@@ -688,17 +585,7 @@ END(irq_entries_start)
 	movq %rsp, %rsi
 	incl PER_CPU_VAR(irq_count)
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-	CFI_DEF_CFA_REGISTER	rsi
 	pushq %rsi
-	/*
-	 * For debugger:
-	 * "CFA (Current Frame Address) is the value on stack + offset"
-	 */
-	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
-			0x77 /* DW_OP_breg7 (rsp) */, 0, \
-			0x06 /* DW_OP_deref */, \
-			0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
-			0x22 /* DW_OP_plus */
 	/* We entered an interrupt context - irqs are off: */
 	TRACE_IRQS_OFF
 
@@ -711,7 +598,6 @@ END(irq_entries_start)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-	XCPT_FRAME
 	ASM_CLAC
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -723,11 +609,8 @@ END(irq_entries_start)
 
 	/* Restore saved previous stack */
 	popq %rsi
-	CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
 	/* return code expects complete pt_regs - adjust rsp accordingly: */
 	leaq -RBP(%rsi),%rsp
-	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET	RBP
 
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
@@ -743,7 +626,6 @@ END(irq_entries_start)
 	LOCKDEP_SYS_EXIT_IRQ
 	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
-	CFI_REMEMBER_STATE
 	jnz  retint_careful
 
 retint_swapgs:		/* return to user-space */
@@ -807,8 +689,8 @@ ENTRY(native_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
-	pushq_cfi %rax
-	pushq_cfi %rdi
+	pushq %rax
+	pushq %rdi
 	SWAPGS
 	movq PER_CPU_VAR(espfix_waddr),%rdi
 	movq %rax,(0*8)(%rdi)	/* RAX */
@@ -823,24 +705,23 @@ ENTRY(native_iret)
 	movq (5*8)(%rsp),%rax	/* RSP */
 	movq %rax,(4*8)(%rdi)
 	andl $0xffff0000,%eax
-	popq_cfi %rdi
+	popq %rdi
 	orq PER_CPU_VAR(espfix_stack),%rax
 	SWAPGS
 	movq %rax,%rsp
-	popq_cfi %rax
+	popq %rax
 	jmp native_irq_return_iret
 #endif
 
 	/* edi: workmask, edx: work */
 retint_careful:
-	CFI_RESTORE_STATE
 	bt    $TIF_NEED_RESCHED,%edx
 	jnc   retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq_cfi %rdi
+	pushq %rdi
 	SCHEDULE_USER
-	popq_cfi %rdi
+	popq %rdi
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
@@ -862,7 +743,6 @@ ENTRY(native_iret)
 	GET_THREAD_INFO(%rcx)
 	jmp retint_with_reschedule
 
-	CFI_ENDPROC
 END(common_interrupt)
 
 /*
@@ -870,13 +750,11 @@ END(common_interrupt)
  */
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
-	INTR_FRAME
 	ASM_CLAC
-	pushq_cfi $~(\num)
+	pushq $~(\num)
 .Lcommon_\sym:
 	interrupt \do_sym
 	jmp ret_from_intr
-	CFI_ENDPROC
 END(\sym)
 .endm
 
@@ -966,24 +844,17 @@ ENTRY(\sym)
 	.error "using shift_ist requires paranoid=1"
 	.endif
 
-	.if \has_error_code
-	XCPT_FRAME
-	.else
-	INTR_FRAME
-	.endif
-
 	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 
 	.ifeq \has_error_code
-	pushq_cfi $-1			/* ORIG_RAX: no syscall to restart */
+	pushq $-1			/* ORIG_RAX: no syscall to restart */
 	.endif
 
 	ALLOC_PT_GPREGS_ON_STACK
 
 	.if \paranoid
 	.if \paranoid == 1
-	CFI_REMEMBER_STATE
 	testb	$3, CS(%rsp)		/* If coming from userspace, switch */
 	jnz 1f				/* stacks. */
 	.endif
@@ -993,8 +864,6 @@ ENTRY(\sym)
 	.endif
 	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
 
-	DEFAULT_FRAME 0
-
 	.if \paranoid
 	.if \shift_ist != -1
 	TRACE_IRQS_OFF_DEBUG		/* reload IDT in case of recursion */
@@ -1030,7 +899,6 @@ ENTRY(\sym)
 	.endif
 
 	.if \paranoid == 1
-	CFI_RESTORE_STATE
 	/*
 	 * Paranoid entry from userspace.  Switch stacks and treat it
 	 * as a normal entry.  This means that paranoid handlers
@@ -1039,7 +907,6 @@ ENTRY(\sym)
 1:
 	call error_entry
 
-	DEFAULT_FRAME 0
 
 	movq %rsp,%rdi			/* pt_regs pointer */
 	call sync_regs
@@ -1058,8 +925,6 @@ ENTRY(\sym)
 
 	jmp error_exit			/* %ebx: no swapgs flag */
 	.endif
-
-	CFI_ENDPROC
 END(\sym)
 .endm
 
@@ -1092,17 +957,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
 	/* Reload gs selector with exception handling */
 	/* edi:  new selector */
 ENTRY(native_load_gs_index)
-	CFI_STARTPROC
-	pushfq_cfi
+	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
 gs_change:
 	movl %edi,%gs
 2:	mfence		/* workaround */
 	SWAPGS
-	popfq_cfi
+	popfq
 	ret
-	CFI_ENDPROC
 END(native_load_gs_index)
 
 	_ASM_EXTABLE(gs_change,bad_gs)
@@ -1117,22 +980,15 @@ END(native_load_gs_index)
 
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(do_softirq_own_stack)
-	CFI_STARTPROC
-	pushq_cfi %rbp
-	CFI_REL_OFFSET rbp,0
+	pushq %rbp
 	mov  %rsp,%rbp
-	CFI_DEF_CFA_REGISTER rbp
 	incl PER_CPU_VAR(irq_count)
 	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
 	push  %rbp			# backlink for old unwinder
 	call __do_softirq
 	leaveq
-	CFI_RESTORE		rbp
-	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET   -8
 	decl PER_CPU_VAR(irq_count)
 	ret
-	CFI_ENDPROC
 END(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
@@ -1152,28 +1008,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
  * activation and restart the handler using the previous one.
  */
 ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
-	CFI_STARTPROC
 /*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
  * see the correct pointer to the pt_regs
  */
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
-	CFI_ENDPROC
-	DEFAULT_FRAME
 11:	incl PER_CPU_VAR(irq_count)
 	movq %rsp,%rbp
-	CFI_DEF_CFA_REGISTER rbp
 	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
 	pushq %rbp			# backlink for old unwinder
 	call xen_evtchn_do_upcall
 	popq %rsp
-	CFI_DEF_CFA_REGISTER rsp
 	decl PER_CPU_VAR(irq_count)
 #ifndef CONFIG_PREEMPT
 	call xen_maybe_preempt_hcall
 #endif
 	jmp  error_exit
-	CFI_ENDPROC
 END(xen_do_hypervisor_callback)
 
 /*
@@ -1190,16 +1040,8 @@ END(xen_do_hypervisor_callback)
  * with its current contents: any discrepancy means we in category 1.
  */
 ENTRY(xen_failsafe_callback)
-	INTR_FRAME 1 (6*8)
-	/*CFI_REL_OFFSET gs,GS*/
-	/*CFI_REL_OFFSET fs,FS*/
-	/*CFI_REL_OFFSET es,ES*/
-	/*CFI_REL_OFFSET ds,DS*/
-	CFI_REL_OFFSET r11,8
-	CFI_REL_OFFSET rcx,0
 	movl %ds,%ecx
 	cmpw %cx,0x10(%rsp)
-	CFI_REMEMBER_STATE
 	jne 1f
 	movl %es,%ecx
 	cmpw %cx,0x18(%rsp)
@@ -1212,29 +1054,21 @@ ENTRY(xen_failsafe_callback)
 	jne 1f
 	/* All segments match their saved values => Category 2 (Bad IRET). */
 	movq (%rsp),%rcx
-	CFI_RESTORE rcx
 	movq 8(%rsp),%r11
-	CFI_RESTORE r11
 	addq $0x30,%rsp
-	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq_cfi $0	/* RIP */
-	pushq_cfi %r11
-	pushq_cfi %rcx
+	pushq $0	/* RIP */
+	pushq %r11
+	pushq %rcx
 	jmp general_protection
-	CFI_RESTORE_STATE
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 	movq (%rsp),%rcx
-	CFI_RESTORE rcx
 	movq 8(%rsp),%r11
-	CFI_RESTORE r11
 	addq $0x30,%rsp
-	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushq $-1 /* orig_ax = -1 => not a system call */
 	ALLOC_PT_GPREGS_ON_STACK
 	SAVE_C_REGS
 	SAVE_EXTRA_REGS
 	jmp error_exit
-	CFI_ENDPROC
 END(xen_failsafe_callback)
 
 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
@@ -1270,7 +1104,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
-	XCPT_FRAME 1 15*8
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1282,7 +1115,6 @@ ENTRY(paranoid_entry)
 	SWAPGS
 	xorl %ebx,%ebx
 1:	ret
-	CFI_ENDPROC
 END(paranoid_entry)
 
 /*
@@ -1297,7 +1129,6 @@ END(paranoid_entry)
  */
 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
 ENTRY(paranoid_exit)
-	DEFAULT_FRAME
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF_DEBUG
 	testl %ebx,%ebx				/* swapgs needed? */
@@ -1312,7 +1143,6 @@ ENTRY(paranoid_exit)
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
 	INTERRUPT_RETURN
-	CFI_ENDPROC
 END(paranoid_exit)
 
 /*
@@ -1320,7 +1150,6 @@ END(paranoid_exit)
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(error_entry)
-	XCPT_FRAME 1 15*8
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1340,7 +1169,6 @@ ENTRY(error_entry)
 	 * for these here too.
 	 */
 error_kernelspace:
-	CFI_REL_OFFSET rcx, RCX+8
 	incl %ebx
 	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
@@ -1364,13 +1192,11 @@ ENTRY(error_entry)
 	mov %rax,%rsp
 	decl %ebx	/* Return to usergs */
 	jmp error_sti
-	CFI_ENDPROC
 END(error_entry)
 
 
 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
 ENTRY(error_exit)
-	DEFAULT_FRAME
 	movl %ebx,%eax
 	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
@@ -1384,12 +1210,10 @@ ENTRY(error_exit)
 	andl %edi,%edx
 	jnz retint_careful
 	jmp retint_swapgs
-	CFI_ENDPROC
 END(error_exit)
 
 /* Runs on exception stack */
 ENTRY(nmi)
-	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1424,8 +1248,7 @@ ENTRY(nmi)
 	 */
 
 	/* Use %rdx as our temp variable throughout */
-	pushq_cfi %rdx
-	CFI_REL_OFFSET rdx, 0
+	pushq %rdx
 
 	/*
 	 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1459,8 +1282,6 @@ ENTRY(nmi)
 	jb	first_nmi
 	/* Ah, it is within the NMI stack, treat it as nested */
 
-	CFI_REMEMBER_STATE
-
 nested_nmi:
 	/*
 	 * Do nothing if we interrupted the fixup in repeat_nmi.
@@ -1478,26 +1299,22 @@ ENTRY(nmi)
 	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
 	leaq -1*8(%rsp), %rdx
 	movq %rdx, %rsp
-	CFI_ADJUST_CFA_OFFSET 1*8
 	leaq -10*8(%rsp), %rdx
-	pushq_cfi $__KERNEL_DS
-	pushq_cfi %rdx
-	pushfq_cfi
-	pushq_cfi $__KERNEL_CS
-	pushq_cfi $repeat_nmi
+	pushq $__KERNEL_DS
+	pushq %rdx
+	pushfq
+	pushq $__KERNEL_CS
+	pushq $repeat_nmi
 
 	/* Put stack back */
 	addq $(6*8), %rsp
-	CFI_ADJUST_CFA_OFFSET -6*8
 
 nested_nmi_out:
-	popq_cfi %rdx
-	CFI_RESTORE rdx
+	popq %rdx
 
 	/* No need to check faults here */
 	INTERRUPT_RETURN
 
-	CFI_RESTORE_STATE
 first_nmi:
 	/*
 	 * Because nested NMIs will use the pushed location that we
@@ -1536,22 +1353,19 @@ ENTRY(nmi)
 	 */
 	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
 	movq (%rsp), %rdx
-	CFI_RESTORE rdx
 
 	/* Set the NMI executing variable on the stack. */
-	pushq_cfi $1
+	pushq $1
 
 	/*
 	 * Leave room for the "copied" frame
 	 */
 	subq $(5*8), %rsp
-	CFI_ADJUST_CFA_OFFSET 5*8
 
 	/* Copy the stack frame to the Saved frame */
 	.rept 5
-	pushq_cfi 11*8(%rsp)
+	pushq 11*8(%rsp)
 	.endr
-	CFI_DEF_CFA_OFFSET 5*8
 
 	/* Everything up to here is safe from nested NMIs */
 
@@ -1574,12 +1388,10 @@ ENTRY(nmi)
 
 	/* Make another copy, this one may be modified by nested NMIs */
 	addq $(10*8), %rsp
-	CFI_ADJUST_CFA_OFFSET -10*8
 	.rept 5
-	pushq_cfi -6*8(%rsp)
+	pushq -6*8(%rsp)
 	.endr
 	subq $(5*8), %rsp
-	CFI_DEF_CFA_OFFSET 5*8
 end_repeat_nmi:
 
 	/*
@@ -1587,7 +1399,7 @@ ENTRY(nmi)
 	 * NMI if the first NMI took an exception and reset our iret stack
 	 * so that we repeat another NMI.
 	 */
-	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
+	pushq $-1		/* ORIG_RAX: no syscall to restart */
 	ALLOC_PT_GPREGS_ON_STACK
 
 	/*
@@ -1598,7 +1410,6 @@ ENTRY(nmi)
 	 * exceptions might do.
 	 */
 	call paranoid_entry
-	DEFAULT_FRAME 0
 
 	/*
 	 * Save off the CR2 register. If we take a page fault in the NMI then
@@ -1635,13 +1446,10 @@ ENTRY(nmi)
 	/* Clear the NMI executing stack variable */
 	movq $0, 5*8(%rsp)
 	jmp irq_return
-	CFI_ENDPROC
 END(nmi)
 
 ENTRY(ignore_sysret)
-	CFI_STARTPROC
 	mov $-ENOSYS,%eax
 	sysret
-	CFI_ENDPROC
 END(ignore_sysret)
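
The 64-bit entry paths now construct pt_regs the same way: a run of bare
pushq instructions for the slots that are written, plus one %rsp adjustment
for the slots that are deliberately left unwritten. A hand-expanded sketch of
the system_call sequence, with placeholder immediates standing in for
__USER_DS/__USER_CS and rsp_scratch, and -38 assuming ENOSYS == 38:

	.text
	.globl	build_ptregs_demo
build_ptregs_demo:
	pushq	$0			/* ss slot (__USER_DS at the real entry) */
	pushq	$0			/* sp slot (rsp_scratch at the real entry) */
	pushq	%r11			/* flags */
	pushq	$0			/* cs slot (__USER_CS at the real entry) */
	pushq	%rcx			/* ip */
	pushq	%rax			/* orig_ax */
	pushq	%rdi			/* di */
	pushq	%rsi			/* si */
	pushq	%rdx			/* dx */
	pushq	%rcx			/* cx */
	pushq	$-38			/* ax, preloaded with -ENOSYS */
	pushq	%r8			/* r8 */
	pushq	%r9			/* r9 */
	pushq	%r10			/* r10 */
	pushq	%r11			/* r11 */
	subq	$(6*8), %rsp		/* bp, bx, r12..r15: slots exist, not written */
	addq	$(21*8), %rsp		/* 15 pushes + 6 skipped slots, torn down again */
	ret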
 
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 00933d5e992f..9b0ca8fe80fc 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -11,26 +11,23 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
-	pushfl_cfi
+	pushfl
 	cli
 .endm
 
 .macro UNLOCK reg
-	popfl_cfi
+	popfl
 .endm
 
 #define BEGIN(op) \
 .macro endp; \
-	CFI_ENDPROC; \
 ENDPROC(atomic64_##op##_386); \
 .purgem endp; \
 .endm; \
 ENTRY(atomic64_##op##_386); \
-	CFI_STARTPROC; \
 	LOCK v;
 
 #define ENDP endp
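
On a UP 386, "atomic" 64-bit operations are faked by disabling interrupts
around a plain read-modify-write; pushfl/popfl save and restore IF, so the
previous interrupt state comes back intact. A sketch of one such operation
under that scheme, with a made-up stack-based calling convention (the real
BEGIN/ENDP-generated functions take their operands in registers):

	.text
	.globl	atomic64_add_demo	/* args on stack: ptr, low, high */
atomic64_add_demo:
	pushfl				/* LOCK: save EFLAGS, including IF */
	cli				/* ...then block interrupts */
	movl	8(%esp), %ecx		/* ptr (args sit above the saved flags) */
	movl	12(%esp), %eax		/* low 32 bits of the addend */
	movl	16(%esp), %edx		/* high 32 bits of the addend */
	addl	%eax, (%ecx)
	adcl	%edx, 4(%ecx)
	popfl				/* UNLOCK: restore the saved IF state */
	ret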
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 082a85167a5b..db3ae85440ff 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -11,7 +11,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 .macro read64 reg
 	movl %ebx, %eax
@@ -22,16 +21,11 @@
 .endm
 
 ENTRY(atomic64_read_cx8)
-	CFI_STARTPROC
-
 	read64 %ecx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_read_cx8)
 
 ENTRY(atomic64_set_cx8)
-	CFI_STARTPROC
-
 1:
 /* we don't need LOCK_PREFIX since aligned 64-bit writes
  * are atomic on 586 and newer */
@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
 	jne 1b
 
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_set_cx8)
 
 ENTRY(atomic64_xchg_cx8)
-	CFI_STARTPROC
-
 1:
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
 
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_xchg_cx8)
 
 .macro addsub_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebp
-	pushl_cfi_reg ebx
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
+	pushl %ebp
+	pushl %ebx
+	pushl %esi
+	pushl %edi
 
 	movl %eax, %esi
 	movl %edx, %edi
@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
 	movl %ebx, %eax
 	movl %ecx, %edx
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebx
-	popl_cfi_reg ebp
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm
 
@@ -93,8 +81,7 @@ addsub_return sub sub sbb
 
 .macro incdec_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx
 
 	read64 %esi
 1:
@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
 	movl %ebx, %eax
 	movl %ecx, %edx
-	popl_cfi_reg ebx
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm
 
@@ -119,8 +105,7 @@ incdec_return inc add adc
 incdec_return dec sub sbb
 
 ENTRY(atomic64_dec_if_positive_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx
 
 	read64 %esi
 1:
@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
 2:
 	movl %ebx, %eax
 	movl %ecx, %edx
-	popl_cfi_reg ebx
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)
 
 ENTRY(atomic64_add_unless_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebp
-	pushl_cfi_reg ebx
+	pushl %ebp
+	pushl %ebx
 /* these just push these two parameters on the stack */
-	pushl_cfi_reg edi
-	pushl_cfi_reg ecx
+	pushl %edi
+	pushl %ecx
 
 	movl %eax, %ebp
 	movl %edx, %edi
@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
 	movl $1, %eax
 3:
 	addl $8, %esp
-	CFI_ADJUST_CFA_OFFSET -8
-	popl_cfi_reg ebx
-	popl_cfi_reg ebp
+	popl %ebx
+	popl %ebp
 	ret
 4:
 	cmpl %edx, 4(%esp)
 	jne 2b
 	xorl %eax, %eax
 	jmp 3b
-	CFI_ENDPROC
 ENDPROC(atomic64_add_unless_cx8)
 
 ENTRY(atomic64_inc_not_zero_cx8)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
+	pushl %ebx
 
 	read64 %esi
 1:
@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
 
 	movl $1, %eax
 3:
-	popl_cfi_reg ebx
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 9bc944a91274..c1e623209853 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
 				
@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 	   * alignment for the unrolled loop.
 	   */		
 ENTRY(csum_partial)
-	CFI_STARTPROC
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %esi
+	pushl %ebx
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
 	movl 16(%esp),%ecx	# Function arg: int len
 	movl 12(%esp),%esi	# Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
 	jz 8f
 	roll $8, %eax
 8:
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
+	popl %ebx
+	popl %esi
 	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial)
 
 #else
@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
 /* Version for PentiumII/PPro */
 
 ENTRY(csum_partial)
-	CFI_STARTPROC
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %esi
+	pushl %ebx
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
 	movl 16(%esp),%ecx	# Function arg: int len
 	movl 12(%esp),%esi	# Function arg:	const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
 	jz 90f
 	roll $8, %eax
 90: 
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
+	popl %ebx
+	popl %esi
 	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial)
 				
 #endif
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 #define FP		12
 		
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
 	subl  $4,%esp	
-	CFI_ADJUST_CFA_OFFSET 4
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg ebx
+	pushl %edi
+	pushl %esi
+	pushl %ebx
 	movl ARGBASE+16(%esp),%eax	# sum
 	movl ARGBASE+12(%esp),%ecx	# len
 	movl ARGBASE+4(%esp),%esi	# src
@@ -401,12 +394,11 @@ DST(	movb %cl, (%edi)	)
 
 .previous
 
-	popl_cfi_reg ebx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi %ecx			# equivalent to addl $4,%esp
+	popl %ebx
+	popl %esi
+	popl %edi
+	popl %ecx			# equivalent to addl $4,%esp
 	ret	
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
 
 #else
@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
 #define ARGBASE 12
 		
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
+	pushl %ebx
+	pushl %edi
+	pushl %esi
 	movl ARGBASE+4(%esp),%esi	#src
 	movl ARGBASE+8(%esp),%edi	#dst	
 	movl ARGBASE+12(%esp),%ecx	#len
@@ -489,11 +480,10 @@ DST(	movb %dl, (%edi)         )
 	jmp  7b			
 .previous				
 
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebx
+	popl %esi
+	popl %edi
+	popl %ebx
 	ret
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
 				
 #undef ROUND
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index e67e579c93bd..a2fe51b00cce 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,4 @@
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -15,7 +14,6 @@
  * %rdi	- page
  */
 ENTRY(clear_page)
-	CFI_STARTPROC
 
 	ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp clear_page_c_e", X86_FEATURE_ERMS
@@ -24,11 +22,9 @@ ENTRY(clear_page)
 	xorl %eax,%eax
 	rep stosq
 	ret
-	CFI_ENDPROC
 ENDPROC(clear_page)
 
 ENTRY(clear_page_orig)
-	CFI_STARTPROC
 
 	xorl   %eax,%eax
 	movl   $4096/64,%ecx
@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
 	jnz	.Lloop
 	nop
 	ret
-	CFI_ENDPROC
 ENDPROC(clear_page_orig)
 
 ENTRY(clear_page_c_e)
-	CFI_STARTPROC
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
 	ret
-	CFI_ENDPROC
 ENDPROC(clear_page_c_e)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 40a172541ee2..9b330242e740 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -6,7 +6,6 @@
  *
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/percpu.h>
 
 .text
@@ -21,7 +20,6 @@
  * %al  : Operation successful
  */
 ENTRY(this_cpu_cmpxchg16b_emu)
-CFI_STARTPROC
 
 #
 # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -32,7 +30,7 @@ CFI_STARTPROC
 # *atomic* on a single cpu (as provided by the this_cpu_xx class of
 # macros).
 #
-	pushfq_cfi
+	pushfq
 	cli
 
 	cmpq PER_CPU_VAR((%rsi)), %rax
@@ -43,17 +41,13 @@ CFI_STARTPROC
 	movq %rbx, PER_CPU_VAR((%rsi))
 	movq %rcx, PER_CPU_VAR(8(%rsi))
 
-	CFI_REMEMBER_STATE
-	popfq_cfi
+	popfq
 	mov $1, %al
 	ret
 
-	CFI_RESTORE_STATE
 .Lnot_same:
-	popfq_cfi
+	popfq
 	xor %al,%al
 	ret
 
-CFI_ENDPROC
-
 ENDPROC(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index b4807fce5177..ad5349778490 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 .text
 
@@ -20,14 +19,13 @@
  * %ecx : high 32 bits of new value
  */
 ENTRY(cmpxchg8b_emu)
-CFI_STARTPROC
 
 #
 # Emulate 'cmpxchg8b (%esi)' on UP except we don't
 # set the whole ZF thing (caller will just compare
 # eax:edx with the expected value)
 #
-	pushfl_cfi
+	pushfl
 	cli
 
 	cmpl  (%esi), %eax
@@ -38,18 +36,15 @@ CFI_STARTPROC
 	movl %ebx,  (%esi)
 	movl %ecx, 4(%esi)
 
-	CFI_REMEMBER_STATE
-	popfl_cfi
+	popfl
 	ret
 
-	CFI_RESTORE_STATE
 .Lnot_same:
 	movl  (%esi), %eax
 .Lhalf_same:
 	movl 4(%esi), %edx
 
-	popfl_cfi
+	popfl
 	ret
 
-CFI_ENDPROC
 ENDPROC(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 8239dbcbf984..009f98216b7e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,6 @@
 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -13,22 +12,16 @@
  */
 	ALIGN
 ENTRY(copy_page)
-	CFI_STARTPROC
 	ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
 	movl	$4096/8, %ecx
 	rep	movsq
 	ret
-	CFI_ENDPROC
 ENDPROC(copy_page)
 
 ENTRY(copy_page_regs)
-	CFI_STARTPROC
 	subq	$2*8,	%rsp
-	CFI_ADJUST_CFA_OFFSET 2*8
 	movq	%rbx,	(%rsp)
-	CFI_REL_OFFSET rbx, 0
 	movq	%r12,	1*8(%rsp)
-	CFI_REL_OFFSET r12, 1*8
 
 	movl	$(4096/64)-5,	%ecx
 	.p2align 4
@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
 	jnz	.Loop2
 
 	movq	(%rsp), %rbx
-	CFI_RESTORE rbx
 	movq	1*8(%rsp), %r12
-	CFI_RESTORE r12
 	addq	$2*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -2*8
 	ret
-	CFI_ENDPROC
 ENDPROC(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index e4b3beee83bd..982ce34f4a9b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
@@ -18,7 +17,6 @@
 
 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%rax)
 	movq %rdi,%rcx
 	addq %rdx,%rcx
@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
 		      X86_FEATURE_REP_GOOD,			\
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
-	CFI_ENDPROC
 ENDPROC(_copy_to_user)
 
 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%rax)
 	movq %rsi,%rcx
 	addq %rdx,%rcx
@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
 		      X86_FEATURE_REP_GOOD,			\
 		      "jmp copy_user_enhanced_fast_string",	\
 		      X86_FEATURE_ERMS
-	CFI_ENDPROC
 ENDPROC(_copy_from_user)
 
 	.section .fixup,"ax"
 	/* must zero dest */
 ENTRY(bad_from_user)
 bad_from_user:
-	CFI_STARTPROC
 	movl %edx,%ecx
 	xorl %eax,%eax
 	rep
@@ -62,7 +56,6 @@ ENTRY(bad_from_user)
 bad_to_user:
 	movl %edx,%eax
 	ret
-	CFI_ENDPROC
 ENDPROC(bad_from_user)
 	.previous
 
@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_generic_unrolled)
-	CFI_STARTPROC
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE(19b,40b)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
-	CFI_ENDPROC
 ENDPROC(copy_user_generic_unrolled)
 
 /* Some CPUs run faster using the string copy instructions.
@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_generic_string)
-	CFI_STARTPROC
 	ASM_STAC
 	cmpl $8,%edx
 	jb 2f		/* less than 8 bytes, go to byte copy loop */
@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
 
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
-	CFI_ENDPROC
 ENDPROC(copy_user_generic_string)
 
 /*
@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_enhanced_fast_string)
-	CFI_STARTPROC
 	ASM_STAC
 	movl %edx,%ecx
 1:	rep
@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
 	.previous
 
 	_ASM_EXTABLE(1b,12b)
-	CFI_ENDPROC
 ENDPROC(copy_user_enhanced_fast_string)
 
 /*
@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
  * This will force destination/source out of cache for more performance.
  */
 ENTRY(__copy_user_nocache)
-	CFI_STARTPROC
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE(19b,40b)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
-	CFI_ENDPROC
 ENDPROC(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 9734182966f3..7e48807b2fa1 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -6,7 +6,6 @@
  * for more details. No warranty for anything given at all.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
 
@@ -47,23 +46,16 @@
 
 
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
 	cmpl	$3*64, %edx
 	jle	.Lignore
 
 .Lignore:
 	subq  $7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET 7*8
 	movq  %rbx, 2*8(%rsp)
-	CFI_REL_OFFSET rbx, 2*8
 	movq  %r12, 3*8(%rsp)
-	CFI_REL_OFFSET r12, 3*8
 	movq  %r14, 4*8(%rsp)
-	CFI_REL_OFFSET r14, 4*8
 	movq  %r13, 5*8(%rsp)
-	CFI_REL_OFFSET r13, 5*8
 	movq  %rbp, 6*8(%rsp)
-	CFI_REL_OFFSET rbp, 6*8
 
 	movq  %r8, (%rsp)
 	movq  %r9, 1*8(%rsp)
@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
 	addl %ebx, %eax
 	adcl %r9d, %eax		/* carry */
 
-	CFI_REMEMBER_STATE
 .Lende:
 	movq 2*8(%rsp), %rbx
-	CFI_RESTORE rbx
 	movq 3*8(%rsp), %r12
-	CFI_RESTORE r12
 	movq 4*8(%rsp), %r14
-	CFI_RESTORE r14
 	movq 5*8(%rsp), %r13
-	CFI_RESTORE r13
 	movq 6*8(%rsp), %rbp
-	CFI_RESTORE rbp
 	addq $7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -7*8
 	ret
-	CFI_RESTORE_STATE
 
 	/* Exception handlers. Very simple, zeroing is done in the wrappers */
 .Lbad_source:
@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
 	jz   .Lende
 	movl $-EFAULT, (%rax)
 	jmp .Lende
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index a4512359656a..46668cda4ffd 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
@@ -36,7 +35,6 @@
 
 	.text
 ENTRY(__get_user_1)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-	CFI_STARTPROC
 	add $1,%_ASM_AX
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
-	CFI_STARTPROC
 	add $3,%_ASM_AX
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-	CFI_STARTPROC
 #ifdef CONFIG_X86_64
 	add $7,%_ASM_AX
 	jc bad_get_user
@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
 	ASM_CLAC
 	ret
 #endif
-	CFI_ENDPROC
 ENDPROC(__get_user_8)
 
 
 bad_get_user:
-	CFI_STARTPROC
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 END(bad_get_user)
 
 #ifdef CONFIG_X86_32
 bad_get_user_8:
-	CFI_STARTPROC
 	xor %edx,%edx
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 END(bad_get_user_8)
 #endif
 
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index 05a95e713da8..33147fef3452 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -16,15 +16,12 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * override generic version in lib/iomap_copy.c
  */
 ENTRY(__iowrite32_copy)
-	CFI_STARTPROC
 	movl %edx,%ecx
 	rep movsd
 	ret
-	CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index b046664f5a1c..16698bba87de 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -2,7 +2,6 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeature.h>
-#include <asm/dwarf2.h>
 #include <asm/alternative-asm.h>
 
 /*
@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
 ENDPROC(memcpy_erms)
 
 ENTRY(memcpy_orig)
-	CFI_STARTPROC
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
 
 .Lend:
 	retq
-	CFI_ENDPROC
 ENDPROC(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 0f8a0d0331b9..ca2afdd6d98e 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,6 @@
  *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -27,7 +26,6 @@
 
 ENTRY(memmove)
 ENTRY(__memmove)
-	CFI_STARTPROC
 
 	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
@@ -207,6 +205,5 @@ ENTRY(__memmove)
 	movb %r11b, (%rdi)
 13:
 	retq
-	CFI_ENDPROC
 ENDPROC(__memmove)
 ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 93118fb23976..2661fad05827 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,6 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -66,7 +65,6 @@ ENTRY(memset_erms)
 ENDPROC(memset_erms)
 
 ENTRY(memset_orig)
-	CFI_STARTPROC
 	movq %rdi,%r10
 
 	/* expand byte value  */
@@ -78,7 +76,6 @@ ENTRY(memset_orig)
 	movl  %edi,%r9d
 	andl  $7,%r9d
 	jnz  .Lbad_alignment
-	CFI_REMEMBER_STATE
 .Lafter_bad_alignment:
 
 	movq  %rdx,%rcx
@@ -128,7 +125,6 @@ ENTRY(memset_orig)
 	movq	%r10,%rax
 	ret
 
-	CFI_RESTORE_STATE
 .Lbad_alignment:
 	cmpq $7,%rdx
 	jbe	.Lhandle_7
@@ -139,5 +135,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-	CFI_ENDPROC
 ENDPROC(memset_orig)
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 3ca5218fbece..c81556409bbb 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,6 +1,5 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
 
@@ -13,9 +12,8 @@
  */
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushq_cfi_reg rbx
-	pushq_cfi_reg rbp
+	pushq %rbx
+	pushq %rbp
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
 	movl    20(%rdi), %ebp
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
-	CFI_REMEMBER_STATE
 1:	\op
 2:	movl    %eax, (%r10)
 	movl	%r11d, %eax	/* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
 	movl    %ebp, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq_cfi_reg rbp
-	popq_cfi_reg rbx
+	popq %rbp
+	popq %rbx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, %r11d
 	jmp     2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
 
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg ebp
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
-	pushl_cfi $0              /* Return value */
-	pushl_cfi %eax
+	pushl %ebx
+	pushl %ebp
+	pushl %esi
+	pushl %edi
+	pushl $0              /* Return value */
+	pushl %eax
 	movl    4(%eax), %ecx
 	movl    8(%eax), %edx
 	movl    12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
 	movl    24(%eax), %esi
 	movl    28(%eax), %edi
 	movl    (%eax), %eax
-	CFI_REMEMBER_STATE
 1:	\op
-2:	pushl_cfi %eax
+2:	pushl %eax
 	movl    4(%esp), %eax
-	popl_cfi (%eax)
+	popl (%eax)
 	addl    $4, %esp
-	CFI_ADJUST_CFA_OFFSET -4
 	movl    %ecx, 4(%eax)
 	movl    %edx, 8(%eax)
 	movl    %ebx, 12(%eax)
 	movl    %ebp, 20(%eax)
 	movl    %esi, 24(%eax)
 	movl    %edi, 28(%eax)
-	popl_cfi %eax
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebp
-	popl_cfi_reg ebx
+	popl %eax
+	popl %edi
+	popl %esi
+	popl %ebp
+	popl %ebx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl    $-EIO, 4(%esp)
 	jmp     2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index fc6ba17a7eec..e0817a12d323 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -11,7 +11,6 @@
  * return value.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
@@ -30,11 +29,9 @@
  * as they get called from within inline assembly.
  */
 
-#define ENTER	CFI_STARTPROC ; \
-		GET_THREAD_INFO(%_ASM_BX)
+#define ENTER	GET_THREAD_INFO(%_ASM_BX)
 #define EXIT	ASM_CLAC ;	\
-		ret ;		\
-		CFI_ENDPROC
+		ret
 
 .text
 ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 
 bad_put_user:
-	CFI_STARTPROC
 	movl $-EFAULT,%eax
 	EXIT
 END(bad_put_user)
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 2322abe4da3b..40027db99140 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -15,7 +15,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 #define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
 #define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
@@ -34,10 +33,10 @@
  */
 
 #define save_common_regs \
-	pushl_cfi_reg ecx
+	pushl %ecx
 
 #define restore_common_regs \
-	popl_cfi_reg ecx
+	popl %ecx
 
 	/* Avoid uglifying the argument copying x86-64 needs to do. */
 	.macro movq src, dst
@@ -64,50 +63,45 @@
  */
 
 #define save_common_regs \
-	pushq_cfi_reg rdi; \
-	pushq_cfi_reg rsi; \
-	pushq_cfi_reg rcx; \
-	pushq_cfi_reg r8;  \
-	pushq_cfi_reg r9;  \
-	pushq_cfi_reg r10; \
-	pushq_cfi_reg r11
+	pushq %rdi; \
+	pushq %rsi; \
+	pushq %rcx; \
+	pushq %r8;  \
+	pushq %r9;  \
+	pushq %r10; \
+	pushq %r11
 
 #define restore_common_regs \
-	popq_cfi_reg r11; \
-	popq_cfi_reg r10; \
-	popq_cfi_reg r9; \
-	popq_cfi_reg r8; \
-	popq_cfi_reg rcx; \
-	popq_cfi_reg rsi; \
-	popq_cfi_reg rdi
+	popq %r11; \
+	popq %r10; \
+	popq %r9; \
+	popq %r8; \
+	popq %rcx; \
+	popq %rsi; \
+	popq %rdi
 
 #endif
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_down_read_failed
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
-	CFI_STARTPROC
 	save_common_regs
 	movq %rax,%rdi
 	call rwsem_down_write_failed
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
-	CFI_STARTPROC
 	/* do nothing if still outstanding active readers */
 	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
 	jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
 	call rwsem_wake
 	restore_common_regs
 1:	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
 ENTRY(call_rwsem_downgrade_wake)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_downgrade_wake
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 5eb715087b80..e9acf5f4fc92 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -6,16 +6,14 @@
  */
 	#include <linux/linkage.h>
 	#include <asm/asm.h>
-	#include <asm/dwarf2.h>
 
 	/* put return address in eax (arg1) */
 	.macro THUNK name, func, put_ret_addr_in_eax=0
 	.globl \name
 \name:
-	CFI_STARTPROC
-	pushl_cfi_reg eax
-	pushl_cfi_reg ecx
-	pushl_cfi_reg edx
+	pushl %eax
+	pushl %ecx
+	pushl %edx
 
 	.if \put_ret_addr_in_eax
 	/* Place EIP in the arg1 */
@@ -23,11 +21,10 @@
 	.endif
 
 	call \func
-	popl_cfi_reg edx
-	popl_cfi_reg ecx
-	popl_cfi_reg eax
+	popl %edx
+	popl %ecx
+	popl %eax
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
 
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index f89ba4e93025..10f555e435e1 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -6,7 +6,6 @@
  * Subject to the GNU public license, v.2. No warranty of any kind.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm.h>
 
@@ -14,27 +13,25 @@
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
 	.globl \name
 \name:
-	CFI_STARTPROC
 
 	/* this one pushes 9 elems, the next one would be %rIP */
-	pushq_cfi_reg rdi
-	pushq_cfi_reg rsi
-	pushq_cfi_reg rdx
-	pushq_cfi_reg rcx
-	pushq_cfi_reg rax
-	pushq_cfi_reg r8
-	pushq_cfi_reg r9
-	pushq_cfi_reg r10
-	pushq_cfi_reg r11
+	pushq %rdi
+	pushq %rsi
+	pushq %rdx
+	pushq %rcx
+	pushq %rax
+	pushq %r8
+	pushq %r9
+	pushq %r10
+	pushq %r11
 
 	.if \put_ret_addr_in_rdi
 	/* 9*8(%rsp) is return addr on stack */
-	movq_cfi_restore 9*8, rdi
+	movq 9*8(%rsp), %rdi
 	.endif
 
 	call \func
 	jmp  restore
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
 
@@ -57,19 +54,16 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
  || defined(CONFIG_PREEMPT)
-	CFI_STARTPROC
-	CFI_ADJUST_CFA_OFFSET 9*8
 restore:
-	popq_cfi_reg r11
-	popq_cfi_reg r10
-	popq_cfi_reg r9
-	popq_cfi_reg r8
-	popq_cfi_reg rax
-	popq_cfi_reg rcx
-	popq_cfi_reg rdx
-	popq_cfi_reg rsi
-	popq_cfi_reg rdi
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rax
+	popq %rcx
+	popq %rdx
+	popq %rsi
+	popq %rdi
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(restore)
 #endif
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 6440221ced0d..4093216b3791 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -8,7 +8,6 @@
  * of the License.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * Calling convention :

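For reference, the pushl_cfi_reg / popl_cfi_reg helpers expanded away above
bundled each stack operation with its DWARF bookkeeping; they looked roughly
like this (paraphrased from the removed <asm/dwarf2.h>, ignoring the
CONFIG_AS_CFI=n fallbacks):

	.macro pushl_cfi_reg reg
	pushl %\reg
	CFI_ADJUST_CFA_OFFSET 4		/* %esp moved down 4; the CFA itself is unchanged */
	CFI_REL_OFFSET \reg, 0		/* caller's \reg saved at the new (%esp) */
	.endm

	.macro popl_cfi_reg reg
	popl %\reg
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE \reg		/* \reg holds the caller's value again */
	.endm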

* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-28 11:20     ` [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations Ingo Molnar
  2015-05-28 11:39       ` [PATCH v2] " Ingo Molnar
@ 2015-05-28 11:51       ` Jan Beulich
  2015-05-28 13:17         ` Ingo Molnar
  1 sibling, 1 reply; 17+ messages in thread
From: Jan Beulich @ 2015-05-28 11:51 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Borislav Petkov, Andy Lutomirski, Peter Zijlstra, mingo,
	Brian Gerst, fweisbec, tglx, Linus Torvalds, Denys Vlasenko,
	Josh Poimboeuf, linux-kernel, hpa

>>> On 28.05.15 at 13:20, <mingo@kernel.org> wrote:
> * Jan Beulich <JBeulich@suse.com> wrote:
>> Not sure why assembly code should look like C code. It's a matter of
>> taste perhaps, and I can see your point, but I'm also not really eager
>> to do changes just to match other people's taste. And just like above -
>> certainly not something for this patch I would think.
> 
> Yeah, no, so this isn't going to work that way.
> 
> On one hand you want dwarf annotations mostly for the out of tree
> dwarf-unwinding stack backtraces patch on SUSE kernels, while for the
> upstream kernel it's mostly just unreadable gunk in some of the most
> security sensitive code paths of the kernel, which only gets in the way
> of readability.
> 
> But on the other hand you are unwilling to (or don't have the time to)
> do a proper job of making this palatable for upstream.
> 
> That's unacceptable from the upstream kernel's POV, so instead of
> limping forward I'll do the attached patch: it gets rid of the
> unmaintainable dwarf mess from low level x86 assembly code. This isn't
> a new concern, a couple of years ago we almost did this.

I can understand your motivation, yet I still find it rather sad that
you're moving this way. Indeed I don't have the time to do major rework
in this area, but I don't think you can blame me for not at least trying
to investigate and eliminate breakage when I found it (which, as you
say, happens every now and then). Yet I do recall people indicating that
the unwind data is useful for more than just the out-of-tree live stack
unwinder, i.e. those uses will break along with the code we're _forced_
to maintain out-of-tree.

> and meanwhile you can keep a revert of this patch ported to SUSE kernels in 
> whatever fashion you prefer.

Funny suggestion - I don't think that's reasonable for us to do. Or if
we were to, we could as well invest in doing the re-work you're asking
for; I don't think anyone will have the time to do either.

Jan



* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-28 11:51       ` [PATCH] " Jan Beulich
@ 2015-05-28 13:17         ` Ingo Molnar
  2015-05-29 17:47           ` Andy Lutomirski
  0 siblings, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2015-05-28 13:17 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Borislav Petkov, Andy Lutomirski, Peter Zijlstra, mingo,
	Brian Gerst, fweisbec, tglx, Linus Torvalds, Denys Vlasenko,
	Josh Poimboeuf, linux-kernel, hpa


* Jan Beulich <JBeulich@suse.com> wrote:

> > and meanwhile you can keep a revert of this patch ported to SUSE kernels in 
> > whatever fashion you prefer.
> 
> Funny suggestion - I don't think that's reasonable for us to do. Or if we were 
> to, we could as well invest in doing the re-work you're asking for; I don't 
> think anyone will have the time to do either.

That's fair enough: if there aren't enough resources to keep a feature
maintainable upstream then it should not be upstream in that form.

This isn't just some driver we can let bit-rot in peace until it finds a 
maintainer (or not), without affecting anyone but users of that driver.

This is hundreds of usage sites of ugly code intermixed with critical pieces of 
assembly code that negatively affects the hackability of everything.

Also, with the feature missing completely, maybe someone finds a method to 
introduce it in a maintainable fashion, while with the feature included upstream 
there's very little pressure to do that. As a bonus we'd also win a workable dwarf 
unwinder.

Thanks,

	Ingo


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-28 13:17         ` Ingo Molnar
@ 2015-05-29 17:47           ` Andy Lutomirski
  2015-05-29 20:27             ` Josh Poimboeuf
                               ` (2 more replies)
  0 siblings, 3 replies; 17+ messages in thread
From: Andy Lutomirski @ 2015-05-29 17:47 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Jan Beulich, Borislav Petkov, Peter Zijlstra, Ingo Molnar,
	Brian Gerst, Frédéric Weisbecker, Thomas Gleixner,
	Linus Torvalds, Denys Vlasenko, Josh Poimboeuf, linux-kernel,
	H. Peter Anvin

On Thu, May 28, 2015 at 6:17 AM, Ingo Molnar <mingo@kernel.org> wrote:
> [...]

Before doing something drastic like this, I think we should get Josh's
opinion, since I think he's working on a new (?) unwinder.

FWIW, musl is considering some kind of automatic annotation scheme:

http://www.openwall.com/lists/musl/2015/05/13/5

--Andy


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-29 17:47           ` Andy Lutomirski
@ 2015-05-29 20:27             ` Josh Poimboeuf
  2015-05-29 21:39               ` Frank Ch. Eigler
  2015-06-01 19:45             ` Josh Poimboeuf
  2015-06-05 17:11             ` Andi Kleen
  2 siblings, 1 reply; 17+ messages in thread
From: Josh Poimboeuf @ 2015-05-29 20:27 UTC (permalink / raw)
  To: Andy Lutomirski
  Cc: Ingo Molnar, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Masami Hiramatsu, Frank Ch. Eigler,
	Dave Anderson, x86

On Fri, May 29, 2015 at 10:47:31AM -0700, Andy Lutomirski wrote:
> On Thu, May 28, 2015 at 6:17 AM, Ingo Molnar <mingo@kernel.org> wrote:
> > [...]
> 
> Before doing something drastic like this, I think we should get Josh's
> opinion, since I think he's working on a new (?) unwinder.

I'd definitely like to replace all the asm DWARF CFI annotations with
something more automated and robust.  So it doesn't really affect me
whether they're ripped out now or replaced later.  

But it might be a few months before I have the code.  If they get ripped
out now, it might affect tools that rely on debuginfo, like perf,
kprobes, systemtap, gdb, crash, etc.

Then again, I'm not sure how useful or reliable the existing annotations
are anyway, so maybe it doesn't matter much.

CCing some possible consumers of kernel x86 DWARF data to see if they
have any opinions.

-- 
Josh


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-29 20:27             ` Josh Poimboeuf
@ 2015-05-29 21:39               ` Frank Ch. Eigler
  0 siblings, 0 replies; 17+ messages in thread
From: Frank Ch. Eigler @ 2015-05-29 21:39 UTC (permalink / raw)
  To: Josh Poimboeuf
  Cc: Andy Lutomirski, Ingo Molnar, Jan Beulich, Borislav Petkov,
	Peter Zijlstra, Ingo Molnar, Brian Gerst,
	Frédéric Weisbecker, Thomas Gleixner, Linus Torvalds,
	Denys Vlasenko, linux-kernel, H. Peter Anvin, Masami Hiramatsu,
	Dave Anderson, x86

Hi -

On Fri, May 29, 2015 at 03:27:16PM -0500, Josh Poimboeuf wrote:
> [...]
> > > Also, with the feature missing completely, maybe someone finds a method to
> > > introduce it in a maintainable fashion, while with the feature included upstream
> > > there's very little pressure to do that. As a bonus we'd also win a workable dwarf
> > > unwinder.
> > 
> > Before doing something drastic like this, I think we should get Josh's
> > opinion, since I think he's working on a new (?) unwinder.
> 
> I'd definitely like to replace all the asm DWARF CFI annotations with
> something more automated and robust.  So it doesn't really affect me
> whether they're ripped out now or replaced later.  
> [...]
> Then again, I'm not sure how useful or reliable the existing annotations
> are anyway, so maybe it doesn't matter much.

In our experience as consumers of this CFI information for years in
systemtap, the annotations have been generally correct and reliable.
Their presence allows reliable, correct, and efficient
kernel->userspace backtracing as used in important systemtap scripts.

If the current complaint is primarily about testability, it would be
easy to add simple stap-based tests to the kernel to exercise the code
and confirm its operation.  Perhaps we could extract a specialized
self-contained test case (containing an unwinder).

I'm not in a position to judge the purported cost savings of removing
this code, but there is a definite cost: the loss of useful
functionality, esp. with no replacement in sight.


- FChE


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-29 17:47           ` Andy Lutomirski
  2015-05-29 20:27             ` Josh Poimboeuf
@ 2015-06-01 19:45             ` Josh Poimboeuf
  2015-06-01 19:53               ` Andy Lutomirski
  2015-06-05 17:11             ` Andi Kleen
  2 siblings, 1 reply; 17+ messages in thread
From: Josh Poimboeuf @ 2015-06-01 19:45 UTC (permalink / raw)
  To: Andy Lutomirski
  Cc: Ingo Molnar, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Frank Ch. Eigler

On Fri, May 29, 2015 at 10:47:31AM -0700, Andy Lutomirski wrote:
> On Thu, May 28, 2015 at 6:17 AM, Ingo Molnar <mingo@kernel.org> wrote:
> > [...]
> 
> Before doing something drastic like this, I think we should get Josh's
> opinion, since I think he's working on a new (?) unwinder.
> 
> FWIW, musl is considering some kind of automatic annotation scheme:
> 
> http://www.openwall.com/lists/musl/2015/05/13/5

Thanks for the link!  I found a newer version of it here:

  http://www.openwall.com/lists/musl/2015/05/31/5

Overall I think that script is a really good solution.

From what I can tell, it tracks the CFA (stack pointer) perfectly.
(Which is actually pretty straightforward if you just hook into function
entry/exit, push/pop, and add/sub to rsp.)

It also does a nice job at making a best effort at tracking the caller's
register values (which are less important than CFA but still nice to
have).
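
Concretely: for a function that saves a single callee-saved register,
the generated annotations would come out roughly like this (a
hand-written sketch with a made-up function name, using raw .cfi
directives instead of the kernel's CFI_* wrappers):

	.globl	frob
	.type	frob, @function
frob:
	.cfi_startproc			# CFA = %rsp + 8 on entry
	pushq	%rbx
	.cfi_adjust_cfa_offset 8	# %rsp moved down 8; CFA stays put
	.cfi_rel_offset rbx, 0		# caller's %rbx now saved at (%rsp)
	movq	%rdi, %rbx		# function body
	movq	%rbx, %rax
	popq	%rbx
	.cfi_adjust_cfa_offset -8
	.cfi_restore rbx		# %rbx holds the caller's value again
	ret
	.cfi_endproc
	.size	frob, . - frob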

Overall I'd bet that it would produce much more accurate results than
our current manual annotations.

I'm not crazy about the fact that it relies on awk, but I can't think of
a less hacky way to do that, without stuffing the CFI metadata directly
into the binary.

We could pretty easily port something like that to the kernel.  Before
that though, I think we'll need my stack validation patch set which
helps enforce asm function/non-function boundaries and proper ELF
function annotations.  Wrapping up a new version of that now.
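
In ELF terms that means every callable chunk of asm carrying the plain
function metadata that ENTRY/ENDPROC expand to; roughly, with a made-up
name:

	.globl	my_helper		# ENTRY(my_helper), minus the alignment
my_helper:
	ret
	.type	my_helper, @function	# ENDPROC(my_helper): mark it a function
	.size	my_helper, . - my_helper # ...and record where it ends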

-- 
Josh


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-06-01 19:45             ` Josh Poimboeuf
@ 2015-06-01 19:53               ` Andy Lutomirski
  2015-06-01 20:19                 ` Josh Poimboeuf
  2015-06-02  5:57                 ` Ingo Molnar
  0 siblings, 2 replies; 17+ messages in thread
From: Andy Lutomirski @ 2015-06-01 19:53 UTC (permalink / raw)
  To: Josh Poimboeuf
  Cc: Ingo Molnar, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Frank Ch. Eigler

On Mon, Jun 1, 2015 at 12:45 PM, Josh Poimboeuf <jpoimboe@redhat.com> wrote:
> On Fri, May 29, 2015 at 10:47:31AM -0700, Andy Lutomirski wrote:
>> [...]
>>
>> FWIW, musl is considering some kind of automatic annotation scheme:
>>
>> http://www.openwall.com/lists/musl/2015/05/13/5
>
> Thanks for the link!  I found a newer version of it here:
>
>   http://www.openwall.com/lists/musl/2015/05/31/5
>
> Overall I think that script is a really good solution.
>
> From what I can tell, it tracks the CFA (stack pointer) perfectly.
> (Which is actually pretty straightforward if you just hook into function
> entry/exit, push/pop, and add/sub to rsp).
>
> It also does a nice job at making a best effort at tracking the caller's
> register values (which are less important than CFA but still nice to
> have).

It might be nice to be able to reliably unwind out from an exception /
interrupt / syscall frame into userspace or into the kernel code that
trapped, complete with registers.

In any event, we'll almost certainly have to manually annotate these
weird types of entries.  I wonder if we could manage to annotate just
the entry parts and let a magic script do the rest.
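
In raw .cfi terms, the manual part for an interrupt is basically
describing the hardware-pushed iret frame; a hand-written sketch,
assuming the usual five-slot rip/cs/rflags/rsp/ss layout and no error
code:

	.cfi_startproc simple		# don't assume a normal call frame
	.cfi_signal_frame		# frame pushed by the CPU, not a call
	.cfi_def_cfa rsp, 5*8		# pre-interrupt %rsp sits above the frame
	.cfi_rel_offset rip, 0*8	# interrupted %rip, not a return address
	.cfi_rel_offset rsp, 3*8	# interrupted %rsp
	# ... handler body; a script could track pushes from here on ...
	iretq
	.cfi_endproc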

--Andy


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-06-01 19:53               ` Andy Lutomirski
@ 2015-06-01 20:19                 ` Josh Poimboeuf
  2015-06-02  5:57                 ` Ingo Molnar
  1 sibling, 0 replies; 17+ messages in thread
From: Josh Poimboeuf @ 2015-06-01 20:19 UTC (permalink / raw)
  To: Andy Lutomirski
  Cc: Ingo Molnar, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Frank Ch. Eigler

On Mon, Jun 01, 2015 at 12:53:36PM -0700, Andy Lutomirski wrote:
> On Mon, Jun 1, 2015 at 12:45 PM, Josh Poimboeuf <jpoimboe@redhat.com> wrote:
> > [...]
> 
> It might be nice to be able to reliably unwind out from an exception /
> interrupt / syscall frame into userspace or into the kernel code that
> trapped, complete with registers.
> 
> In any event, we'll almost certainly have to manually annotate these
> weird types of entries.  I wonder if we could manage to annotate just
> the entry parts and let a magic script do the rest.

Yeah.  Any callable function can be annotated "magically".  We may have
to manually annotate the rest.

-- 
Josh


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-06-01 19:53               ` Andy Lutomirski
  2015-06-01 20:19                 ` Josh Poimboeuf
@ 2015-06-02  5:57                 ` Ingo Molnar
  2015-06-02 14:46                   ` Josh Poimboeuf
  1 sibling, 1 reply; 17+ messages in thread
From: Ingo Molnar @ 2015-06-02  5:57 UTC (permalink / raw)
  To: Andy Lutomirski
  Cc: Josh Poimboeuf, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Frank Ch. Eigler


* Andy Lutomirski <luto@amacapital.net> wrote:

> [...]
> 
> It might be nice to be able to reliably unwind out from an exception / interrupt 
> / syscall frame into userspace or into the kernel code that trapped, complete 
> with registers.
> 
> In any event, we'll almost certainly have to manually annotate these weird types 
> of entries.  I wonder if we could manage to annotate just the entry parts and 
> let a magic script do the rest.

Even the entry parts we could help without uglifying the code:

 - either by adding a 'RET' instruction after IRET/SYSRET/SYSEXIT/etc. that the
   tooling can recognize as 'return from function'. That's much nicer than ugly
   annotations.

 - enhancing the tooling script to also recognize these instructions as function
   returns - because they _are_ function returns.
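
I.e. something like this at the end of an exit path (a sketch with a
made-up label; the trailing RET is dead code, there only so a simple
scanner sees an explicit end of function):

sysret_exit_sketch:
	swapgs
	sysretq			# the real exit to user space
	ret			# never executed; end-of-function marker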

So I'll commit the removal patch - it's clear at this point that the old 
annotations won't be used and they are actively getting in the way of bug fixes 
and new work. This became glaringly obvious during and after the big asm code 
refresh we did in v4.1.

If anyone needs debuginfo badly (who?) then they should help Josh get it
all upstream again ASAP; it doesn't seem much is missing to get all that
done in a much cleaner way.

Thanks,

	Ingo


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-06-02  5:57                 ` Ingo Molnar
@ 2015-06-02 14:46                   ` Josh Poimboeuf
  2015-06-02 17:00                     ` Andy Lutomirski
  0 siblings, 1 reply; 17+ messages in thread
From: Josh Poimboeuf @ 2015-06-02 14:46 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Andy Lutomirski, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Frank Ch. Eigler

On Tue, Jun 02, 2015 at 07:57:06AM +0200, Ingo Molnar wrote:
> * Andy Lutomirski <luto@amacapital.net> wrote:
> > [...]
> > 
> > It might be nice to be able to reliably unwind out from an exception / interrupt 
> > / syscall frame into userspace or into the kernel code that trapped, complete 
> > with registers.
> > 
> > In any event, we'll almost certainly have to manually annotate these weird types 
> > of entries.  I wonder if we could manage to annotate just the entry parts and 
> > let a magic script do the rest.
> 
> Even the entry parts we could help without uglifying the code:
> 
>  - either by adding a 'RET' instruction after IRET/SYSRET/SYSEXIT/etc. that the
>    tooling can recognize as 'return from function'. That's much nicer than ugly
>    annotations.
> 
>  - enhancing the tooling script to also recognize these instructions as function
>    returns - because they _are_ function returns.

I think the problem with the entry code (and other non-function asm
code) is that it's quite spaghetti-esque, with lots of jumps, returns,
calls, etc. to random places.  There aren't enough constraints which
would help the tooling make sense of where execution begins and ends,
when registers are saved or trashed, etc.

Maybe over time we can figure out what constraints (and/or annotations)
are needed there.


-- 
Josh


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-06-02 14:46                   ` Josh Poimboeuf
@ 2015-06-02 17:00                     ` Andy Lutomirski
  0 siblings, 0 replies; 17+ messages in thread
From: Andy Lutomirski @ 2015-06-02 17:00 UTC (permalink / raw)
  To: Josh Poimboeuf
  Cc: Ingo Molnar, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, linux-kernel,
	H. Peter Anvin, Frank Ch. Eigler

On Tue, Jun 2, 2015 at 7:46 AM, Josh Poimboeuf <jpoimboe@redhat.com> wrote:
> [...]
>
> I think the problem with the entry code (and other non-function asm
> code) is that it's quite spaghetti-esque, with lots of jumps, returns,
> calls, etc. to random places.  There aren't enough constraints which
> would help the tooling make sense of where execution begins and ends,
> when registers are saved or trashed, etc.
>
> Maybe over time we can figure out what constraints (and/or annotations)
> are needed there.
>

We can also try to reduce the spaghetti, which I'm working on.  (It's
even harder than I expected, and I doubt that any of my code will be
ready for 4.2.)

--Andy


* Re: [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
  2015-05-29 17:47           ` Andy Lutomirski
  2015-05-29 20:27             ` Josh Poimboeuf
  2015-06-01 19:45             ` Josh Poimboeuf
@ 2015-06-05 17:11             ` Andi Kleen
  2 siblings, 0 replies; 17+ messages in thread
From: Andi Kleen @ 2015-06-05 17:11 UTC (permalink / raw)
  To: Andy Lutomirski
  Cc: Ingo Molnar, Jan Beulich, Borislav Petkov, Peter Zijlstra,
	Ingo Molnar, Brian Gerst, Frédéric Weisbecker,
	Thomas Gleixner, Linus Torvalds, Denys Vlasenko, Josh Poimboeuf,
	linux-kernel, H. Peter Anvin

Andy Lutomirski <luto@amacapital.net> writes:
>
> Before doing something drastic like this, I think we should get Josh's
> opinion, since I think he's working on a new (?) unwinder.


It's also needed by gdb. gdbstub is in tree.

So no, Ingo. You cannot just remove it. Please put it back.

-Andi

-- 
ak@linux.intel.com -- Speaking for myself only


Thread overview: 17+ messages
2015-05-28  8:20 [PATCH] x86-64: fix unwind info for incomplete frames Jan Beulich
2015-05-28  9:01 ` Ingo Molnar
2015-05-28  9:45   ` Jan Beulich
2015-05-28 11:20     ` [PATCH] x86/debug: Remove perpetually broken, unmaintainable dwarf annotations Ingo Molnar
2015-05-28 11:39       ` [PATCH v2] " Ingo Molnar
2015-05-28 11:51       ` [PATCH] " Jan Beulich
2015-05-28 13:17         ` Ingo Molnar
2015-05-29 17:47           ` Andy Lutomirski
2015-05-29 20:27             ` Josh Poimboeuf
2015-05-29 21:39               ` Frank Ch. Eigler
2015-06-01 19:45             ` Josh Poimboeuf
2015-06-01 19:53               ` Andy Lutomirski
2015-06-01 20:19                 ` Josh Poimboeuf
2015-06-02  5:57                 ` Ingo Molnar
2015-06-02 14:46                   ` Josh Poimboeuf
2015-06-02 17:00                     ` Andy Lutomirski
2015-06-05 17:11             ` Andi Kleen
