kernel-2.6.18-194.11.1.el5.src.rpm

From: Amerigo Wang <amwang@redhat.com>
Date: Fri, 16 Oct 2009 01:37:45 -0400
Subject: [x86_64] fix 32-bit process register leak
Message-id: <20091016014027.3999.88666.sendpatchset@localhost.localdomain>
Patchwork-id: 21125
O-Subject: [v2 PATCH RHEL5.5] [CVE-2009-2910] fix x86_64 32bit process register
	leak
Bugzilla: 526798
RH-Acked-by: Dave Anderson <anderson@redhat.com>

BZ:
https://bugzilla.redhat.com/show_bug.cgi?id=526798

Description:
While 32-bit processes can't directly access R8...R15, they can gain
access to the contents of these registers by temporarily switching
themselves into 64-bit mode. Therefore, the registers that called C
functions do not preserve anyway (i.e. R8...R11) must be cleared before
returning to user mode, so that kernel-internal values are not leaked.
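
For illustration only (this is not the reproducer attached to the BZ; the
selectors are the standard Linux x86_64 ones, 0x33 for the 64-bit user code
segment and 0x23 for the 32-bit one), the mode switch looks schematically
like this in gas syntax -- a real reproducer additionally has to cope with
assembling mixed 32/64-bit code into a single 32-bit binary:

	/* Illustrative sketch only, not the BZ test case.  Assumes the task
	 * has just returned from a system call through one of the compat
	 * entry paths, so R8...R11 still hold whatever the kernel left there. */
	.code32
	ljmp	$0x33, $1f		/* far jump into the 64-bit user CS */
	.code64
1:	movq	%r8, %rsi		/* the upper registers are readable here; */
	movq	%r11, %rdi		/* unpatched kernels leave kernel values  */
					/* in them on the compat return paths     */
	pushq	$0x23			/* 32-bit user CS */
	pushq	$2f
	lretq				/* far return back to compatibility mode */
	.code32
2:	/* back in 32-bit mode; the low halves of the sampled values
	   are now visible in %esi/%edi */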

KABI:
No harm.

Brew:
https://brewweb.devel.redhat.com/taskinfo?taskID=2030288

Upstream status:
Commit 24e35800.

Test status:
With the test case from the BZ, the register dump taken before ('-') and
after ('+') applying this patch differs as follows:

	 eax            0x0      0
	-ecx            0x1b786000       460873728
	+ecx            0x0      0
	 edx            0x0      0
	-ebx            0x3c     60
	-esp            0xffff8126       0xffff8126
	-ebp            0x1b786000       0x1b786000
	+ebx            0x0      0
	+esp            0x24     0x24
	+ebp            0x0      0x0
	 esi            0x0      0
	 edi            0x246    582
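
A dump like the one above can be captured by stopping the test right after
the system call returns and diffing gdb's "info registers" output with and
without the patch. A minimal sketch in that spirit (hypothetical, not the
actual BZ attachment; built as a 32-bit program):

	/* Make a 32-bit system call, then trap immediately so a debugger can
	 * show what the return path left in the registers.  The BZ reproducer
	 * presumably goes through the vDSO sysenter path; int $0x80 is used
	 * here only for brevity. */
	.code32
	movl	$20, %eax		/* __NR_getpid on i386 */
	int	$0x80
	int3				/* SIGTRAP: inspect registers here */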

Signed-off-by: WANG Cong <amwang@redhat.com>



diff --git a/arch/x86_64/ia32/ia32entry-xen.S b/arch/x86_64/ia32/ia32entry-xen.S
index f1938b4..14cca33 100644
--- a/arch/x86_64/ia32/ia32entry-xen.S
+++ b/arch/x86_64/ia32/ia32entry-xen.S
@@ -32,12 +32,12 @@
 	.endm 
 
 	/* clobbers %eax */	
-	.macro  CLEAR_RREGS _r9=rax
+	.macro  CLEAR_RREGS offset=0, _r9=rax
 	xorl 	%eax,%eax
-	movq	%rax,R11(%rsp)
-	movq	%rax,R10(%rsp)
-	movq    %\_r9,R9(%rsp)
-	movq	%rax,R8(%rsp)
+	movq    %rax,\offset+R11(%rsp)
+	movq    %rax,\offset+R10(%rsp)
+	movq    %\_r9,\offset+R9(%rsp)
+	movq    %rax,\offset+R8(%rsp)
 	.endm
 
 #if defined (__XEN_X86_64)
@@ -155,11 +155,15 @@ sysenter_do_call:
 	__cli
 	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-	jnz	int_ret_from_sys_call
+	jnz	ia32_ret_from_sys_call
 	andl    $~TS_COMPAT,threadinfo_status(%r10)
 	/* clear IF, that popfq doesn't enable interrupts early */
 	andl  $~0x200,EFLAGS-R11(%rsp) 
 	RESTORE_ARGS 1,24,1,1,1,1
+	xorq    %r8,%r8
+	xorq    %r9,%r9
+	xorq    %r10,%r10
+	xorq    %r11,%r11
 	popfq
 	CFI_ADJUST_CFA_OFFSET -8
 	/*CFI_RESTORE rflags*/
@@ -265,6 +269,9 @@ cstar_do_call:
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx
 	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
+	xorq    %r10,%r10
+	xorq    %r9,%r9
+	xorq    %r8,%r8
 	/*CFI_REGISTER rflags,r11*/
 	TRACE_IRQS_ON
 	movl RSP-ARGOFFSET(%rsp),%esp
@@ -276,7 +283,7 @@ cstar_tracesys:
 	CFI_RESTORE_STATE
 	xchgl %r9d,%ebp
 	SAVE_REST
-	CLEAR_RREGS r9
+	CLEAR_RREGS 0, r9
 	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
@@ -349,6 +356,8 @@ ia32_do_syscall:
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
 	movq %rax,RAX-ARGOFFSET(%rsp)
+ia32_ret_from_sys_call:
+	CLEAR_RREGS -ARGOFFSET
 	jmp int_ret_from_sys_call 
 
 ia32_tracesys:			 
@@ -363,8 +372,8 @@ END(ia32_syscall)
 
 ia32_badsys:
 	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-	jmp int_ret_from_sys_call
+	movq $-ENOSYS,%rax
+	jmp ia32_sysret
 
 quiet_ni_syscall:
 	movq $-ENOSYS,%rax
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 8d7e812..40e8168 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -30,12 +30,12 @@
 	.endm 
 
 	/* clobbers %eax */	
-	.macro  CLEAR_RREGS _r9=rax
+	.macro  CLEAR_RREGS offset=0, _r9=rax
 	xorl 	%eax,%eax
-	movq	%rax,R11(%rsp)
-	movq	%rax,R10(%rsp)
-	movq    %\_r9,R9(%rsp)
-	movq	%rax,R8(%rsp)
+	movq    %rax,\offset+R11(%rsp)
+	movq    %rax,\offset+R10(%rsp)
+	movq    %\_r9,\offset+R9(%rsp)
+	movq    %rax,\offset+R8(%rsp)
 	.endm
 
 	.macro LOAD_ARGS32 offset, _r9=0
@@ -136,11 +136,15 @@ sysenter_do_call:
 	cli
 	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-	jnz	int_ret_from_sys_call
+	jnz	ia32_ret_from_sys_call
 	andl    $~TS_COMPAT,threadinfo_status(%r10)
 	/* clear IF, that popfq doesn't enable interrupts early */
 	andl  $~0x200,EFLAGS-R11(%rsp) 
 	RESTORE_ARGS 1,24,1,1,1,1
+	xorq    %r8,%r8
+	xorq    %r9,%r9
+	xorq    %r10,%r10
+	xorq    %r11,%r11
 	popfq
 	CFI_ADJUST_CFA_OFFSET -8
 	/*CFI_RESTORE rflags*/
@@ -243,6 +247,9 @@ cstar_do_call:
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx
 	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
+	xorq    %r10,%r10
+	xorq    %r9,%r9
+	xorq    %r8,%r8
 	/*CFI_REGISTER rflags,r11*/
 	TRACE_IRQS_ON
 	movl RSP-ARGOFFSET(%rsp),%esp
@@ -254,7 +261,7 @@ cstar_tracesys:
 	CFI_RESTORE_STATE
 	xchgl %r9d,%ebp
 	SAVE_REST
-	CLEAR_RREGS r9
+	CLEAR_RREGS 0, r9
 	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
@@ -322,6 +329,8 @@ ia32_do_call:
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
 	movq %rax,RAX-ARGOFFSET(%rsp)
+ia32_ret_from_sys_call:
+	CLEAR_RREGS -ARGOFFSET
 	jmp int_ret_from_sys_call 
 
 ia32_tracesys:			 
@@ -338,8 +347,8 @@ END(ia32_syscall)
 
 ia32_badsys:
 	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-	jmp int_ret_from_sys_call
+	movq $-ENOSYS,%rax
+	jmp ia32_sysret
 
 quiet_ni_syscall:
 	movq $-ENOSYS,%rax