From: Bill Burns <bburns@redhat.com>
Date: Wed, 2 Jul 2008 13:43:34 -0400
Subject: [xen] add VPS sync read/write according to spec
Message-id: 20080702174334.8165.74077.sendpatchset@localhost.localdomain
O-Subject: [RHEL5.3 PATCH 3/3]
Bugzilla: 437096
RH-Acked-by: Jarod Wilson <jwilson@redhat.com>

Fixes bz 437096

Problem description:
This Intel feature patch adds support for VT-i2, the new hardware
virtualization feature set of a new ia64 processor that will be used
in the upcoming Tukwila systems.

The patches were provided by Intel. They apply cleanly to our
3.1.2-based code base and build cleanly in Brew. A short sketch of the
stub-binding scheme they introduce follows the sign-off below.

Upstream in Xen Unstable:
http://xenbits.xensource.com/xen-unstable.hg?rev/e5244d14486c
http://xenbits.xensource.com/xen-unstable.hg?rev/6cf504b4de7d

Brew build:
http://brewweb.devel.redhat.com/brew/taskinfo?taskID=1372513

Testing:
Partner testing is ongoing.

Please review and ACK.

Thanks,
 Bill
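
A note on the mechanism, for reviewers: the patch removes the per-call
indirection through __vsa_base (load the base, add the PAL_VPS_* offset,
indirect branch) from the hot paths in vmx_entry.S, vmx_ivt.S and
vmx_minstate.h, and branches instead to small stubs in optvfault.S
(vmx_vps_sync_read/write, vmx_vps_resume_normal/handler). The stubs
initially target vmx_dummy_function; vmx_vps_patch() binds them exactly
once, when vmx_init_env() learns the VSA base, by rewriting each stub's
movl immediate with ia64_patch_imm64() and then flushing and serializing
the instruction stream (ia64_fc, ia64_sync_i, ia64_srlz_i). The C sketch
below shows the same one-time-binding idea using function pointers; every
name in it is an illustrative stand-in, and unlike the sketch, the real
patch rewrites the instruction stream itself rather than a pointer.

    #include <stdio.h>

    /* Hypothetical stand-ins for the PAL VPS services; in the patch
     * the real entry points live at __vsa_base + PAL_VPS_*. */
    static void pal_vps_sync_read(void)  { puts("PAL_VPS_SYNC_READ"); }
    static void pal_vps_sync_write(void) { puts("PAL_VPS_SYNC_WRITE"); }

    /* Unpatched stubs branch to vmx_dummy_function, which just spins;
     * reaching it would mean a service was called before vmx_init_env()
     * discovered the VSA base. */
    static void dummy(void) { for (;;) ; }

    static void (*vps_sync_read)(void)  = dummy;
    static void (*vps_sync_write)(void) = dummy;

    /* Analogue of vmx_vps_patch(): bind every stub exactly once when
     * the service base becomes known, so the hot paths no longer have
     * to load __vsa_base and compute the target on every call. */
    static void vps_patch(void)
    {
        vps_sync_read  = pal_vps_sync_read;
        vps_sync_write = pal_vps_sync_write;
    }

    int main(void)
    {
        vps_patch();         /* done once, from vmx_init_env() */
        vps_sync_read();     /* now a direct, pre-bound call */
        vps_sync_write();
        return 0;
    }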

diff --git a/arch/ia64/vmx/optvfault.S b/arch/ia64/vmx/optvfault.S
index 0d778de..c64cb6f 100644
--- a/arch/ia64/vmx/optvfault.S
+++ b/arch/ia64/vmx/optvfault.S
@@ -19,16 +19,82 @@
 #include <asm/asm-offsets.h>
 #include <asm-ia64/vmx_mm_def.h>
 
-#define ACCE_MOV_FROM_AR
-#define ACCE_MOV_FROM_RR
-#define ACCE_MOV_TO_RR
-#define ACCE_RSM
-#define ACCE_SSM
-#define ACCE_MOV_TO_PSR
+//#define ACCE_MOV_FROM_AR
+//#define ACCE_MOV_FROM_RR
+//#define ACCE_MOV_TO_RR
+//#define ACCE_RSM
+//#define ACCE_SSM
+//#define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
 // Inputs are: r21 (= current), r24 (= cause), r25 (= insn), r31 (=saved pr)
 
+ENTRY(vmx_dummy_function)
+    br.sptk.many vmx_dummy_function
+END(vmx_dummy_function)
+
+/*
+ *	Inputs:
+ *		r24 : return address
+ *  	r25 : vpd
+ *		r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(vmx_vps_sync_read)
+    movl r29 = vmx_dummy_function
+    ;;
+    mov b0=r29
+    br.sptk.many b0
+END(vmx_vps_sync_read)
+
+/*
+ *	Inputs:
+ *		r24 : return address
+ *  	r25 : vpd
+ *		r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(vmx_vps_sync_write)
+    movl r29 = vmx_dummy_function
+    ;;
+    mov b0=r29
+    br.sptk.many b0
+END(vmx_vps_sync_write)
+
+/*
+ *	Inputs:
+ *		r23 : pr
+ *		r24 : guest b0
+ *  	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(vmx_vps_resume_normal)
+    movl r29 = vmx_dummy_function
+    ;;
+    mov b0=r29
+    mov pr=r23,-2
+    br.sptk.many b0
+END(vmx_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *		r23 : pr
+ *		r24 : guest b0
+ *  	r25 : vpd
+ *		r17 : isr
+ */
+GLOBAL_ENTRY(vmx_vps_resume_handler)
+    movl r29 = vmx_dummy_function
+    ;;
+    ld8 r26=[r25]
+    shr r17=r17,IA64_ISR_IR_BIT
+    ;;
+    dep r26=r17,r26,63,1   // bit 63 of r26 indicates whether CFLE is enabled
+    mov b0=r29
+    mov pr=r23,-2
+    br.sptk.many b0
+END(vmx_vps_resume_handler)
+
 
 //mov r1=ar3
 GLOBAL_ENTRY(vmx_asm_mov_from_ar)
diff --git a/arch/ia64/vmx/vmx_entry.S b/arch/ia64/vmx/vmx_entry.S
index afe9762..ce73e6c 100644
--- a/arch/ia64/vmx/vmx_entry.S
+++ b/arch/ia64/vmx/vmx_entry.S
@@ -361,20 +361,16 @@ vmx_rse_clear_invalid:
     adds r19=VPD(VPSR),r18
     ;;
     ld8 r19=[r19]        //vpsr
-    movl r20=__vsa_base
     ;;
 //vsa_sync_write_start
-    ld8 r20=[r20]       // read entry point
-    mov r25=r18
-    ;;
     movl r24=ia64_vmm_entry  // calculate return address
-    add r16=PAL_VPS_SYNC_WRITE,r20
-    ;;
-    mov b0=r16
-    br.cond.sptk b0         // call the service
+    mov r25=r18
+    br.sptk.many vmx_vps_sync_write        // call the service
     ;;
 END(ia64_leave_hypervisor)
 // fall through
+
+
 GLOBAL_ENTRY(ia64_vmm_entry)
 /*
  *  must be at bank 0
@@ -382,32 +378,18 @@ GLOBAL_ENTRY(ia64_vmm_entry)
  *  r17:cr.isr
  *  r18:vpd
  *  r19:vpsr
- *  r20:__vsa_base
  *  r22:b0
  *  r23:predicate
  */
     mov r24=r22
     mov r25=r18
     tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
+    (p1) br.cond.sptk.few vmx_vps_resume_normal
+    (p2) br.cond.sptk.many vmx_vps_resume_handler
     ;;
-    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-    (p1) br.sptk.many ia64_vmm_entry_out
-    ;;
-    tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT		//p1=cr.isr.ir
-    ;;
-    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-    (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
-    (p2) ld8 r26=[r25]
-    ;;
-ia64_vmm_entry_out:    
-    mov pr=r23,-2
-    mov b0=r29
-    ;;
-    br.cond.sptk b0             // call pal service
 END(ia64_vmm_entry)
 
 
-
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  *  need to switch to bank 0 and doesn't restore the scratch registers.
diff --git a/arch/ia64/vmx/vmx_init.c b/arch/ia64/vmx/vmx_init.c
index b4bf62e..67f6bec 100644
--- a/arch/ia64/vmx/vmx_init.c
+++ b/arch/ia64/vmx/vmx_init.c
@@ -52,6 +52,7 @@
 #include <xen/event.h>
 #include <asm/vlsapic.h>
 #include <asm/vmx_pal_vsa.h>
+#include <asm/patch.h>
 #include "entry.h"
 
 /* Global flag to identify whether Intel vmx feature is on */
@@ -63,6 +64,28 @@ static u64 vm_buffer = 0;	/* Buffer required to bring up VMX feature */
 u64 __vsa_base = 0;	/* Run-time service base of VMX */
 
 /* Check whether vt feature is enabled or not. */
+
+void vmx_vps_patch(void)
+{
+	u64 addr;
+	
+	addr = (u64)&vmx_vps_sync_read;
+	ia64_patch_imm64(addr, __vsa_base+PAL_VPS_SYNC_READ);
+	ia64_fc((void *)addr);
+	addr = (u64)&vmx_vps_sync_write;
+	ia64_patch_imm64(addr, __vsa_base+PAL_VPS_SYNC_WRITE);
+	ia64_fc((void *)addr);
+	addr = (u64)&vmx_vps_resume_normal;
+	ia64_patch_imm64(addr, __vsa_base+PAL_VPS_RESUME_NORMAL);
+	ia64_fc((void *)addr);
+	addr = (u64)&vmx_vps_resume_handler;
+	ia64_patch_imm64(addr, __vsa_base+PAL_VPS_RESUME_HANDLER);
+	ia64_fc((void *)addr);
+	ia64_sync_i();
+	ia64_srlz_i();	
+}
+
+
 void
 identify_vmx_feature(void)
 {
@@ -131,8 +154,10 @@ vmx_init_env(void)
 		return ;
 	}
 
-	if (!__vsa_base)
+	if (!__vsa_base){
 		__vsa_base = tmp_base;
+		vmx_vps_patch();
+	}
 	else
 		ASSERT(tmp_base != __vsa_base);
 
diff --git a/arch/ia64/vmx/vmx_ivt.S b/arch/ia64/vmx/vmx_ivt.S
index deaefc2..8793360 100644
--- a/arch/ia64/vmx/vmx_ivt.S
+++ b/arch/ia64/vmx/vmx_ivt.S
@@ -208,11 +208,8 @@ vmx_itlb_loop:
     ld8 r18=[r16]
     ;;
     adds r19=VPD(VPSR),r18
-    movl r20=__vsa_base
     ;;
     ld8 r19=[r19]
-    ld8 r20=[r20]
-    ;;
     br.sptk ia64_vmm_entry
     ;;
 vmx_itlb_out:
@@ -289,11 +286,8 @@ vmx_dtlb_loop:
     ld8 r18=[r16]
     ;;
     adds r19=VPD(VPSR),r18
-    movl r20=__vsa_base
     ;;
     ld8 r19=[r19]
-    ld8 r20=[r20]
-    ;;
     br.sptk ia64_vmm_entry
     ;;
 vmx_dtlb_out:
diff --git a/arch/ia64/vmx/vmx_minstate.h b/arch/ia64/vmx/vmx_minstate.h
index 02a60ec..0c79156 100644
--- a/arch/ia64/vmx/vmx_minstate.h
+++ b/arch/ia64/vmx/vmx_minstate.h
@@ -59,24 +59,16 @@
 
 #define PAL_VSA_SYNC_READ           \
     /* begin to call pal vps sync_read */     \
-    add r25=IA64_VPD_BASE_OFFSET, r21;       \
-    movl r20=__vsa_base;     \
-    ;;          \
-    ld8 r25=[r25];      /* read vpd base */     \
-    ld8 r20=[r20];      /* read entry point */  \
-    ;;      \
-    add r20=PAL_VPS_SYNC_READ,r20;  \
-    ;;  \
 { .mii;  \
+    add r25=IA64_VPD_BASE_OFFSET, r21;       \
     nop 0x0;   \
     mov r24=ip;        \
-    mov b0=r20;     \
     ;;      \
 };           \
 { .mmb;      \
     add r24 = 0x20, r24;    \
-    nop 0x0;   	 \
-    br.cond.sptk b0;        /*  call the service */ \
+    ld8 r25 = [r25];   	 \
+    br.cond.sptk vmx_vps_sync_read;        /*  call the service */ \
     ;;              \
 };           \
 
diff --git a/include/asm-ia64/vmx_pal_vsa.h b/include/asm-ia64/vmx_pal_vsa.h
index 612f5ab..0564799 100644
--- a/include/asm-ia64/vmx_pal_vsa.h
+++ b/include/asm-ia64/vmx_pal_vsa.h
@@ -28,6 +28,14 @@
 #ifndef __ASSEMBLY__
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
                          u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+
+/* entry points in assembly code for calling vps services */
+
+extern char vmx_vps_sync_read;
+extern char vmx_vps_sync_write;
+extern char vmx_vps_resume_normal;
+extern char vmx_vps_resume_handler;
+
 extern u64 __vsa_base;
 #endif  /* __ASSEMBLY__ */