kernel-2.6.18-128.1.10.el5.src.rpm

From: Bhavna Sarathy <bnagendr@redhat.com>
Date: Wed, 3 Sep 2008 10:37:35 -0400
Subject: [xen] AMD 2MB backing pages support
Message-id: 48BEA12F.6020302@redhat.com
O-Subject: Re: [RHEL5.3 XEN PATCH] AMD 2MB backing pages support
Bugzilla: 251980
RH-Acked-by: Don Dutile <ddutile@redhat.com>
RH-Acked-by: Chris Lalancette <clalance@redhat.com>

Resolves BZ 251980

This patch enables 2MB backing page allocation in the guest (also known
as superpage allocation). The required support for 2MB backing pages has
two components:
1) The hypervisor patch that adds the necessary support to deal with 2MB
pages (this patch).
2) The user space component that adds support to allocate 2MB pages
(Intel's 2MB user space setup patch, submitted by Bill Burns, works for
AMD as well).

Note that when 2MB pages are not available, allocation falls back to
4KB pages (a sketch of this fallback follows).
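
For context, the user space side (Bill Burns's patch, not included here)
is what actually requests 2MB extents. A minimal sketch of the fallback
idea, assuming the xc_domain_memory_populate_physmap() interface of this
libxc version and a hypothetical helper name, could look like this:

    #include <stdint.h>
    #include <xenctrl.h>

    #define SUPERPAGE_ORDER 9   /* 2MB = 512 contiguous 4KB frames */

    /* Illustration only, not part of this patch: try one 2MB extent;
     * if the hypervisor cannot satisfy it, fall back to 4KB extents. */
    static int populate_2mb_or_4kb(int xc_handle, uint32_t domid,
                                   xen_pfn_t gpfn)
    {
        xen_pfn_t pfns[1UL << SUPERPAGE_ORDER];
        unsigned long i;

        pfns[0] = gpfn;
        if ( xc_domain_memory_populate_physmap(xc_handle, domid, 1,
                                               SUPERPAGE_ORDER, 0, pfns) == 0 )
            return 0;

        /* 2MB allocation failed: populate the same range as 512
         * individual 4KB extents instead. */
        for ( i = 0; i < (1UL << SUPERPAGE_ORDER); i++ )
            pfns[i] = gpfn + i;
        return xc_domain_memory_populate_physmap(xc_handle, domid,
                                                 1UL << SUPERPAGE_ORDER,
                                                 0, 0, pfns);
    }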

Upstream status:
The upstream 2MB backing pages patch was submitted on May 21.
http://lists.xensource.com/archives/html/xen-changelog/2008-05/msg00169.html

The changesets for the accepted patch are 17645 and 17782 (the latter
fixes a small typo).

Patch dependencies:
Since Intel submitted their EPT, VPID, Migration and 2MB backing pages code
earlier for RHEL5.3, I applied their patches to create the AMD port for
RHEL5.3.

Here are some notes:
1) Upstream Xen does not index with "i << a->extent_order", just "i".
2) This code fetches the guest physical frame from extent_list; we
should not change the index by shifting it.
3) I tried both cases and both work. The reason we don't see a failure
is that we always ask for one page at a time, i.e. "a->nr_extents" is
always 1. So "i" is 0, and shifting "i" has no impact on the results
(see the sketch below).

I've attached the patch with this change. Please ACK this final patch.

Many thanks,
Bhavna
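
Before the diff itself, a quick note on the interface change it makes:
guest_physmap_add_page() and guest_physmap_remove_page() gain an "order"
argument (see the include/asm-x86/p2m.h hunk below). Illustrative call
sites, not taken verbatim from the diff:

    /* map a single 4KB frame, as all existing callers now do */
    guest_physmap_add_page(d, gfn, mfn, 0);

    /* map one 2MB superpage: 512 contiguous, 2MB-aligned frames */
    guest_physmap_add_page(d, gfn, mfn, 9);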

diff --git a/arch/ia64/xen/mm.c b/arch/ia64/xen/mm.c
index a934197..1139ced 100644
--- a/arch/ia64/xen/mm.c
+++ b/arch/ia64/xen/mm.c
@@ -1884,7 +1884,7 @@ steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
 
 int
 guest_physmap_add_page(struct domain *d, unsigned long gpfn,
-                       unsigned long mfn)
+                       unsigned long mfn, int order)
 {
     BUG_ON(!mfn_valid(mfn));
     BUG_ON(mfn_to_page(mfn)->count_info != (PGC_allocated | 1));
@@ -1901,7 +1901,7 @@ guest_physmap_add_page(struct domain *d, unsigned long gpfn,
 
 void
 guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
-                          unsigned long mfn)
+                          unsigned long mfn, int order)
 {
     BUG_ON(mfn == 0);//XXX
     zap_domain_page_one(d, gpfn << PAGE_SHIFT, 0, mfn);
@@ -2306,7 +2306,7 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         if (prev_mfn && mfn_valid(prev_mfn)) {
             if (is_xen_heap_frame(mfn_to_page(prev_mfn)))
                 /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn);
+                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
             else
                 /* Normal domain memory is freed, to avoid leaking memory. */
                 guest_remove_page(d, xatp.gpfn);
@@ -2315,10 +2315,10 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         /* Unmap from old location, if any. */
         gpfn = get_gpfn_from_mfn(mfn);
         if (gpfn != INVALID_M2P_ENTRY)
-            guest_physmap_remove_page(d, gpfn, mfn);
+            guest_physmap_remove_page(d, gpfn, mfn, 0);
 
         /* Map at new location. */
-        guest_physmap_add_page(d, xatp.gpfn, mfn);
+        guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
     out:
         UNLOCK_BIGLOCK(d);
diff --git a/arch/powerpc/mm.c b/arch/powerpc/mm.c
index 2dec97a..4a7f6ff 100644
--- a/arch/powerpc/mm.c
+++ b/arch/powerpc/mm.c
@@ -579,7 +579,7 @@ void guest_physmap_add_page(
 }
 
 void guest_physmap_remove_page(
-    struct domain *d, unsigned long gpfn, unsigned long mfn)
+    struct domain *d, unsigned long gpfn, unsigned long mfn, int order)
 {
     if (page_get_owner(mfn_to_page(mfn)) != d) {
         printk("Won't unmap foreign MFN 0x%lx for DOM%d\n", mfn, d->domain_id);
diff --git a/arch/x86/hvm/vmx/vmx.c b/arch/x86/hvm/vmx/vmx.c
index 51b058d..3c01a84 100644
--- a/arch/x86/hvm/vmx/vmx.c
+++ b/arch/x86/hvm/vmx/vmx.c
@@ -2657,7 +2657,7 @@ struct page_info * change_guest_physmap_for_vtpr(struct domain *d,
         d->arch.hvm_domain.apic_access_page = pg;
         d->arch.hvm_domain.vmx_apic_access_mfn = mfn;
 
-        guest_physmap_add_page(d, pfn, mfn);
+        guest_physmap_add_page(d, pfn, mfn, 0);
 
         d->arch.hvm_domain.physmap_changed_for_vlapic_access = 1;
 
@@ -2668,7 +2668,7 @@ struct page_info * change_guest_physmap_for_vtpr(struct domain *d,
         if ( d->arch.hvm_domain.physmap_changed_for_vlapic_access )
         {
             mfn = page_to_mfn(pg);
-            guest_physmap_remove_page(d, pfn, mfn);
+            guest_physmap_remove_page(d, pfn, mfn, 0);
             flush_tlb_mask(d->domain_dirty_cpumask);
 
             d->arch.hvm_domain.physmap_changed_for_vlapic_access = 0;
diff --git a/arch/x86/mm.c b/arch/x86/mm.c
index 557a3d9..f7f9e04 100644
--- a/arch/x86/mm.c
+++ b/arch/x86/mm.c
@@ -3102,7 +3102,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         {
             if ( is_xen_heap_frame(mfn_to_page(prev_mfn)) )
                 /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn);
+                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
             else
                 /* Normal domain memory is freed, to avoid leaking memory. */
                 guest_remove_page(d, xatp.gpfn);
@@ -3111,10 +3111,10 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         /* Unmap from old location, if any. */
         gpfn = get_gpfn_from_mfn(mfn);
         if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn);
+            guest_physmap_remove_page(d, gpfn, mfn, 0);
 
         /* Map at new location. */
-        guest_physmap_add_page(d, xatp.gpfn, mfn);
+        guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
         UNLOCK_BIGLOCK(d);
 
diff --git a/arch/x86/mm/p2m.c b/arch/x86/mm/p2m.c
index 56466c0..238fd68 100644
--- a/arch/x86/mm/p2m.c
+++ b/arch/x86/mm/p2m.c
@@ -123,9 +123,11 @@ p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
                unsigned long *gfn_remainder, unsigned long gfn, u32 shift, 
                u32 max, unsigned long type)
 {
+    l1_pgentry_t *l1_entry;
     l1_pgentry_t *p2m_entry;
     l1_pgentry_t new_entry;
     void *next;
+    int i;
     ASSERT(d->arch.p2m.alloc_page);
 
     if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
@@ -166,6 +168,42 @@ p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
             break;
         }
     }
+
+    /* split single large page into 4KB page in P2M table */
+    if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+    {
+        unsigned long flags, pfn;
+        struct page_info *pg = d->arch.p2m.alloc_page(d);
+        if ( pg == NULL )
+            return 0;
+        list_add_tail(&pg->list, &d->arch.p2m.pages);
+        pg->u.inuse.type_info = PGT_l1_page_table | 1 | PGT_validated;
+        pg->count_info = 1;
+        
+        /* New splintered mappings inherit the flags of the old superpage,
+         * with a little reorganisation for the _PAGE_PSE_PAT bit. */
+        flags = l1e_get_flags(*p2m_entry);
+        pfn = l1e_get_pfn(*p2m_entry);
+        if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
+            pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
+        else
+            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
+        
+        l1_entry = map_domain_page(mfn_x(page_to_mfn(pg)));
+        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+        {
+            new_entry = l1e_from_pfn(pfn + i, flags);
+            paging_write_p2m_entry(d, gfn,
+                                   l1_entry+i, *table_mfn, new_entry, 1);
+        }
+        unmap_domain_page(l1_entry);
+        
+        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
+                                 __PAGE_HYPERVISOR|_PAGE_USER);
+        paging_write_p2m_entry(d, gfn,
+                               p2m_entry, *table_mfn, new_entry, 2);
+    }
+    
     *table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
     next = map_domain_page(mfn_x(*table_mfn));
     unmap_domain_page(*table);
@@ -176,7 +214,8 @@ p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
 
 // Returns 0 on error (out of memory)
 static int
-set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, u32 l1e_flags)
+set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, int order, 
+		u32 l1e_flags)
 {
     // XXX -- this might be able to be faster iff current->domain == d
     mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
@@ -184,6 +223,7 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, u32 l1e_flags)
     unsigned long gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry;
     l1_pgentry_t entry_content;
+    l2_pgentry_t l2e_content;
     int rv=0;
 
 #if CONFIG_PAGING_LEVELS >= 4
@@ -208,26 +248,54 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, u32 l1e_flags)
                          PGT_l2_page_table) )
         goto out;
 #endif
-    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
-                         L2_PAGETABLE_SHIFT - PAGE_SHIFT,
-                         L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
-        goto out;
+    
+    if ( order == 0 ) 
+    {
+        if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
+                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
+                             L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
+            goto out;
+        
+        p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
+                                   0, L1_PAGETABLE_ENTRIES);
+        ASSERT(p2m_entry);
+        
+        if ( mfn_valid(mfn) )
+            entry_content = l1e_from_pfn(mfn_x(mfn), l1e_flags);
+        else
+            entry_content = l1e_empty();
+        
+        /* level 1 entry */
+        paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
+    }
+    else
+    {
+        p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
+                                   L2_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                   L2_PAGETABLE_ENTRIES);
+        ASSERT(p2m_entry);
+        
+        if ( (l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) &&
+             !(l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+        {
+            P2M_ERROR("configure P2M table 4KB L2 entry with large page\n");
+            domain_crash(d);
+            goto out;
+        }
+
+        if ( mfn_valid(mfn) )
+            l2e_content = l2e_from_pfn(mfn_x(mfn),
+                                       __PAGE_HYPERVISOR|_PAGE_USER|_PAGE_PSE);
+        else
+            l2e_content = l2e_empty();
 
-    p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
-                               0, L1_PAGETABLE_ENTRIES);
-    ASSERT(p2m_entry);
+        entry_content.l1 = l2e_content.l2;
+        paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 2);
+    }
 
     /* Track the highest gfn for which we have ever had a valid mapping */
     if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) ) 
-        d->arch.p2m.max_mapped_pfn = gfn;
-
-    if ( mfn_valid(mfn) )
-        entry_content = l1e_from_pfn(mfn_x(mfn), l1e_flags);
-    else
-        entry_content = l1e_empty();
-
-    /* level 1 entry */
-    paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
+        d->arch.p2m.max_mapped_pfn = gfn + (1UL << order) - 1;
 
     /* Success */
     rv = 1;
@@ -307,7 +375,7 @@ int p2m_alloc_table(struct domain *d,
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     gfn = 0;
     mfn = _mfn(INVALID_MFN);
-    if ( !set_p2m_entry(d, gfn, mfn, __PAGE_HYPERVISOR|_PAGE_USER) )
+    if ( !set_p2m_entry(d, gfn, mfn, 0, __PAGE_HYPERVISOR|_PAGE_USER) )
         goto error;
 
     for ( entry = d->page_list.next;
@@ -325,7 +393,7 @@ int p2m_alloc_table(struct domain *d,
             (gfn != 0x55555555L)
 #endif
              && gfn != INVALID_M2P_ENTRY
-             && !set_p2m_entry(d, gfn, mfn, __PAGE_HYPERVISOR|_PAGE_USER) )
+             && !set_p2m_entry(d, gfn, mfn, 0, __PAGE_HYPERVISOR|_PAGE_USER) )
             goto error;
     }
 
@@ -419,6 +487,14 @@ gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
         unmap_domain_page(l2e);
         return _mfn(INVALID_MFN);
     }
+    else if ( (l2e_get_flags(*l2e) & _PAGE_PSE) )
+    {
+        mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
+        unmap_domain_page(l2e);
+        
+        return mfn_valid(mfn) ? mfn : _mfn(INVALID_MFN);
+    }
+    
     mfn = _mfn(l2e_get_pfn(*l2e));
     unmap_domain_page(l2e);
 
@@ -573,6 +649,29 @@ static void audit_p2m(struct domain *d)
                         gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                         continue;
                     }
+
+                    
+                    /* check for super page */
+                    if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE )
+                    {
+                        mfn = l2e_get_pfn(l2e[i2]);
+                        ASSERT(mfn_valid(_mfn(mfn)));
+                        for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++)
+                        {
+                            m2pfn = get_gpfn_from_mfn(mfn+i1);
+                            if ( m2pfn != (gfn + i) )
+                            {
+                                pmbad++;
+                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
+                                           " -> gfn %#lx\n", gfn+i, mfn+i,
+                                           m2pfn);
+                                BUG();
+                            }
+                        }
+                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
+                        continue;
+                    }
+
                     l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
                     
                     for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
@@ -626,8 +725,10 @@ static void audit_p2m(struct domain *d)
 
 
 static void
-p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+p2m_remove_page(struct domain *d, unsigned long gfn,
+                    unsigned long mfn, int order)
 {
+    unsigned long i;
     if ( !paging_mode_translate(d) )
         return;
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
@@ -635,24 +736,26 @@ p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn)
     ASSERT(mfn_x(gfn_to_mfn(d, gfn)) == mfn);
     //ASSERT(mfn_to_gfn(d, mfn) == gfn);
 
-    set_p2m_entry(d, gfn, _mfn(INVALID_MFN), __PAGE_HYPERVISOR|_PAGE_USER);
-    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+    set_p2m_entry(d, gfn, _mfn(INVALID_MFN), order,
+                         __PAGE_HYPERVISOR|_PAGE_USER);
+    for ( i = 0; i < (1UL << order); i++ )
+        set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
 }
 
 void
 guest_physmap_remove_page(struct domain *d, unsigned long gfn,
-                          unsigned long mfn)
+                          unsigned long mfn, int order)
 {
     p2m_lock(d);
     audit_p2m(d);
-    p2m_remove_page(d, gfn, mfn);
+    p2m_remove_page(d, gfn, mfn, order);
     audit_p2m(d);
     p2m_unlock(d);    
 }
 
 int
-guest_physmap_add_page(struct domain *d, unsigned long gfn,
-                       unsigned long mfn)
+guest_physmap_add_entry(struct domain *d, unsigned long gfn,
+                       unsigned long mfn, int order, u32 l1e_flags)
 {
     unsigned long ogfn;
     mfn_t omfn;
@@ -685,7 +788,7 @@ guest_physmap_add_page(struct domain *d, unsigned long gfn,
     omfn = gfn_to_mfn(d, gfn);
     if ( mfn_valid(omfn) )
     {
-        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), __PAGE_HYPERVISOR|_PAGE_USER) )
+        if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), order, l1e_flags) )
             rc = -EINVAL;
 
         set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
@@ -709,12 +812,12 @@ guest_physmap_add_page(struct domain *d, unsigned long gfn,
             P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", 
                       ogfn , mfn_x(omfn));
             if ( mfn_x(omfn) == mfn ) 
-                p2m_remove_page(d, ogfn, mfn);
+                p2m_remove_page(d, ogfn, mfn, order);
         }
     }
 
-    if ( !set_p2m_entry(d, gfn, _mfn(mfn), __PAGE_HYPERVISOR|_PAGE_USER) )
-            rc = -EINVAL;
+    if ( !set_p2m_entry(d, gfn, _mfn(mfn), order, l1e_flags) )
+        rc = -EINVAL;
 
     set_gpfn_from_mfn(mfn, gfn);
 
@@ -724,6 +827,20 @@ guest_physmap_add_page(struct domain *d, unsigned long gfn,
     return rc;
 }
 
+int
+guest_physmap_add_page(struct domain *d, unsigned long gfn,
+                       unsigned long mfn, int order)
+{
+    int ret = 0;
+
+
+    ret = guest_physmap_add_entry(d, gfn, mfn, order,
+                                  __PAGE_HYPERVISOR | _PAGE_USER);
+
+    /* TODO: fix exit path when failure */
+    return ret;
+}
+
 /* This function goes through P2M table and modify l1e flags of all pages. Note
  * that physical base address of l1e is intact. This function can be used for
  * special purpose, such as marking physical memory as NOT WRITABLE for
@@ -838,7 +955,7 @@ int p2m_set_flags(struct domain *d, paddr_t gpa, u32 l1e_flags)
     gfn = gpa >> PAGE_SHIFT;
     mfn = gfn_to_mfn(d, gfn);
     if ( mfn_valid(mfn) )
-        set_p2m_entry(d, gfn, mfn, l1e_flags);
+        set_p2m_entry(d, gfn, mfn, 0, l1e_flags);
     
     p2m_unlock(d);
 
diff --git a/common/grant_table.c b/common/grant_table.c
index dc14f5a..6c3401a 100644
--- a/common/grant_table.c
+++ b/common/grant_table.c
@@ -925,7 +925,7 @@ gnttab_transfer(
         spin_lock(&e->grant_table->lock);
 
         sha = &shared_entry(e->grant_table, gop.ref);
-        guest_physmap_add_page(e, sha->frame, mfn);
+        guest_physmap_add_page(e, sha->frame, mfn, 0);
         sha->frame = mfn;
         wmb();
         sha->flags |= GTF_transfer_completed;
diff --git a/common/memory.c b/common/memory.c
index 24f5848..72b223c 100644
--- a/common/memory.c
+++ b/common/memory.c
@@ -129,8 +129,7 @@ static void populate_physmap(struct memop_args *a)
 
         if ( unlikely(paging_mode_translate(d)) )
         {
-            for ( j = 0; j < (1 << a->extent_order); j++ )
-                if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
+            if ( guest_physmap_add_page(d, gpfn, mfn, a->extent_order) )
                     goto out;
         }
         else
@@ -174,7 +173,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
     if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
         put_page(page);
 
-    guest_physmap_remove_page(d, gmfn, mfn);
+    guest_physmap_remove_page(d, gmfn, mfn, 0);
 
     put_page(page);
 
@@ -407,7 +406,7 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
             if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 BUG();
             mfn = page_to_mfn(page);
-            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn);
+            guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
             put_page(page);
         }
 
@@ -429,8 +428,8 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
             if ( unlikely(paging_mode_translate(d)) )
             {
                 /* Ignore failure here. There's nothing we can do. */
-                for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
-                    (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
+                    (void)guest_physmap_add_page(d, gpfn, mfn,
+                                        exch.out.extent_order);
             }
             else
             {
diff --git a/include/asm-ia64/grant_table.h b/include/asm-ia64/grant_table.h
index f5a38d9..b38182e 100644
--- a/include/asm-ia64/grant_table.h
+++ b/include/asm-ia64/grant_table.h
@@ -12,7 +12,7 @@ int create_grant_host_mapping(unsigned long gpaddr, unsigned long mfn, unsigned
 int destroy_grant_host_mapping(unsigned long gpaddr, unsigned long mfn, unsigned int flags);
 
 // for grant transfer
-int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
+int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn, int order);
 
 /* XXX
  * somewhere appropriate
diff --git a/include/asm-ia64/shadow.h b/include/asm-ia64/shadow.h
index d978a2b..16376ed 100644
--- a/include/asm-ia64/shadow.h
+++ b/include/asm-ia64/shadow.h
@@ -40,8 +40,8 @@
  * Utilities to change relationship of gpfn->mfn for designated domain,
  * which is required by gnttab transfer, balloon, device model and etc.
  */
-int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
-void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
+int guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn, int order);
+void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn, int order);
 
 static inline int
 shadow_mode_enabled(struct domain *d)
diff --git a/include/asm-x86/p2m.h b/include/asm-x86/p2m.h
index 5ab11e2..53e66e4 100644
--- a/include/asm-x86/p2m.h
+++ b/include/asm-x86/p2m.h
@@ -39,7 +39,9 @@
 static inline mfn_t gfn_to_mfn_current(unsigned long gfn)
 {
     l1_pgentry_t l1e = l1e_empty();
+    l2_pgentry_t l2e = l2e_empty();
     int ret;
+    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
 
     if ( gfn > current->domain->arch.p2m.max_mapped_pfn )
         return _mfn(INVALID_MFN);
@@ -47,12 +49,25 @@ static inline mfn_t gfn_to_mfn_current(unsigned long gfn)
     /* Don't read off the end of the p2m table */
     ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));
 
-    ret = __copy_from_user(&l1e,
-                           &phys_to_machine_mapping[gfn],
-                           sizeof(l1e));
 
-    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
-        return _mfn(l1e_get_pfn(l1e));
+    ret = __copy_from_user(&l2e,
+                           &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START) + l2_linear_offset(addr)],
+                           sizeof(l2e));
+
+    if ( (ret == 0) && (l2e_get_flags(l2e) & _PAGE_PRESENT) &&
+         (l2e_get_flags(l2e) & _PAGE_PSE) )
+    {
+        return _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
+    }
+    else
+    {
+        ret = __copy_from_user(&l1e,
+                               &phys_to_machine_mapping[gfn],
+                               sizeof(l1e));
+        
+        if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
+            return _mfn(l1e_get_pfn(l1e));
+    }
 
     return _mfn(INVALID_MFN);
 }
@@ -123,11 +138,11 @@ void p2m_teardown(struct domain *d);
 
 /* Add a page to a domain's p2m table */
 int guest_physmap_add_page(struct domain *d, unsigned long gfn,
-                            unsigned long mfn);
+                            unsigned long mfn, int order);
 
 /* Remove a page from a domain's p2m table */
 void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
-                               unsigned long mfn);
+                               unsigned long mfn, int order);
 
 /* set P2M table l1e flags */
 void p2m_set_flags_global(struct domain *d, u32 l1e_flags);
diff --git a/include/xen/paging.h b/include/xen/paging.h
index 3e77d94..713c6ee 100644
--- a/include/xen/paging.h
+++ b/include/xen/paging.h
@@ -18,8 +18,8 @@
 #else
 
 #define paging_mode_translate(d)              (0)
-#define guest_physmap_add_page(d, p, m)       (0)
-#define guest_physmap_remove_page(d, p, m)    ((void)0)
+#define guest_physmap_add_page(d, p, m, order)       (0)
+#define guest_physmap_remove_page(d, p, m, order)    ((void)0)
 
 #endif