kernel-2.6.18-128.1.10.el5.src.rpm

Patch: linux-2.6-xen-ia64-kernel-panics-when-dom0_mem-is-specified_2.patch
From: Kei Tokunaga <ktokunag@redhat.com>
Subject: Re: [RHEL5.1 PATCH 9/21] kernel-xen panics when dom0_mem is specified
Date: Thu, 21 Jun 2007 17:16:07 -0400
Bugzilla: 217593
Message-ID: <467AEA97.30101@redhat.com>
Changelog: [xen] ia64: kernel-xen panics when dom0_mem is specified(2)

Fujitsu has done more testing and found that the attached
patch is necessary to fix the bug completely.  It applies
to 2.6.18-29.  We have also run the same testing as last
time on our box, confirming that bare metal still works.

The patch consists of 10 additional cset patches.  With
this patch, you can specify a dom0_mem value of more than
6GB, and the IOMMU should be usable on HP boxes.  Some
kernel config changes (config_xen_ia64) are also necessary;
the diff to the config will be posted in a separate email.
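
As a rough illustration only (the image names, root device, and
the 8G figure below are placeholders, not part of this patch),
the larger dom0_mem reservation is requested from the bootloader
in the usual way; an ia64 elilo.conf entry might look like:

    image=vmlinuz-2.6.18-xen
            label=xen
            vmm=xen.gz
            initrd=initrd-2.6.18-xen.img
            read-only
            append="dom0_mem=8G -- root=/dev/sda2"

Here everything before the "--" goes to the hypervisor and the
rest to the dom0 kernel.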

This patch includes only ia64-related changes.  It also
touches a few lines in drivers/xen/balloon/balloon.c and
lib/Makefile, but those changes are for ia64 as well.

Backport of cset#13251(xen-3.1-testing)
Backport of cset#15018, 15021, 15022, 15023, 15024, 15041,
            15143, and 15169(xen-ia64-unstable)
Backport of cset#62(linux-2.6.18-xen)

Thanks,
Kei

---

 linux-2.6.18.ia64-kei/arch/ia64/Kconfig              |    4 
 linux-2.6.18.ia64-kei/arch/ia64/Makefile             |    1 
 linux-2.6.18.ia64-kei/arch/ia64/kernel/acpi.c        |    6 
 linux-2.6.18.ia64-kei/arch/ia64/xen/machvec.c        |    4 
 linux-2.6.18.ia64-kei/arch/ia64/xen/xen_dma.c        |  145 +++++++++++++++++++
 linux-2.6.18.ia64-kei/include/asm-ia64/machvec.h     |    2 
 linux-2.6.18.ia64-kei/include/asm-ia64/machvec_xen.h |   37 ++++
 7 files changed, 199 insertions(+)

diff -puN arch/ia64/Kconfig~15023ia64-Create_Xen_machine_vector arch/ia64/Kconfig
--- linux-2.6.18.ia64/arch/ia64/Kconfig~15023ia64-Create_Xen_machine_vector	2007-06-19 12:07:58.000000000 -0400
+++ linux-2.6.18.ia64-kei/arch/ia64/Kconfig	2007-06-19 12:07:58.000000000 -0400
@@ -161,6 +161,10 @@ config IA64_SGI_SN2
 config IA64_HP_SIM
 	bool "Ski-simulator"
 
+config IA64_XEN
+	bool "Xen guest"
+	depends on XEN
+
 endchoice
 
 choice
diff -puN arch/ia64/Makefile~15023ia64-Create_Xen_machine_vector arch/ia64/Makefile
--- linux-2.6.18.ia64/arch/ia64/Makefile~15023ia64-Create_Xen_machine_vector	2007-06-19 12:07:58.000000000 -0400
+++ linux-2.6.18.ia64-kei/arch/ia64/Makefile	2007-06-19 12:07:58.000000000 -0400
@@ -60,6 +60,7 @@ core-$(CONFIG_IA64_DIG) 	+= arch/ia64/di
 core-$(CONFIG_IA64_GENERIC) 	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
+core-$(CONFIG_IA64_XEN)		+= arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
 core-$(CONFIG_XEN)		+= arch/ia64/xen/
 
diff -puN arch/ia64/kernel/acpi.c~15023ia64-Create_Xen_machine_vector arch/ia64/kernel/acpi.c
--- linux-2.6.18.ia64/arch/ia64/kernel/acpi.c~15023ia64-Create_Xen_machine_vector	2007-06-19 12:07:58.000000000 -0400
+++ linux-2.6.18.ia64-kei/arch/ia64/kernel/acpi.c	2007-06-19 12:07:58.000000000 -0400
@@ -109,6 +109,10 @@ const char *acpi_get_sysname(void)
 		return "hpzx1";
 	} else if (!strcmp(hdr->oem_id, "SGI")) {
 		return "sn2";
+#ifdef CONFIG_XEN
+	} else if (is_running_on_xen() && !strcmp(hdr->oem_id, "XEN")) {
+		return "xen";
+#endif
 	}
 
 	return "dig";
@@ -123,6 +127,8 @@ const char *acpi_get_sysname(void)
 	return "sn2";
 # elif defined (CONFIG_IA64_DIG)
 	return "dig";
+# elif defined (CONFIG_IA64_XEN)
+	return "xen";
 # else
 #	error Unknown platform.  Fix acpi.c.
 # endif
diff -puN /dev/null arch/ia64/xen/machvec.c
--- /dev/null	2007-06-19 10:58:53.352000000 -0400
+++ linux-2.6.18.ia64-kei/arch/ia64/xen/machvec.c	2007-06-19 12:07:58.000000000 -0400
@@ -0,0 +1,4 @@
+#define MACHVEC_PLATFORM_NAME           xen
+#define MACHVEC_PLATFORM_HEADER         <asm/machvec_xen.h>
+#include <asm/machvec_init.h>
+
diff -puN /dev/null arch/ia64/xen/xen_dma.c
--- /dev/null	2007-06-19 10:58:53.352000000 -0400
+++ linux-2.6.18.ia64-kei/arch/ia64/xen/xen_dma.c	2007-06-19 12:07:58.000000000 -0400
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ * 	Alex Williamson <alex.williamson@hp.com>
+ *
+ * Basic DMA mapping services for Xen guests.
+ * Based on arch/i386/kernel/pci-dma-xen.c.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+
+#define IOMMU_BUG_ON(test)					\
+do {								\
+	if (unlikely(test)) {					\
+		printk(KERN_ALERT "Fatal DMA error!\n");	\
+		BUG();						\
+	}							\
+} while (0)
+
+
+/*
+ * This should be broken out of swiotlb and put in a common place
+ * when merged with upstream Linux.
+ */
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+	dma_addr_t mask = 0xffffffff;
+
+	/* If the device has a mask, use it, otherwise default to 32 bits */
+	if (hwdev && hwdev->dma_mask)
+		mask = *hwdev->dma_mask;
+	return (addr & ~mask) != 0;
+}
+
+int
+xen_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+	   int direction)
+{
+	int i;
+
+	for (i = 0 ; i < nents ; i++) {
+		sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
+		sg[i].dma_length  = sg[i].length;
+
+		IOMMU_BUG_ON(address_needs_mapping(hwdev, sg[i].dma_address));
+	}
+
+	return nents;
+}
+EXPORT_SYMBOL(xen_map_sg);
+
+void
+xen_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+	     int direction)
+{
+}
+EXPORT_SYMBOL(xen_unmap_sg);
+
+int
+xen_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+EXPORT_SYMBOL(xen_dma_mapping_error);
+
+int
+xen_dma_supported(struct device *dev, u64 mask)
+{
+	return 1;
+}
+EXPORT_SYMBOL(xen_dma_supported);
+
+void *
+xen_alloc_coherent(struct device *dev, size_t size,
+		   dma_addr_t *dma_handle, gfp_t gfp)
+{
+	unsigned long vaddr;
+	unsigned int order = get_order(size);
+
+	vaddr = __get_free_pages(gfp, order);
+
+	if (!vaddr)
+		return NULL;
+
+	if (xen_create_contiguous_region(vaddr, order,
+					 dev->coherent_dma_mask)) {
+		free_pages(vaddr, order);
+		return NULL;
+	}
+
+	memset((void *)vaddr, 0, size);
+	*dma_handle = virt_to_bus((void *)vaddr);
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(xen_alloc_coherent);
+
+void
+xen_free_coherent(struct device *dev, size_t size,
+		      void *vaddr, dma_addr_t dma_handle)
+{
+	unsigned int order =  get_order(size);
+
+	xen_destroy_contiguous_region((unsigned long)vaddr, order);
+	free_pages((unsigned long)vaddr, order);
+}
+EXPORT_SYMBOL(xen_free_coherent);
+
+dma_addr_t
+xen_map_single(struct device *dev, void *ptr, size_t size,
+	       int direction)
+{
+	dma_addr_t dma_addr = virt_to_bus(ptr);
+
+	IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
+	IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
+
+	return dma_addr;
+}
+EXPORT_SYMBOL(xen_map_single);
+
+void
+xen_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 int direction)
+{
+}
+EXPORT_SYMBOL(xen_unmap_single);
diff -puN include/asm-ia64/machvec.h~15023ia64-Create_Xen_machine_vector include/asm-ia64/machvec.h
--- linux-2.6.18.ia64/include/asm-ia64/machvec.h~15023ia64-Create_Xen_machine_vector	2007-06-19 12:07:58.000000000 -0400
+++ linux-2.6.18.ia64-kei/include/asm-ia64/machvec.h	2007-06-19 12:07:58.000000000 -0400
@@ -109,6 +109,8 @@ extern void machvec_tlb_migrate_finish (
 #  include <asm/machvec_hpzx1_swiotlb.h>
 # elif defined (CONFIG_IA64_SGI_SN2)
 #  include <asm/machvec_sn2.h>
+# elif defined (CONFIG_IA64_XEN)
+#  include <asm/machvec_xen.h>
 # elif defined (CONFIG_IA64_GENERIC)
 
 # ifdef MACHVEC_PLATFORM_HEADER
diff -puN /dev/null include/asm-ia64/machvec_xen.h
--- /dev/null	2007-06-19 10:58:53.352000000 -0400
+++ linux-2.6.18.ia64-kei/include/asm-ia64/machvec_xen.h	2007-06-19 12:07:58.000000000 -0400
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_MACHVEC_XEN_h
+#define _ASM_IA64_MACHVEC_XEN_h
+
+extern ia64_mv_setup_t			dig_setup;
+extern ia64_mv_dma_alloc_coherent	xen_alloc_coherent;
+extern ia64_mv_dma_free_coherent	xen_free_coherent;
+extern ia64_mv_dma_map_single		xen_map_single;
+extern ia64_mv_dma_unmap_single		xen_unmap_single;
+extern ia64_mv_dma_map_sg		xen_map_sg;
+extern ia64_mv_dma_unmap_sg		xen_unmap_sg;
+extern ia64_mv_dma_supported		xen_dma_supported;
+extern ia64_mv_dma_mapping_error	xen_dma_mapping_error;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name				"xen"
+#define platform_setup				dig_setup
+#define platform_dma_init			machvec_noop
+#define platform_dma_alloc_coherent		xen_alloc_coherent
+#define platform_dma_free_coherent		xen_free_coherent
+#define platform_dma_map_single			xen_map_single
+#define platform_dma_unmap_single		xen_unmap_single
+#define platform_dma_map_sg			xen_map_sg
+#define platform_dma_unmap_sg			xen_unmap_sg
+#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device	machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
+#define platform_dma_supported			xen_dma_supported
+#define platform_dma_mapping_error		xen_dma_mapping_error
+
+#endif /* _ASM_IA64_MACHVEC_XEN_h */

_
---

 linux-2.6.18.ia64-kei/arch/ia64/hp/common/sba_iommu.c |   59 -
 linux-2.6.18.ia64-kei/arch/ia64/kernel/setup.c        |    2 
 linux-2.6.18.ia64-kei/arch/ia64/xen/Makefile          |    6 
 linux-2.6.18.ia64-kei/arch/ia64/xen/hypervisor.c      |   85 +
 linux-2.6.18.ia64-kei/arch/ia64/xen/swiotlb.c         |  882 ++++++++++++++++++
 linux-2.6.18.ia64-kei/arch/ia64/xen/xen_dma.c         |   15 
 linux-2.6.18.ia64-kei/drivers/xen/balloon/balloon.c   |    4 
 linux-2.6.18.ia64-kei/include/asm-ia64/dma-mapping.h  |   69 -
 linux-2.6.18.ia64-kei/include/asm-ia64/hypervisor.h   |    4 
 linux-2.6.18.ia64-kei/include/asm-ia64/machvec_dig.h  |   15 
 linux-2.6.18.ia64-kei/lib/Makefile                    |    5 
 11 files changed, 1024 insertions(+), 122 deletions(-)

Index: latest/arch/ia64/hp/common/sba_iommu.c
===================================================================
--- latest.orig/arch/ia64/hp/common/sba_iommu.c
+++ latest/arch/ia64/hp/common/sba_iommu.c
@@ -766,13 +766,14 @@ sba_free_range(struct ioc *ioc, dma_addr
  */
 
 #if 1
-#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
-						      | 0x8000000000000000ULL)
+#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr =	\
+	((virt_to_bus((void *)vba) & ~0xFFFULL) | 0x8000000000000000ULL)
 #else
 void SBA_INLINE
 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
 {
-	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
+	*pdir_ptr = ((virt_to_bus((void *)vba) & ~0xFFFULL) |
+		    0x80000000000000FFULL);
 }
 #endif
 
@@ -787,6 +788,12 @@ mark_clean (void *addr, size_t size)
 {
 	unsigned long pg_addr, end;
 
+#ifdef CONFIG_XEN
+	/* XXX: Bad things happen starting domUs when this is enabled. */
+	if (is_running_on_xen())
+		return;
+#endif
+
 	pg_addr = PAGE_ALIGN((unsigned long) addr);
 	end = (unsigned long) addr + size;
 	while (pg_addr + PAGE_SIZE <= end) {
@@ -897,15 +904,14 @@ sba_map_single(struct device *dev, void 
 	unsigned long flags;
 #endif
 #ifdef ALLOW_IOV_BYPASS
-	unsigned long pci_addr = virt_to_phys(addr);
-#endif
+	unsigned long pci_addr = virt_to_bus(addr);
 
-#ifdef ALLOW_IOV_BYPASS
 	ASSERT(to_pci_dev(dev)->dma_mask);
 	/*
  	** Check if the PCI device can DMA to ptr... if so, just return ptr
  	*/
-	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
+	if (likely(pci_addr & ~to_pci_dev(dev)->dma_mask) == 0 &&
+		   !range_straddles_page_boundary(addr, size)) {
 		/*
  		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
@@ -976,13 +982,13 @@ sba_mark_clean(struct ioc *ioc, dma_addr
 	void	*addr;
 
 	if (size <= iovp_size) {
-		addr = phys_to_virt(ioc->pdir_base[off] &
-		                    ~0xE000000000000FFFULL);
+		addr = bus_to_virt(ioc->pdir_base[off] &
+				   ~0xE000000000000FFFULL);
 		mark_clean(addr, size);
 	} else {
 		do {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-			                    ~0xE000000000000FFFULL);
+			addr = bus_to_virt(ioc->pdir_base[off] &
+					   ~0xE000000000000FFFULL);
 			mark_clean(addr, min(size, iovp_size));
 			off++;
 			size -= iovp_size;
@@ -1021,7 +1027,7 @@ void sba_unmap_single(struct device *dev
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
-			mark_clean(phys_to_virt(iova), size);
+			mark_clean(bus_to_virt(iova), size);
 		}
 #endif
 		return;
@@ -1105,9 +1111,14 @@ sba_alloc_coherent (struct device *dev, 
 		return NULL;
 
 	memset(addr, 0, size);
-	*dma_handle = virt_to_phys(addr);
 
 #ifdef ALLOW_IOV_BYPASS
+#ifdef CONFIG_XEN
+	if (xen_create_contiguous_region((unsigned long)addr, get_order(size),
+					 fls64(dev->coherent_dma_mask)))
+		goto iommu_map;
+#endif
+	*dma_handle = virt_to_bus(addr);
 	ASSERT(dev->coherent_dma_mask);
 	/*
  	** Check if the PCI device can DMA to ptr... if so, just return ptr
@@ -1118,6 +1129,9 @@ sba_alloc_coherent (struct device *dev, 
 
 		return addr;
 	}
+#ifdef CONFIG_XEN
+iommu_map:
+#endif
 #endif
 
 	/*
@@ -1141,6 +1155,13 @@ sba_alloc_coherent (struct device *dev, 
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
+#if defined(ALLOW_IOV_BYPASS) && defined(CONFIG_XEN)
+	struct ioc *ioc = GET_IOC(dev);
+
+	if (likely((dma_handle & ioc->imask) != ioc->ibase))
+		xen_destroy_contiguous_region((unsigned long)vaddr,
+					      get_order(size));
+#endif
 	sba_unmap_single(dev, dma_handle, size, 0);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
@@ -1409,7 +1430,7 @@ int sba_map_sg(struct device *dev, struc
 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
 		for (sg = sglist ; filled < nents ; filled++, sg++){
 			sg->dma_length = sg->length;
-			sg->dma_address = virt_to_phys(sba_sg_address(sg));
+			sg->dma_address = virt_to_bus(sba_sg_address(sg));
 		}
 		return filled;
 	}
@@ -1563,13 +1584,19 @@ ioc_iova_init(struct ioc *ioc)
 	if (!ioc->pdir_base)
 		panic(PFX "Couldn't allocate I/O Page Table\n");
 
+#ifdef CONFIG_XEN
+	/* The page table needs to be pinned in Xen memory */
+	if (xen_create_contiguous_region((unsigned long)ioc->pdir_base,
+					 get_order(ioc->pdir_size), 0))
+		panic(PFX "Couldn't contiguously map I/O Page Table\n");
+#endif
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
 	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
 
 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
-	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
+	WRITE_REG(virt_to_bus(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
 
 	/*
 	** If an AGP device is present, only use half of the IOV space
@@ -1606,7 +1633,7 @@ ioc_iova_init(struct ioc *ioc)
 		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
 			memcpy(poison_addr, spill_poison, poison_size);
 
-		prefetch_spill_page = virt_to_phys(addr);
+		prefetch_spill_page = virt_to_bus(addr);
 
 		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
 	}
Index: latest/arch/ia64/kernel/setup.c
===================================================================
--- latest.orig/arch/ia64/kernel/setup.c
+++ latest/arch/ia64/kernel/setup.c
@@ -666,7 +666,7 @@ setup_arch (char **cmdline_p)
 	platform_setup(cmdline_p);
 	paging_init();
 #ifdef CONFIG_XEN
-	contiguous_bitmap_init(max_pfn);
+	xen_contiguous_bitmap_init(max_pfn);
 #endif
 }
 
Index: latest/arch/ia64/xen/Makefile
===================================================================
--- latest.orig/arch/ia64/xen/Makefile
+++ latest/arch/ia64/xen/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o \
-	 hypervisor.o pci-dma-xen.o util.o xencomm.o xcom_hcall.o \
-	 xcom_mini.o xcom_privcmd.o mem.o
+	 hypervisor.o util.o xencomm.o xcom_hcall.o xcom_mini.o \
+	 xcom_privcmd.o mem.o xen_dma.o
 
-pci-dma-xen-y := ../../i386/kernel/pci-dma-xen.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o
Index: latest/arch/ia64/xen/hypervisor.c
===================================================================
--- latest.orig/arch/ia64/xen/hypervisor.c
+++ latest/arch/ia64/xen/hypervisor.c
@@ -47,6 +47,7 @@ EXPORT_SYMBOL(running_on_xen);
 static int p2m_expose_init(void);
 #else
 #define p2m_expose_init() (-ENOSYS)
+#define p2m_expose_resume() ((void)0)
 #endif
 
 EXPORT_SYMBOL(__hypercall);
@@ -127,7 +128,7 @@ __contiguous_bitmap_init(unsigned long s
 }
 
 void
-contiguous_bitmap_init(unsigned long end_pfn)
+xen_contiguous_bitmap_init(unsigned long end_pfn)
 {
 	unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
 #ifndef CONFIG_VIRTUAL_MEM_MAP
@@ -882,6 +883,8 @@ static struct resource p2m_resource = {
 };
 static unsigned long p2m_assign_start_pfn __read_mostly;
 static unsigned long p2m_assign_end_pfn __read_mostly;
+static unsigned long p2m_expose_size;	// this is referenced only when resume.
+					// so __read_mostly doesn't make sense.
 volatile const pte_t* p2m_pte __read_mostly;
 
 #define GRNULE_PFN	PTRS_PER_PTE
@@ -942,8 +945,15 @@ p2m_expose_dtr_call(struct notifier_bloc
 	unsigned int cpu = (unsigned int)(long)ptr;
 	if (event != CPU_ONLINE)
 		return 0;
-	if (!(p2m_initialized && xen_ia64_p2m_expose_use_dtr))
-		smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg, 1, 1);
+	if (p2m_initialized && xen_ia64_p2m_expose_use_dtr) {
+		unsigned int me = get_cpu();
+		if (cpu == me)
+			p2m_itr(&p2m_itr_arg);
+		else
+			smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg,
+						 1, 1);
+		put_cpu();
+	}
 	return 0;
 }
 
@@ -958,7 +968,6 @@ static int
 p2m_expose_init(void)
 {
 	unsigned long num_pfn;
-	unsigned long size = 0;
 	unsigned long p2m_size = 0;
 	unsigned long align = ~0UL;
 	int error = 0;
@@ -994,7 +1003,8 @@ p2m_expose_init(void)
 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
 	if (xen_ia64_p2m_expose_use_dtr) {
 		unsigned long granule_pfn = 0;
-		p2m_size = p2m_max_low_pfn - p2m_min_low_pfn;
+		p2m_size = ((p2m_max_low_pfn - p2m_min_low_pfn +
+			     PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
 		for (i = 0;
 		     i < sizeof(p2m_page_shifts)/sizeof(p2m_page_shifts[0]);
 		     i++) {
@@ -1010,8 +1020,9 @@ p2m_expose_init(void)
 			p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
 			                              granule_pfn);
 			num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
-			size = num_pfn << PAGE_SHIFT;
-			p2m_size = num_pfn / PTRS_PER_PTE;
+			p2m_expose_size = num_pfn << PAGE_SHIFT;
+			p2m_size = ((num_pfn + PTRS_PER_PTE - 1) /
+				    PTRS_PER_PTE) << PAGE_SHIFT;
 			p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
 			if (p2m_size == page_size)
 				break;
@@ -1030,8 +1041,9 @@ p2m_expose_init(void)
 		                                p2m_granule_pfn);
 		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
 		num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
-		size = num_pfn << PAGE_SHIFT;
-		p2m_size = num_pfn / PTRS_PER_PTE;
+		p2m_expose_size = num_pfn << PAGE_SHIFT;
+		p2m_size = ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) <<
+			PAGE_SHIFT;
 		p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
 		align = max(privcmd_resource_align,
 		            p2m_granule_pfn << PAGE_SHIFT);
@@ -1044,7 +1056,7 @@ p2m_expose_init(void)
 	if (error) {
 		printk(KERN_ERR P2M_PREFIX
 		       "can't allocate region for p2m exposure "
-		       "[0x%016lx, 0x%016lx) 0x%016lx\n",
+		       "[0x%016lx, 0x%016lx] 0x%016lx\n",
 		       p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
 		goto out;
 	}
@@ -1054,14 +1066,14 @@ p2m_expose_init(void)
 	
 	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
 	                              p2m_assign_start_pfn,
-	                              size, p2m_granule_pfn);
+	                              p2m_expose_size, p2m_granule_pfn);
 	if (error) {
 		printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
 		       error);
 		printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
-		       "size 0x%016lx granule 0x%016lx\n",
+		       "expose_size 0x%016lx granule 0x%016lx\n",
 		       p2m_convert_min_pfn, p2m_assign_start_pfn,
-		       size, p2m_granule_pfn);;
+		       p2m_expose_size, p2m_granule_pfn);;
 		release_resource(&p2m_resource);
 		goto out;
 	}
@@ -1082,10 +1094,10 @@ p2m_expose_init(void)
 	p2m_initialized = 1;
 	printk(P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
 	       p2m_convert_min_pfn << PAGE_SHIFT,
-	       p2m_convert_max_pfn << PAGE_SHIFT);
+	       (p2m_convert_max_pfn << PAGE_SHIFT) + PAGE_SIZE);
 	printk(P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
 	       p2m_assign_start_pfn << PAGE_SHIFT,
-	       p2m_assign_end_pfn << PAGE_SHIFT,
+	       (p2m_assign_end_pfn << PAGE_SHIFT) + PAGE_SIZE,
 	       p2m_size / 1024);
 out:
 	unlock_cpu_hotplug();
@@ -1104,6 +1116,49 @@ p2m_expose_cleanup(void)
 }
 #endif
 
+static void
+p2m_expose_resume(void)
+{
+	int error;
+
+	if (!xen_ia64_p2m_expose || !p2m_initialized)
+		return;
+
+	/*
+	 * We can't call {lock, unlock}_cpu_hotplug() because
+	 * they require process context.
+	 * We don't need them because we're the only one cpu and
+	 * interrupts are masked when resume.
+	 */
+	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
+	                              p2m_assign_start_pfn,
+	                              p2m_expose_size, p2m_granule_pfn);
+	if (error) {
+		printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
+		       error);
+		printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
+		       "expose_size 0x%016lx granule 0x%016lx\n",
+		       p2m_convert_min_pfn, p2m_assign_start_pfn,
+		       p2m_expose_size, p2m_granule_pfn);;
+		p2m_initialized = 0;
+		smp_mb();
+		ia64_ptr(0x2, p2m_itr_arg.vaddr, p2m_itr_arg.log_page_size);
+		
+		/*
+		 * We can't call those clean up functions because they
+		 * require process context.
+		 */
+#if 0
+#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
+		if (xen_ia64_p2m_expose_use_dtr)
+			unregister_cpu_notifier(
+				&p2m_expose_dtr_hotplug_notifier);
+#endif
+		release_resource(&p2m_resource);
+#endif
+	}
+}
+
 //XXX inlinize?
 unsigned long
 p2m_phystomach(unsigned long gpfn)
Index: latest/arch/ia64/xen/swiotlb.c
===================================================================
--- /dev/null
+++ latest/arch/ia64/xen/swiotlb.c
@@ -0,0 +1,882 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * This implementation is for IA-64 and EM64T platforms that do not support
+ * I/O TLBs (aka DMA address translation hardware).
+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
+ * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
+ *			unnecessary i-cache flushing.
+ * 04/07/.. ak		Better overflow handling. Assorted fixes.
+ * 05/09/10 linville	Add support for syncing ranges, support syncing for
+ *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ */
+
+#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/scatterlist.h>
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#ifdef CONFIG_XEN
+/*
+ * What DMA mask should Xen use to remap the bounce buffer pool?  Most
+ * reports seem to indicate 30 bits is sufficient, except maybe for old
+ * sound cards that we probably don't care about anyway.  If we need to,
+ * we could put in some smarts to try to lower, but hopefully it's not
+ * necessary.
+ */
+#define DMA_BITS	(30)
+#endif
+
+#define OFFSET(val,align) ((unsigned long)	\
+	                   ( (val) & ( (align) - 1)))
+
+#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_bus(SG_ENT_VIRT_ADDRESS(SG))
+
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2.  What is the appropriate value ?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE	128
+
+/*
+ * log of the size of each IO TLB slab.  The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+	SYNC_FOR_CPU = 0,
+	SYNC_FOR_DEVICE = 1,
+};
+
+int swiotlb_force;
+
+/*
+ * Used to do a quick range check in swiotlb_unmap_single and
+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
+ */
+static char *io_tlb_start, *io_tlb_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long io_tlb_nslabs;
+
+/*
+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
+ */
+static unsigned long io_tlb_overflow = 32*1024;
+
+void *io_tlb_overflow_buffer;
+
+/*
+ * This is a free list describing the number of free entries available from
+ * each index
+ */
+static unsigned int *io_tlb_list;
+static unsigned int io_tlb_index;
+
+/*
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
+ */
+static unsigned char **io_tlb_orig_addr;
+
+/*
+ * Protect the above data structures in the map and unmap calls
+ */
+static DEFINE_SPINLOCK(io_tlb_lock);
+
+static int __init
+setup_io_tlb_npages(char *str)
+{
+	if (isdigit(*str)) {
+		io_tlb_nslabs = simple_strtoul(str, &str, 0);
+		/* avoid tail segment of size < IO_TLB_SEGSIZE */
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+	if (*str == ',')
+		++str;
+	if (!strcmp(str, "force"))
+		swiotlb_force = 1;
+	return 1;
+}
+__setup("swiotlb=", setup_io_tlb_npages);
+/* make io_tlb_overflow tunable too? */
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void
+swiotlb_init_with_default_size (size_t default_size)
+{
+	unsigned long i;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+#ifdef CONFIG_XEN
+	if (is_running_on_xen())
+		io_tlb_nslabs = roundup_pow_of_two(io_tlb_nslabs);
+#endif
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+
+#ifdef CONFIG_XEN
+	for (i = 0 ; i < io_tlb_nslabs ; i += IO_TLB_SEGSIZE) {
+		if (xen_create_contiguous_region(
+				(unsigned long)io_tlb_start +
+				(i << IO_TLB_SHIFT),
+				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
+				DMA_BITS))
+			panic("Failed to setup Xen contiguous region");
+	}
+#endif
+
+	/*
+	 * Allocate and initialize the free list array.  This array is used
+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+	 * between io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	for (i = 0; i < io_tlb_nslabs; i++)
+ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_index = 0;
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+#ifdef CONFIG_XEN
+	if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
+					 get_order(io_tlb_overflow), DMA_BITS))
+		panic("Failed to setup Xen contiguous region for overflow");
+#endif
+	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+}
+
+void
+swiotlb_init (void)
+{
+	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
+}
+
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned int order;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+#ifdef CONFIG_XEN
+	if (is_running_on_xen())
+		io_tlb_nslabs = roundup_pow_of_two(io_tlb_nslabs);
+#endif
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+		                                        order);
+		if (io_tlb_start)
+			break;
+		order--;
+	}
+
+	if (!io_tlb_start)
+		goto cleanup1;
+
+	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+		io_tlb_nslabs = SLABS_PER_PAGE << order;
+	}
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+#ifdef CONFIG_XEN
+	for (i = 0 ; i < io_tlb_nslabs ; i += IO_TLB_SEGSIZE) {
+		if (xen_create_contiguous_region(
+				(unsigned long)io_tlb_start +
+				(i << IO_TLB_SHIFT),
+				get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
+				DMA_BITS))
+			panic("Failed to setup Xen contiguous region");
+	}
+#endif
+	/*
+	 * Allocate and initialize the free list array.  This array is used
+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+	 * between io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+	                              get_order(io_tlb_nslabs * sizeof(int)));
+	if (!io_tlb_list)
+		goto cleanup2;
+
+	for (i = 0; i < io_tlb_nslabs; i++)
+ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_index = 0;
+
+	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+	                           get_order(io_tlb_nslabs * sizeof(char *)));
+	if (!io_tlb_orig_addr)
+		goto cleanup3;
+
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+	                                          get_order(io_tlb_overflow));
+	if (!io_tlb_overflow_buffer)
+		goto cleanup4;
+
+#ifdef CONFIG_XEN
+	if (xen_create_contiguous_region((unsigned long)io_tlb_overflow_buffer,
+					 get_order(io_tlb_overflow), DMA_BITS))
+		panic("Failed to setup Xen contiguous region for overflow");
+#endif
+	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+	return 0;
+
+cleanup4:
+	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+	                                                      sizeof(char *)));
+	io_tlb_orig_addr = NULL;
+cleanup3:
+	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+	                                                 sizeof(int)));
+	io_tlb_list = NULL;
+	io_tlb_end = NULL;
+cleanup2:
+	free_pages((unsigned long)io_tlb_start, order);
+	io_tlb_start = NULL;
+cleanup1:
+	io_tlb_nslabs = req_nslabs;
+	return -ENOMEM;
+}
+
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+	dma_addr_t mask = 0xffffffff;
+	/* If the device has a mask, use it, otherwise default to 32 bits */
+	if (hwdev && hwdev->dma_mask)
+		mask = *hwdev->dma_mask;
+	return (addr & ~mask) != 0;
+}
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+static void *
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+{
+	unsigned long flags;
+	char *dma_addr;
+	unsigned int nslots, stride, index, wrap;
+	int i;
+
+	/*
+	 * For mappings greater than a page, we limit the stride (and
+	 * hence alignment) to a page size.
+	 */
+	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	if (size > PAGE_SIZE)
+		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+	else
+		stride = 1;
+
+	BUG_ON(!nslots);
+
+	/*
+	 * Find suitable number of IO TLB entries size that will fit this
+	 * request and allocate a buffer from that IO TLB pool.
+	 */
+	spin_lock_irqsave(&io_tlb_lock, flags);
+	{
+		wrap = index = ALIGN(io_tlb_index, stride);
+
+		if (index >= io_tlb_nslabs)
+			wrap = index = 0;
+
+		do {
+			/*
+			 * If we find a slot that indicates we have 'nslots'
+			 * number of contiguous buffers, we allocate the
+			 * buffers from that slot and mark the entries as '0'
+			 * indicating unavailable.
+			 */
+			if (io_tlb_list[index] >= nslots) {
+				int count = 0;
+
+				for (i = index; i < (int) (index + nslots); i++)
+					io_tlb_list[i] = 0;
+				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+					io_tlb_list[i] = ++count;
+				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+				/*
+				 * Update the indices to avoid searching in
+				 * the next round.
+				 */
+				io_tlb_index = ((index + nslots) < io_tlb_nslabs
+						? (index + nslots) : 0);
+
+				goto found;
+			}
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		} while (index != wrap);
+
+		spin_unlock_irqrestore(&io_tlb_lock, flags);
+		return NULL;
+	}
+  found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+
+	/*
+	 * Save away the mapping from the original address to the DMA address.
+	 * This is needed when we sync the memory.  Then we sync the buffer if
+	 * needed.
+	 */
+	io_tlb_orig_addr[index] = buffer;
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+		memcpy(dma_addr, buffer, size);
+
+	return dma_addr;
+}
+
+/*
+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
+ */
+static void
+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+{
+	unsigned long flags;
+	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	char *buffer = io_tlb_orig_addr[index];
+
+	/*
+	 * First, sync the memory before unmapping the entry
+	 */
+	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		/*
+		 * bounce... copy the data back into the original buffer * and
+		 * delete the bounce buffer.
+		 */
+		memcpy(buffer, dma_addr, size);
+
+	/*
+	 * Return the buffer to the free list by setting the corresponding
+	 * entries to indicate the number of contigous entries available.
+	 * While returning the entries to the free list, we merge the entries
+	 * with slots below and above the pool being returned.
+	 */
+	spin_lock_irqsave(&io_tlb_lock, flags);
+	{
+		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+			 io_tlb_list[index + nslots] : 0);
+		/*
+		 * Step 1: return the slots to the free list, merging the
+		 * slots with superceeding slots
+		 */
+		for (i = index + nslots - 1; i >= index; i--)
+			io_tlb_list[i] = ++count;
+		/*
+		 * Step 2: merge the returned slots with the preceding slots,
+		 * if available (non zero)
+		 */
+		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+			io_tlb_list[i] = ++count;
+	}
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
+
+static void
+sync_single(struct device *hwdev, char *dma_addr, size_t size,
+	    int dir, int target)
+{
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	char *buffer = io_tlb_orig_addr[index];
+
+	switch (target) {
+	case SYNC_FOR_CPU:
+		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+			memcpy(buffer, dma_addr, size);
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
+		break;
+	case SYNC_FOR_DEVICE:
+		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+			memcpy(dma_addr, buffer, size);
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void *
+swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+		       dma_addr_t *dma_handle, gfp_t flags)
+{
+	unsigned long dev_addr;
+	void *ret;
+	int order = get_order(size);
+
+	/*
+	 * XXX fix me: the DMA API should pass us an explicit DMA mask
+	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
+	 * bit range instead of a 16MB one).
+	 */
+	flags |= GFP_DMA;
+
+	ret = (void *)__get_free_pages(flags, order);
+#ifdef CONFIG_XEN
+	if (ret && is_running_on_xen()) {
+		if (xen_create_contiguous_region((unsigned long)ret, order,
+					fls64(hwdev->coherent_dma_mask))) {
+			free_pages((unsigned long)ret, order);
+			ret = NULL;
+		} else {
+			/*
+			 * Short circuit the rest, xen_create_contiguous_region
+			 * should fail if it didn't give us an address within
+			 * the mask requested.  
+			 */
+			memset(ret, 0, size);
+			*dma_handle = virt_to_bus(ret);
+			return ret;
+		}
+	}
+#endif
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+		/*
+		 * The allocated memory isn't reachable by the device.
+		 * Fall back on swiotlb_map_single().
+		 */
+		free_pages((unsigned long) ret, order);
+		ret = NULL;
+	}
+	if (!ret) {
+		/*
+		 * We are either out of memory or the device can't DMA
+		 * to GFP_DMA memory; fall back on
+		 * swiotlb_map_single(), which will grab memory from
+		 * the lowest available address range.
+		 */
+		dma_addr_t handle;
+		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
+		if (swiotlb_dma_mapping_error(handle))
+			return NULL;
+
+		ret = bus_to_virt(handle);
+	}
+
+	memset(ret, 0, size);
+	dev_addr = virt_to_bus(ret);
+
+	/* Confirm address can be DMA'd by device */
+	if (address_needs_mapping(hwdev, dev_addr)) {
+		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
+		       (unsigned long long)*hwdev->dma_mask, dev_addr);
+		panic("swiotlb_alloc_coherent: allocated memory is out of "
+		      "range for device");
+	}
+	*dma_handle = dev_addr;
+	return ret;
+}
+
+void
+swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+		      dma_addr_t dma_handle)
+{
+	if (!(vaddr >= (void *)io_tlb_start
+                    && vaddr < (void *)io_tlb_end)) {
+#ifdef CONFIG_XEN
+		xen_destroy_contiguous_region((unsigned long)vaddr,
+					      get_order(size));
+#endif
+		free_pages((unsigned long) vaddr, get_order(size));
+	} else
+		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+}
+
+static void
+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+{
+	/*
+	 * Ran out of IOMMU space for this operation. This is very bad.
+	 * Unfortunately the drivers cannot handle this operation properly.
+	 * unless they check for dma_mapping_error (most don't)
+	 * When the mapping is small enough return a static buffer to limit
+	 * the damage, or panic when the transfer is too big.
+	 */
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+	       "device %s\n", size, dev ? dev->bus_id : "?");
+
+	if (size > io_tlb_overflow && do_panic) {
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Memory would be corrupted\n");
+		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Random memory would be DMAed\n");
+	}
+}
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode.  The
+ * physical address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ */
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+	unsigned long dev_addr = virt_to_bus(ptr);
+	void *map;
+
+	BUG_ON(dir == DMA_NONE);
+	/*
+	 * If the pointer passed in happens to be in the device's DMA window,
+	 * we can safely return the device addr and not worry about bounce
+	 * buffering it.
+	 */
+	if (!range_straddles_page_boundary(ptr, size) &&
+	    !address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+		return dev_addr;
+
+	/*
+	 * Oh well, have to allocate and map a bounce buffer.
+	 */
+	map = map_single(hwdev, ptr, size, dir);
+	if (!map) {
+		swiotlb_full(hwdev, size, dir, 1);
+		map = io_tlb_overflow_buffer;
+	}
+
+	dev_addr = virt_to_bus(map);
+
+	/*
+	 * Ensure that the address returned is DMA'ble
+	 */
+	if (address_needs_mapping(hwdev, dev_addr))
+		panic("map_single: bounce buffer is not DMA'ble");
+
+	return dev_addr;
+}
+
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+static void
+mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+#ifdef CONFIG_XEN
+	/* XXX: Bad things happen when starting domUs if this is enabled. */
+	if (is_running_on_xen())
+		return;
+#endif
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
+/*
+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_single call.  All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
+{
+	char *dma_addr = bus_to_virt(dev_addr);
+
+	BUG_ON(dir == DMA_NONE);
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		unmap_single(hwdev, dma_addr, size, dir);
+	else if (dir == DMA_FROM_DEVICE)
+		mark_clean(dma_addr, size);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so.  At the next point you give the dma
+ * address back to the card, you must first perform a
+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
+ */
+static inline void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+		    size_t size, int dir, int target)
+{
+	char *dma_addr = bus_to_virt(dev_addr);
+
+	BUG_ON(dir == DMA_NONE);
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		sync_single(hwdev, dma_addr, size, dir, target);
+	else if (dir == DMA_FROM_DEVICE)
+		mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+			    size_t size, int dir)
+{
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+			       size_t size, int dir)
+{
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+/*
+ * Same as above, but for a sub-range of the mapping.
+ */
+static inline void
+swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
+			  unsigned long offset, size_t size,
+			  int dir, int target)
+{
+	char *dma_addr = bus_to_virt(dev_addr) + offset;
+
+	BUG_ON(dir == DMA_NONE);
+	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+		sync_single(hwdev, dma_addr, size, dir, target);
+	else if (dir == DMA_FROM_DEVICE)
+		mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+				  unsigned long offset, size_t size, int dir)
+{
+	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+				  SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
+				     unsigned long offset, size_t size, int dir)
+{
+	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+				  SYNC_FOR_DEVICE);
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_single
+ * interface.  Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ *       DMA address/length pairs than there are SG table elements.
+ *       (for example via virtual mapping capabilities)
+ *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * same here.
+ */
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+	       int dir)
+{
+	void *addr;
+	unsigned long dev_addr;
+	int i;
+
+	BUG_ON(dir == DMA_NONE);
+
+	for (i = 0; i < nelems; i++, sg++) {
+		addr = SG_ENT_VIRT_ADDRESS(sg);
+		dev_addr = virt_to_bus(addr);
+		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+			void *map = map_single(hwdev, addr, sg->length, dir);
+			sg->dma_address = virt_to_bus(map);
+			if (!map) {
+				/* Don't panic here, we expect map_sg users
+				   to do proper error handling. */
+				swiotlb_full(hwdev, sg->length, dir, 0);
+				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
+				sg[0].dma_length = 0;
+				return 0;
+			}
+		} else
+			sg->dma_address = dev_addr;
+		sg->dma_length = sg->length;
+	}
+	return nelems;
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_single() above.
+ */
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+		 int dir)
+{
+	int i;
+
+	BUG_ON(dir == DMA_NONE);
+
+	for (i = 0; i < nelems; i++, sg++)
+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+			unmap_single(hwdev, (void *) bus_to_virt(sg->dma_address), sg->dma_length, dir);
+		else if (dir == DMA_FROM_DEVICE)
+			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
+ *
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
+ */
+static inline void
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
+		int nelems, int dir, int target)
+{
+	int i;
+
+	BUG_ON(dir == DMA_NONE);
+
+	for (i = 0; i < nelems; i++, sg++)
+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+			sync_single(hwdev, (void *) sg->dma_address,
+				    sg->dma_length, dir, target);
+}
+
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+			int nelems, int dir)
+{
+	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+			   int nelems, int dir)
+{
+	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+
+int
+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+{
+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+}
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
+ */
+int
+swiotlb_dma_supported (struct device *hwdev, u64 mask)
+{
+	return (virt_to_bus(io_tlb_end) - 1) <= mask;
+}
+
+EXPORT_SYMBOL(swiotlb_init);
+EXPORT_SYMBOL(swiotlb_map_single);
+EXPORT_SYMBOL(swiotlb_unmap_single);
+EXPORT_SYMBOL(swiotlb_map_sg);
+EXPORT_SYMBOL(swiotlb_unmap_sg);
+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
+EXPORT_SYMBOL(swiotlb_dma_supported);
Index: latest/arch/ia64/xen/xen_dma.c
===================================================================
--- latest.orig/arch/ia64/xen/xen_dma.c
+++ latest/arch/ia64/xen/xen_dma.c
@@ -22,6 +22,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
@@ -40,18 +41,18 @@ do {								\
  * when merged with upstream Linux.
  */
 static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *dev, dma_addr_t addr)
 {
 	dma_addr_t mask = 0xffffffff;
 
 	/* If the device has a mask, use it, otherwise default to 32 bits */
-	if (hwdev && hwdev->dma_mask)
-		mask = *hwdev->dma_mask;
+	if (dev && dev->dma_mask)
+		mask = *dev->dma_mask;
 	return (addr & ~mask) != 0;
 }
 
 int
-xen_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+xen_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	   int direction)
 {
 	int i;
@@ -60,7 +61,7 @@ xen_map_sg(struct device *hwdev, struct 
 		sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
 		sg[i].dma_length  = sg[i].length;
 
-		IOMMU_BUG_ON(address_needs_mapping(hwdev, sg[i].dma_address));
+		IOMMU_BUG_ON(address_needs_mapping(dev, sg[i].dma_address));
 	}
 
 	return nents;
@@ -68,7 +69,7 @@ xen_map_sg(struct device *hwdev, struct 
 EXPORT_SYMBOL(xen_map_sg);
 
 void
-xen_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+xen_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	     int direction)
 {
 }
@@ -101,7 +102,7 @@ xen_alloc_coherent(struct device *dev, s
 		return NULL;
 
 	if (xen_create_contiguous_region(vaddr, order,
-					 dev->coherent_dma_mask)) {
+					 fls64(dev->coherent_dma_mask))) {
 		free_pages(vaddr, order);
 		return NULL;
 	}
Index: latest/drivers/xen/balloon/balloon.c
===================================================================
--- latest.orig/drivers/xen/balloon/balloon.c
+++ latest/drivers/xen/balloon/balloon.c
@@ -459,8 +459,10 @@ static struct notifier_block xenstore_no
 
 static int __init balloon_init(void)
 {
+#ifdef CONFIG_X86
 	unsigned long pfn;
 	struct page *page;
+#endif
 
 	if (!is_running_on_xen())
 		return -ENODEV;
@@ -489,12 +491,14 @@ static int __init balloon_init(void)
 	balloon_pde->write_proc = balloon_write;
 #endif
     
+#ifdef CONFIG_X86
 	/* Initialise the balloon with excess memory space. */
 	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
 		page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			balloon_append(page);
 	}
+#endif
 
 	target_watch.callback = watch_target;
 	xenstore_notifier.notifier_call = balloon_init_watcher;
Index: latest/include/asm-ia64/dma-mapping.h
===================================================================
--- latest.orig/include/asm-ia64/dma-mapping.h
+++ latest/include/asm-ia64/dma-mapping.h
@@ -7,8 +7,6 @@
  */
 #include <asm/machvec.h>
 
-#ifndef CONFIG_XEN
-
 #define dma_alloc_coherent      platform_dma_alloc_coherent
 #define dma_free_coherent       platform_dma_free_coherent
 #define dma_map_single          platform_dma_map_single
@@ -22,52 +20,6 @@
 #define dma_mapping_error       platform_dma_mapping_error
 
 
-#else /* CONFIG_XEN */
-/* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
-#include <asm/hypervisor.h>
-/* Needed for arch/i386/kernel/swiotlb.c */
-#include <asm/swiotlb.h>
-
-int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-               enum dma_data_direction direction);
-void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-                  enum dma_data_direction direction);
-int dma_supported(struct device *dev, u64 mask);
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                         dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-                       dma_addr_t dma_handle);
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-                          enum dma_data_direction direction);
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                      enum dma_data_direction direction);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                             size_t size, enum dma_data_direction direction);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-                                size_t size,
-                                enum dma_data_direction direction);
-int dma_mapping_error(dma_addr_t dma_addr);
-
-#define flush_write_buffers()	do { } while (0)
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-                    enum dma_data_direction direction)
-{
-	if (swiotlb)
-		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-                       enum dma_data_direction direction)
-{
-	if (swiotlb)
-		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
-	flush_write_buffers();
-}
-#endif /* CONFIG_XEN */
-
 /* coherent mem. is cheap */
 static inline void *
 dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -98,9 +50,7 @@ dma_free_noncoherent(struct device *dev,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#ifndef CONFIG_XEN
 #define dma_supported		platform_dma_supported
-#endif
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -125,27 +75,16 @@ dma_cache_sync (void *vaddr, size_t size
 
 #define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
 
-#ifdef CONFIG_XEN
-/* arch/i386/kernel/swiotlb.o requires */
-void contiguous_bitmap_init(unsigned long end_pfn);
-
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-{
-	dma_addr_t mask = DMA_64BIT_MASK;
-	/* If the device has a mask, use it, otherwise default to 64 bits */
-	if (hwdev && hwdev->dma_mask)
-		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
-}
-#else
 #define contiguous_bitmap_init(end_pfn)	((void)end_pfn)
-#endif
 
 static inline int
 range_straddles_page_boundary(void *p, size_t size)
 {
 	extern unsigned long *contiguous_bitmap;
+
+	if (!is_running_on_xen())
+		return 0;
+
 	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
 	        !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
 }
Index: latest/include/asm-ia64/hypervisor.h
===================================================================
--- latest.orig/include/asm-ia64/hypervisor.h
+++ latest/include/asm-ia64/hypervisor.h
@@ -147,6 +147,7 @@ int privcmd_mmap(struct file * file, str
 #define pfn_pte_ma(_x,_y)	__pte_ma(0)     /* unmodified use */
 
 #ifndef CONFIG_VMX_GUEST
+void xen_contiguous_bitmap_init(unsigned long end_pfn);
 int __xen_create_contiguous_region(unsigned long vstart, unsigned int order, unsigned int address_bits);
 static inline int
 xen_create_contiguous_region(unsigned long vstart,
@@ -168,6 +169,9 @@ xen_destroy_contiguous_region(unsigned l
 		__xen_destroy_contiguous_region(vstart, order);
 }
 
+/* For drivers/xen/core/machine_reboot.c */
+#define HAVE_XEN_POST_SUSPEND
+void xen_post_suspend(int suspend_cancelled);
 #endif /* !CONFIG_VMX_GUEST */
 
 // for netfront.c, netback.c
Index: latest/include/asm-ia64/machvec_dig.h
===================================================================
--- latest.orig/include/asm-ia64/machvec_dig.h
+++ latest/include/asm-ia64/machvec_dig.h
@@ -13,19 +13,4 @@ extern ia64_mv_setup_t dig_setup;
 #define platform_name		"dig"
 #define platform_setup		dig_setup
 
-#ifdef CONFIG_XEN
-# define platform_dma_map_sg		dma_map_sg
-# define platform_dma_unmap_sg		dma_unmap_sg
-# define platform_dma_mapping_error	dma_mapping_error
-# define platform_dma_supported		dma_supported
-# define platform_dma_alloc_coherent	dma_alloc_coherent
-# define platform_dma_free_coherent	dma_free_coherent
-# define platform_dma_map_single	dma_map_single
-# define platform_dma_unmap_single	dma_unmap_single
-# define platform_dma_sync_single_for_cpu \
-					dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_device \
-					dma_sync_single_for_device
-#endif
-
 #endif /* _ASM_IA64_MACHVEC_DIG_h */
Index: latest/lib/Makefile
===================================================================
--- latest.orig/lib/Makefile
+++ latest/lib/Makefile
@@ -54,7 +54,12 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
+ifeq ($(CONFIG_X86),y)
 swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
+endif
+ifeq ($(CONFIG_IA64),y)
+swiotlb-$(CONFIG_XEN) := ../arch/ia64/xen/swiotlb.o
+endif
 
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h