kernel-2.6.18-128.1.10.el5.src.rpm

From: Jeff Garzik <jgarzik@redhat.com>
Subject: [RHEL5 PATCH] SATA super-jumbo update
Date: Wed, 6 Jun 2007 10:25:32 -0400
Bugzilla: 203781
Message-Id: <20070606142532.GD5997@devserv.devel.redhat.com>
Changelog: [sata] super-jumbo update
Update SATA to latest upstream as of 2.6.22-rc3.

Changes since last posting:
* fixed Intel combined-mode symbol handling in ata/Kconfig


Below is the config I stuck into config-generic, after removing all the
CONFIG_SCSI_SATA* and CONFIG_SCSI_PDC_ADMA symbols, for the all-arch
test build I did last night:
http://brewweb.devel.redhat.com/brew/taskinfo?taskID=809438

Alan will probably suggest some tweaks for it, based on some of the work
he is doing.

CONFIG_ATA=m
# CONFIG_ATA_NONSTANDARD is not set
CONFIG_ATA_ACPI=y
CONFIG_SATA_AHCI=m
CONFIG_SATA_SVW=m
CONFIG_ATA_PIIX=m
CONFIG_SATA_MV=m
CONFIG_SATA_NV=m
CONFIG_PDC_ADMA=m
CONFIG_SATA_QSTOR=m
CONFIG_SATA_PROMISE=m
CONFIG_SATA_SX4=m
CONFIG_SATA_SIL=m
CONFIG_SATA_SIL24=m
CONFIG_SATA_SIS=m
CONFIG_SATA_ULI=m
CONFIG_SATA_VIA=m
CONFIG_SATA_VITESSE=m
CONFIG_SATA_INIC162X=m
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
# CONFIG_PATA_CS5520 is not set
# CONFIG_PATA_CS5530 is not set
# CONFIG_PATA_CYPRESS is not set
# CONFIG_PATA_EFAR is not set
# CONFIG_ATA_GENERIC is not set
# CONFIG_PATA_HPT366 is not set
# CONFIG_PATA_HPT37X is not set
# CONFIG_PATA_HPT3X2N is not set
# CONFIG_PATA_HPT3X3 is not set
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
# CONFIG_PATA_TRIFLEX is not set
CONFIG_PATA_MARVELL=m
# CONFIG_PATA_MPIIX is not set
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
# CONFIG_PATA_NS87410 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PCMCIA is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
CONFIG_PATA_SIS=m
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_PLATFORM is not set
CONFIG_ATA_INTEL_COMBINED=y
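
For reference, the removed drivers/scsi-era symbols map onto the new
drivers/ata names essentially by dropping the SCSI_ prefix; a few
representative pairs (illustrative only, not an exhaustive list):

CONFIG_SCSI_SATA_AHCI=m   ->  CONFIG_SATA_AHCI=m
CONFIG_SCSI_SATA_NV=m     ->  CONFIG_SATA_NV=m
CONFIG_SCSI_SATA_SIL24=m  ->  CONFIG_SATA_SIL24=m
CONFIG_SCSI_PDC_ADMA=m    ->  CONFIG_PDC_ADMA=m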

diff -urN linux-2.6.18.x86_64.p3/drivers/ata/ahci.c linux-2.6.18.x86_64.p4/drivers/ata/ahci.c
--- linux-2.6.18.x86_64.p3/drivers/ata/ahci.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/ahci.c	2007-06-06 10:08:00.000000000 -0400
@@ -39,20 +39,19 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"2.0"
+#define DRV_VERSION	"2.2"
 
 
 enum {
 	AHCI_PCI_BAR		= 5,
+	AHCI_MAX_PORTS		= 32,
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
 	AHCI_USE_CLUSTERING	= 0,
@@ -74,11 +73,14 @@
 	AHCI_CMD_CLR_BUSY	= (1 << 10),
 
 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
+	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
 
 	board_ahci		= 0,
-	board_ahci_vt8251	= 1,
-	board_ahci_sb600	= 2,
+	board_ahci_pi		= 1,
+	board_ahci_vt8251	= 2,
+	board_ahci_ign_iferr	= 3,
+	board_ahci_sb600	= 4,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -163,13 +165,17 @@
 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
 
-	/* hpriv->flags bits */
-	AHCI_FLAG_MSI		= (1 << 0),
-
 	/* ap->flags bits */
-	AHCI_FLAG_RESET_NEEDS_CLO	= (1 << 24),
-	AHCI_FLAG_NO_NCQ		= (1 << 25),
+	AHCI_FLAG_NO_NCQ		= (1 << 24),
+	AHCI_FLAG_IGN_IRQ_IF_ERR	= (1 << 25), /* ignore IRQ_IF_ERR */
+	AHCI_FLAG_HONOR_PI		= (1 << 26), /* honor PORTS_IMPL */
 	AHCI_FLAG_IGN_SERR_INTERNAL	= (1 << 27), /* ignore SERR_INTERNAL */
+	AHCI_FLAG_32BIT_ONLY		= (1 << 28), /* force 32bit */
+
+	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+					  ATA_FLAG_SKIP_D2H_BSY |
+					  ATA_FLAG_ACPI_SATA,
 };
 
 struct ahci_cmd_hdr {
@@ -188,9 +194,10 @@
 };
 
 struct ahci_host_priv {
-	unsigned long		flags;
-	u32			cap;	/* cache of HOST_CAP register */
-	u32			port_map; /* cache of HOST_PORTS_IMPL reg */
+	u32			cap;		/* cap to use */
+	u32			port_map;	/* port map to use */
+	u32			saved_cap;	/* saved initial cap */
+	u32			saved_port_map;	/* saved initial port_map */
 };
 
 struct ahci_port_priv {
@@ -200,13 +207,16 @@
 	dma_addr_t		cmd_tbl_dma;
 	void			*rx_fis;
 	dma_addr_t		rx_fis_dma;
+	/* for NCQ spurious interrupt analysis */
+	unsigned int		ncq_saw_d2h:1;
+	unsigned int		ncq_saw_dmas:1;
+	unsigned int		ncq_saw_sdb:1;
 };
 
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
 static void ahci_irq_clear(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
@@ -216,12 +226,14 @@
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
+static void ahci_vt8251_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+#ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
 static int ahci_port_resume(struct ata_port *ap);
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
-static void ahci_remove_one (struct pci_dev *pdev);
+#endif
 
 static struct scsi_host_template ahci_sht = {
 	.module			= THIS_MODULE,
@@ -240,8 +252,6 @@
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
-	.suspend		= ata_scsi_device_suspend,
-	.resume			= ata_scsi_device_resume,
 };
 
 static const struct ata_port_operations ahci_ops = {
@@ -256,8 +266,9 @@
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
-	.irq_handler		= ahci_interrupt,
 	.irq_clear		= ahci_irq_clear,
+	.irq_on			= ata_dummy_irq_on,
+	.irq_ack		= ata_dummy_irq_ack,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
@@ -268,8 +279,44 @@
 	.error_handler		= ahci_error_handler,
 	.post_internal_cmd	= ahci_post_internal_cmd,
 
+#ifdef CONFIG_PM
+	.port_suspend		= ahci_port_suspend,
+	.port_resume		= ahci_port_resume,
+#endif
+
+	.port_start		= ahci_port_start,
+	.port_stop		= ahci_port_stop,
+};
+
+static const struct ata_port_operations ahci_vt8251_ops = {
+	.port_disable		= ata_port_disable,
+
+	.check_status		= ahci_check_status,
+	.check_altstatus	= ahci_check_status,
+	.dev_select		= ata_noop_dev_select,
+
+	.tf_read		= ahci_tf_read,
+
+	.qc_prep		= ahci_qc_prep,
+	.qc_issue		= ahci_qc_issue,
+
+	.irq_clear		= ahci_irq_clear,
+	.irq_on			= ata_dummy_irq_on,
+	.irq_ack		= ata_dummy_irq_ack,
+
+	.scr_read		= ahci_scr_read,
+	.scr_write		= ahci_scr_write,
+
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_vt8251_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
+#ifdef CONFIG_PM
 	.port_suspend		= ahci_port_suspend,
 	.port_resume		= ahci_port_resume,
+#endif
 
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
@@ -278,131 +325,116 @@
 static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci */
 	{
-		.sht		= &ahci_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_SKIP_D2H_BSY,
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_ops,
+	},
+	/* board_ahci_pi */
+	{
+		.flags		= AHCI_FLAG_COMMON | AHCI_FLAG_HONOR_PI,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
 	},
 	/* board_ahci_vt8251 */
 	{
-		.sht		= &ahci_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_SKIP_D2H_BSY |
-				  AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
+		.flags		= AHCI_FLAG_COMMON | ATA_FLAG_HRST_TO_RESUME |
+				  AHCI_FLAG_NO_NCQ,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &ahci_vt8251_ops,
+	},
+	/* board_ahci_ign_iferr */
+	{
+		.flags		= AHCI_FLAG_COMMON | AHCI_FLAG_IGN_IRQ_IF_ERR,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
 	},
 	/* board_ahci_sb600 */
 	{
-		.sht		= &ahci_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_SKIP_D2H_BSY |
-				  AHCI_FLAG_IGN_SERR_INTERNAL,
+		.flags		= AHCI_FLAG_COMMON |
+				  AHCI_FLAG_IGN_SERR_INTERNAL |
+				  AHCI_FLAG_32BIT_ONLY,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &ahci_ops,
 	},
-
 };
 
 static const struct pci_device_id ahci_pci_tbl[] = {
 	/* Intel */
-	{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH6 */
-	{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH6M */
-	{ PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7 */
-	{ PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7M */
-	{ PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7R */
-	{ PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ULi M5288 */
-	{ PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ESB2 */
-	{ PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH7-M DH */
-	{ PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8 */
-	{ PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8M */
-	{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH8M */
-	{ PCI_VENDOR_ID_INTEL, 0x2922, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9 */
-	{ PCI_VENDOR_ID_INTEL, 0x2923, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9 */	
-	{ PCI_VENDOR_ID_INTEL, 0x2924, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9 */	
-	{ PCI_VENDOR_ID_INTEL, 0x2925, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9 */	
-	{ PCI_VENDOR_ID_INTEL, 0x2927, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9 */	
-	{ PCI_VENDOR_ID_INTEL, 0x2929, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9M */	
-	{ PCI_VENDOR_ID_INTEL, 0x292a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9M */	
-	{ PCI_VENDOR_ID_INTEL, 0x292b, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9M */	
-	{ PCI_VENDOR_ID_INTEL, 0x292c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9M */	
-	{ PCI_VENDOR_ID_INTEL, 0x292f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9M */	
-	{ PCI_VENDOR_ID_INTEL, 0x294d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9 */	
-	{ PCI_VENDOR_ID_INTEL, 0x294e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ICH9M */
-
-	/* JMicron */
-	{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB360 */
-	{ 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB361 */
-	{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB363 */
-	{ 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB365 */
-	{ 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* JMicron JMB366 */
+	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
+	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
+	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
+	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
+	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
+	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
+	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
+	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
+	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
+	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
+	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
+
+	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
 
 	/* ATI */
-	{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci_sb600 }, /* ATI SB600 non-raid */
-	{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci }, /* ATI SB600 raid */
+	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
+	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 */
 
 	/* VIA */
-	{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci_vt8251 }, /* VIA VT8251 */
+	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
+	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
 
 	/* NVIDIA */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci },		/* MCP65 */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci },		/* MCP65 */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci },		/* MCP65 */
-	{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci },		/* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },		/* MCP67 */
+
+	/* SiS */
+	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
+	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
+	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
 
 	/* Generic, PCI class code for AHCI */
 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-	  0x010601, 0xffffff, board_ahci },
+	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
 
 	{ }	/* terminate list */
 };
@@ -412,20 +444,120 @@
 	.name			= DRV_NAME,
 	.id_table		= ahci_pci_tbl,
 	.probe			= ahci_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend		= ahci_pci_device_suspend,
 	.resume			= ahci_pci_device_resume,
-	.remove			= ahci_remove_one,
+#endif
 };
 
 
-static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
+static inline int ahci_nr_ports(u32 cap)
 {
-	return base + 0x100 + (port * 0x80);
+	return (cap & 0x1f) + 1;
 }
 
-static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
 {
-	return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
+	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+
+	return mmio + 0x100 + (ap->port_no * 0x80);
+}
+
+/**
+ *	ahci_save_initial_config - Save and fixup initial config values
+ *	@pdev: target PCI device
+ *	@pi: associated ATA port info
+ *	@hpriv: host private area to store config values
+ *
+ *	Some registers containing configuration info might be setup by
+ *	BIOS and might be cleared on reset.  This function saves the
+ *	initial values of those registers into @hpriv such that they
+ *	can be restored after controller reset.
+ *
+ *	If inconsistent, config values are fixed up by this function.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ahci_save_initial_config(struct pci_dev *pdev,
+				     const struct ata_port_info *pi,
+				     struct ahci_host_priv *hpriv)
+{
+	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+	u32 cap, port_map;
+	int i;
+
+	/* Values prefixed with saved_ are written back to host after
+	 * reset.  Values without are used for driver operation.
+	 */
+	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+	/* some chips lie about 64bit support */
+	if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "controller can't do 64bit DMA, forcing 32bit\n");
+		cap &= ~HOST_CAP_64;
+	}
+
+	/* fixup zero port_map */
+	if (!port_map) {
+		port_map = (1 << ahci_nr_ports(hpriv->cap)) - 1;
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "PORTS_IMPL is zero, forcing 0x%x\n", port_map);
+
+		/* write the fixed up value to the PI register */
+		hpriv->saved_port_map = port_map;
+	}
+
+	/* cross check port_map and cap.n_ports */
+	if (pi->flags & AHCI_FLAG_HONOR_PI) {
+		u32 tmp_port_map = port_map;
+		int n_ports = ahci_nr_ports(cap);
+
+		for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
+			if (tmp_port_map & (1 << i)) {
+				n_ports--;
+				tmp_port_map &= ~(1 << i);
+			}
+		}
+
+		/* Whine if inconsistent.  No need to update cap.
+		 * port_map is used to determine number of ports.
+		 */
+		if (n_ports || tmp_port_map)
+			dev_printk(KERN_WARNING, &pdev->dev,
+				   "nr_ports (%u) and implemented port map "
+				   "(0x%x) don't match\n",
+				   ahci_nr_ports(cap), port_map);
+	} else {
+		/* fabricate port_map from cap.nr_ports */
+		port_map = (1 << ahci_nr_ports(cap)) - 1;
+	}
+
+	/* record values to use during operation */
+	hpriv->cap = cap;
+	hpriv->port_map = port_map;
+}
+
+/**
+ *	ahci_restore_initial_config - Restore initial config
+ *	@host: target ATA host
+ *
+ *	Restore initial config stored by ahci_save_initial_config().
+ *
+ *	LOCKING:
+ *	None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+
+	writel(hpriv->saved_cap, mmio + HOST_CAP);
+	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
 }
 
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
@@ -441,7 +573,7 @@
 		return 0xffffffffU;
 	}
 
-	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -459,11 +591,12 @@
 		return;
 	}
 
-	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
-static void ahci_start_engine(void __iomem *port_mmio)
+static void ahci_start_engine(struct ata_port *ap)
 {
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp;
 
 	/* start DMA */
@@ -473,8 +606,9 @@
 	readl(port_mmio + PORT_CMD); /* flush */
 }
 
-static int ahci_stop_engine(void __iomem *port_mmio)
+static int ahci_stop_engine(struct ata_port *ap)
 {
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp;
 
 	tmp = readl(port_mmio + PORT_CMD);
@@ -496,19 +630,23 @@
 	return 0;
 }
 
-static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
-			      dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
+static void ahci_start_fis_rx(struct ata_port *ap)
 {
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
 	u32 tmp;
 
 	/* set FIS registers */
-	if (cap & HOST_CAP_64)
-		writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
-	writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
-
-	if (cap & HOST_CAP_64)
-		writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
-	writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+	if (hpriv->cap & HOST_CAP_64)
+		writel((pp->cmd_slot_dma >> 16) >> 16,
+		       port_mmio + PORT_LST_ADDR_HI);
+	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+	if (hpriv->cap & HOST_CAP_64)
+		writel((pp->rx_fis_dma >> 16) >> 16,
+		       port_mmio + PORT_FIS_ADDR_HI);
+	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
 
 	/* enable FIS reception */
 	tmp = readl(port_mmio + PORT_CMD);
@@ -519,8 +657,9 @@
 	readl(port_mmio + PORT_CMD);
 }
 
-static int ahci_stop_fis_rx(void __iomem *port_mmio)
+static int ahci_stop_fis_rx(struct ata_port *ap)
 {
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp;
 
 	/* disable FIS reception */
@@ -537,14 +676,16 @@
 	return 0;
 }
 
-static void ahci_power_up(void __iomem *port_mmio, u32 cap)
+static void ahci_power_up(struct ata_port *ap)
 {
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 cmd;
 
 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
 
 	/* spin up device */
-	if (cap & HOST_CAP_SSS) {
+	if (hpriv->cap & HOST_CAP_SSS) {
 		cmd |= PORT_CMD_SPIN_UP;
 		writel(cmd, port_mmio + PORT_CMD);
 	}
@@ -553,85 +694,63 @@
 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
 }
 
-static void ahci_power_down(void __iomem *port_mmio, u32 cap)
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
 {
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 cmd, scontrol;
 
-	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
-
-	if (cap & HOST_CAP_SSC) {
-		/* enable transitions to slumber mode */
-		scontrol = readl(port_mmio + PORT_SCR_CTL);
-		if ((scontrol & 0x0f00) > 0x100) {
-			scontrol &= ~0xf00;
-			writel(scontrol, port_mmio + PORT_SCR_CTL);
-		}
-
-		/* put device into slumber mode */
-		writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
+	if (!(hpriv->cap & HOST_CAP_SSS))
+		return;
 
-		/* wait for the transition to complete */
-		ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
-				  PORT_CMD_ICC_SLUMBER, 1, 50);
-	}
-
-	/* put device into listen mode */
-	if (cap & HOST_CAP_SSS) {
-		/* first set PxSCTL.DET to 0 */
-		scontrol = readl(port_mmio + PORT_SCR_CTL);
-		scontrol &= ~0xf;
-		writel(scontrol, port_mmio + PORT_SCR_CTL);
+	/* put device into listen mode, first set PxSCTL.DET to 0 */
+	scontrol = readl(port_mmio + PORT_SCR_CTL);
+	scontrol &= ~0xf;
+	writel(scontrol, port_mmio + PORT_SCR_CTL);
 
-		/* then set PxCMD.SUD to 0 */
-		cmd &= ~PORT_CMD_SPIN_UP;
-		writel(cmd, port_mmio + PORT_CMD);
-	}
+	/* then set PxCMD.SUD to 0 */
+	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+	cmd &= ~PORT_CMD_SPIN_UP;
+	writel(cmd, port_mmio + PORT_CMD);
 }
+#endif
 
-static void ahci_init_port(void __iomem *port_mmio, u32 cap,
-			   dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
+static void ahci_init_port(struct ata_port *ap)
 {
-	/* power up */
-	ahci_power_up(port_mmio, cap);
-
 	/* enable FIS reception */
-	ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
+	ahci_start_fis_rx(ap);
 
 	/* enable DMA */
-	ahci_start_engine(port_mmio);
+	ahci_start_engine(ap);
 }
 
-static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
 {
 	int rc;
 
 	/* disable DMA */
-	rc = ahci_stop_engine(port_mmio);
+	rc = ahci_stop_engine(ap);
 	if (rc) {
 		*emsg = "failed to stop engine";
 		return rc;
 	}
 
 	/* disable FIS reception */
-	rc = ahci_stop_fis_rx(port_mmio);
+	rc = ahci_stop_fis_rx(ap);
 	if (rc) {
 		*emsg = "failed stop FIS RX";
 		return rc;
 	}
 
-	/* put device into slumber mode */
-	ahci_power_down(port_mmio, cap);
-
 	return 0;
 }
 
-static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
+static int ahci_reset_controller(struct ata_host *host)
 {
-	u32 cap_save, tmp;
-
-	cap_save = readl(mmio + HOST_CAP);
-	cap_save &= ( (1<<28) | (1<<17) );
-	cap_save |= (1 << 27);
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+	u32 tmp;
 
 	/* global controller reset */
 	tmp = readl(mmio + HOST_CTL);
@@ -647,16 +766,17 @@
 
 	tmp = readl(mmio + HOST_CTL);
 	if (tmp & HOST_RESET) {
-		dev_printk(KERN_ERR, &pdev->dev,
+		dev_printk(KERN_ERR, host->dev,
 			   "controller reset failed (0x%x)\n", tmp);
 		return -EIO;
 	}
 
+	/* turn on AHCI mode */
 	writel(HOST_AHCI_EN, mmio + HOST_CTL);
 	(void) readl(mmio + HOST_CTL);	/* flush */
-	writel(cap_save, mmio + HOST_CAP);
-	writel(0xf, mmio + HOST_PORTS_IMPL);
-	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
+
+	/* some registers might be cleared on reset.  restore initial values */
+	ahci_restore_initial_config(host);
 
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
 		u16 tmp16;
@@ -670,23 +790,23 @@
 	return 0;
 }
 
-static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
-				 int n_ports, u32 cap)
+static void ahci_init_controller(struct ata_host *host)
 {
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	int i, rc;
 	u32 tmp;
 
-	for (i = 0; i < n_ports; i++) {
-		void __iomem *port_mmio = ahci_port_base(mmio, i);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *port_mmio = ahci_port_base(ap);
 		const char *emsg = NULL;
 
-#if 0 /* BIOSen initialize this incorrectly */
-		if (!(hpriv->port_map & (1 << i)))
+		if (ata_port_is_dummy(ap))
 			continue;
-#endif
 
 		/* make sure port is not active */
-		rc = ahci_deinit_port(port_mmio, cap, &emsg);
+		rc = ahci_deinit_port(ap, &emsg);
 		if (rc)
 			dev_printk(KERN_WARNING, &pdev->dev,
 				   "%s (%d)\n", emsg, rc);
@@ -696,14 +816,13 @@
 		VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
 		writel(tmp, port_mmio + PORT_SCR_ERR);
 
-		/* clear & turn off port IRQ */
+		/* clear port IRQ */
 		tmp = readl(port_mmio + PORT_IRQ_STAT);
 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
 		if (tmp)
 			writel(tmp, port_mmio + PORT_IRQ_STAT);
 
 		writel(1 << i, mmio + HOST_IRQ_STAT);
-		writel(0, port_mmio + PORT_IRQ_MASK);
 	}
 
 	tmp = readl(mmio + HOST_CTL);
@@ -715,7 +834,7 @@
 
 static unsigned int ahci_dev_classify(struct ata_port *ap)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	struct ata_taskfile tf;
 	u32 tmp;
 
@@ -743,8 +862,8 @@
 
 static int ahci_clo(struct ata_port *ap)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
+	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 	u32 tmp;
 
 	if (!(hpriv->cap & HOST_CAP_CLO))
@@ -762,22 +881,11 @@
 	return 0;
 }
 
-static int ahci_prereset(struct ata_port *ap)
-{
-	if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
-	    (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
-		/* ATA_BUSY hasn't cleared, so send a CLO */
-		ahci_clo(ap);
-	}
-
-	return ata_std_prereset(ap);
-}
-
-static int ahci_softreset(struct ata_port *ap, unsigned int *class)
+static int ahci_softreset(struct ata_port *ap, unsigned int *class,
+			  unsigned long deadline)
 {
 	struct ahci_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *port_mmio = ahci_port_base(ap);
 	const u32 cmd_fis_len = 5; /* five dwords */
 	const char *reason = NULL;
 	struct ata_taskfile tf;
@@ -794,15 +902,14 @@
 	}
 
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
-	rc = ahci_stop_engine(port_mmio);
+	rc = ahci_stop_engine(ap);
 	if (rc) {
 		reason = "failed to stop engine";
 		goto fail_restart;
 	}
 
 	/* check BUSY/DRQ, perform Command List Override if necessary */
-	ahci_tf_read(ap, &tf);
-	if (tf.command & (ATA_BUSY | ATA_DRQ)) {
+	if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
 		rc = ahci_clo(ap);
 
 		if (rc == -EOPNOTSUPP) {
@@ -815,7 +922,7 @@
 	}
 
 	/* restart engine */
-	ahci_start_engine(port_mmio);
+	ahci_start_engine(ap);
 
 	ata_tf_init(ap->device, &tf);
 	fis = pp->cmd_tbl;
@@ -830,7 +937,7 @@
 
 	writel(1, port_mmio + PORT_CMD_ISSUE);
 
-	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 2500);
+	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
 	if (tmp & 0x1) {
 		rc = -EIO;
 		reason = "1st FIS failed";
@@ -860,47 +967,44 @@
 	 */
 	msleep(150);
 
-	*class = ATA_DEV_NONE;
-	if (ata_port_online(ap)) {
-		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
-			rc = -EIO;
-			reason = "device not ready";
-			goto fail;
-		}
-		*class = ahci_dev_classify(ap);
+	rc = ata_wait_ready(ap, deadline);
+	/* link occupied, -ENODEV too is an error */
+	if (rc) {
+		reason = "device not ready";
+		goto fail;
 	}
+	*class = ahci_dev_classify(ap);
 
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
 
  fail_restart:
-	ahci_start_engine(port_mmio);
+	ahci_start_engine(ap);
  fail:
 	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
-static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
+static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
+			  unsigned long deadline)
 {
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 	struct ata_taskfile tf;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	int rc;
 
 	DPRINTK("ENTER\n");
 
-	ahci_stop_engine(port_mmio);
+	ahci_stop_engine(ap);
 
 	/* clear D2H reception area to properly wait for D2H FIS */
 	ata_tf_init(ap->device, &tf);
-	tf.command = 0xff;
+	tf.command = 0x80;
 	ata_tf_to_fis(&tf, d2h_fis, 0);
 
-	rc = sata_std_hardreset(ap, class);
+	rc = sata_std_hardreset(ap, class, deadline);
 
-	ahci_start_engine(port_mmio);
+	ahci_start_engine(ap);
 
 	if (rc == 0 && ata_port_online(ap))
 		*class = ahci_dev_classify(ap);
@@ -911,9 +1015,34 @@
 	return rc;
 }
 
+static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
+				 unsigned long deadline)
+{
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ahci_stop_engine(ap);
+
+	rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context),
+				 deadline);
+
+	/* vt8251 needs SError cleared for the port to operate */
+	ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
+
+	ahci_start_engine(ap);
+
+	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+	/* vt8251 doesn't clear BSY on signature FIS reception,
+	 * request follow-up softreset.
+	 */
+	return rc ?: -EAGAIN;
+}
+
 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 {
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 new_tmp, tmp;
 
 	ata_std_postreset(ap, class);
@@ -932,7 +1061,7 @@
 
 static u8 ahci_check_status(struct ata_port *ap)
 {
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	void __iomem *mmio = ap->ioaddr.cmd_addr;
 
 	return readl(mmio + PORT_TFDATA) & 0xFF;
 }
@@ -1027,6 +1156,10 @@
 	/* analyze @irq_stat */
 	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 
+	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
+	if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
+		irq_stat &= ~PORT_IRQ_IF_ERR;
+
 	if (irq_stat & PORT_IRQ_TF_ERR) {
 		err_mask |= AC_ERR_DEV;
 		if (ap->flags & AHCI_FLAG_IGN_SERR_INTERNAL)
@@ -1077,11 +1210,11 @@
 
 static void ahci_host_intr(struct ata_port *ap)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
 	struct ata_eh_info *ehi = &ap->eh_info;
+	struct ahci_port_priv *pp = ap->private_data;
 	u32 status, qc_active;
-	int rc;
+	int rc, known_irq = 0;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
@@ -1108,17 +1241,62 @@
 
 	/* hmmm... a spurious interupt */
 
-	/* some devices send D2H reg with I bit set during NCQ command phase */
-	if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
+	/* if !NCQ, ignore.  No modern ATA device has broken HSM
+	 * implementation for non-NCQ commands.
+	 */
+	if (!ap->sactive)
 		return;
 
-	/* ignore interim PIO setup fis interrupts */
-	if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS)) 
-		return;
+	if (status & PORT_IRQ_D2H_REG_FIS) {
+		if (!pp->ncq_saw_d2h)
+			ata_port_printk(ap, KERN_INFO,
+				"D2H reg with I during NCQ, "
+				"this message won't be printed again\n");
+		pp->ncq_saw_d2h = 1;
+		known_irq = 1;
+	}
+
+	if (status & PORT_IRQ_DMAS_FIS) {
+		if (!pp->ncq_saw_dmas)
+			ata_port_printk(ap, KERN_INFO,
+				"DMAS FIS during NCQ, "
+				"this message won't be printed again\n");
+		pp->ncq_saw_dmas = 1;
+		known_irq = 1;
+	}
+
+	if (status & PORT_IRQ_SDB_FIS) {
+		const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+
+		if (le32_to_cpu(f[1])) {
+			/* SDB FIS containing spurious completions
+			 * might be dangerous, whine and fail commands
+			 * with HSM violation.  EH will turn off NCQ
+			 * after several such failures.
+			 */
+			ata_ehi_push_desc(ehi,
+				"spurious completions during NCQ "
+				"issue=0x%x SAct=0x%x FIS=%08x:%08x",
+				readl(port_mmio + PORT_CMD_ISSUE),
+				readl(port_mmio + PORT_SCR_ACT),
+				le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+			ehi->err_mask |= AC_ERR_HSM;
+			ehi->action |= ATA_EH_SOFTRESET;
+			ata_port_freeze(ap);
+		} else {
+			if (!pp->ncq_saw_sdb)
+				ata_port_printk(ap, KERN_INFO,
+					"spurious SDB FIS %08x:%08x during NCQ, "
+					"this message won't be printed again\n",
+					le32_to_cpu(f[0]), le32_to_cpu(f[1]));
+			pp->ncq_saw_sdb = 1;
+		}
+		known_irq = 1;
+	}
 
-	if (ata_ratelimit())
+	if (!known_irq)
 		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
-				"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
+				"(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
 				status, ap->active_tag, ap->sactive);
 }
 
@@ -1127,9 +1305,9 @@
 	/* TODO */
 }
 
-static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	struct ahci_host_priv *hpriv;
 	unsigned int i, handled = 0;
 	void __iomem *mmio;
@@ -1137,8 +1315,8 @@
 
 	VPRINTK("ENTER\n");
 
-	hpriv = host_set->private_data;
-	mmio = host_set->mmio_base;
+	hpriv = host->private_data;
+	mmio = host->iomap[AHCI_PCI_BAR];
 
 	/* sigh.  0xffffffff is a valid return from h/w */
 	irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -1146,22 +1324,22 @@
 	if (!irq_stat)
 		return IRQ_NONE;
 
-        spin_lock(&host_set->lock);
+        spin_lock(&host->lock);
 
-        for (i = 0; i < host_set->n_ports; i++) {
+        for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap;
 
 		if (!(irq_stat & (1 << i)))
 			continue;
 
-		ap = host_set->ports[i];
+		ap = host->ports[i];
 		if (ap) {
 			ahci_host_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
 			if (ata_ratelimit())
-				dev_printk(KERN_WARNING, host_set->dev,
+				dev_printk(KERN_WARNING, host->dev,
 					"interrupt on disabled port %u\n", i);
 		}
 
@@ -1173,7 +1351,7 @@
 		handled = 1;
 	}
 
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 
 	VPRINTK("EXIT\n");
 
@@ -1183,7 +1361,7 @@
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = ahci_port_base(ap);
 
 	if (qc->tf.protocol == ATA_PROT_NCQ)
 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
@@ -1195,8 +1373,7 @@
 
 static void ahci_freeze(struct ata_port *ap)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *port_mmio = ahci_port_base(ap);
 
 	/* turn IRQ off */
 	writel(0, port_mmio + PORT_IRQ_MASK);
@@ -1204,14 +1381,14 @@
 
 static void ahci_thaw(struct ata_port *ap)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp;
 
 	/* clear IRQ */
 	tmp = readl(port_mmio + PORT_IRQ_STAT);
 	writel(tmp, port_mmio + PORT_IRQ_STAT);
-	writel(1 << ap->id, mmio + HOST_IRQ_STAT);
+	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
 
 	/* turn IRQ back on */
 	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
@@ -1219,50 +1396,53 @@
 
 static void ahci_error_handler(struct ata_port *ap)
 {
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
+		  ahci_postreset);
+}
 
+static void ahci_vt8251_error_handler(struct ata_port *ap)
+{
 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
 		/* restart engine */
-		ahci_stop_engine(port_mmio);
-		ahci_start_engine(port_mmio);
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
 	}
 
 	/* perform recovery */
-	ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
 		  ahci_postreset);
 }
 
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-
-	if (qc->flags & ATA_QCFLAG_FAILED)
-		qc->err_mask |= AC_ERR_OTHER;
 
-	if (qc->err_mask) {
+	if (qc->flags & ATA_QCFLAG_FAILED) {
 		/* make DMA engine forget about the failed command */
-		ahci_stop_engine(port_mmio);
-		ahci_start_engine(port_mmio);
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
 	}
 }
 
+#ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 {
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
-	struct ahci_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	const char *emsg = NULL;
 	int rc;
 
-	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
-	if (rc) {
+	rc = ahci_deinit_port(ap, &emsg);
+	if (rc == 0)
+		ahci_power_down(ap);
+	else {
 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
-		ahci_init_port(port_mmio, hpriv->cap,
-			       pp->cmd_slot_dma, pp->rx_fis_dma);
+		ahci_init_port(ap);
 	}
 
 	return rc;
@@ -1270,20 +1450,16 @@
 
 static int ahci_port_resume(struct ata_port *ap)
 {
-	struct ahci_port_priv *pp = ap->private_data;
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
-
-	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
+	ahci_power_up(ap);
+	ahci_init_port(ap);
 
 	return 0;
 }
 
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
-	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 ctl;
 
 	if (mesg.event == PM_EVENT_SUSPEND) {
@@ -1302,54 +1478,47 @@
 
 static int ahci_pci_device_resume(struct pci_dev *pdev)
 {
-	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
-	struct ahci_host_priv *hpriv = host_set->private_data;
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	int rc;
 
-	ata_pci_device_do_resume(pdev);
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 
 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		rc = ahci_reset_controller(mmio, pdev);
+		rc = ahci_reset_controller(host);
 		if (rc)
 			return rc;
 
-		ahci_init_controller(mmio, pdev, host_set->n_ports, hpriv->cap);
+		ahci_init_controller(host);
 	}
 
-	ata_host_set_resume(host_set);
+	ata_host_resume(host);
 
 	return 0;
 }
+#endif
 
 static int ahci_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
+	struct device *dev = ap->host->dev;
 	struct ahci_port_priv *pp;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	void *mem;
 	dma_addr_t mem_dma;
 	int rc;
 
-	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
-	memset(pp, 0, sizeof(*pp));
 
 	rc = ata_pad_alloc(ap, dev);
-	if (rc) {
-		kfree(pp);
+	if (rc)
 		return rc;
-	}
 
-	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
-	if (!mem) {
-		ata_pad_free(ap, dev);
-		kfree(pp);
+	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
+				  GFP_KERNEL);
+	if (!mem)
 		return -ENOMEM;
-	}
 	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
 
 	/*
@@ -1380,67 +1549,30 @@
 
 	ap->private_data = pp;
 
+	/* power up port */
+	ahci_power_up(ap);
+
 	/* initialize port */
-	ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
+	ahci_init_port(ap);
 
 	return 0;
 }
 
 static void ahci_port_stop(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct ahci_host_priv *hpriv = ap->host_set->private_data;
-	struct ahci_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	const char *emsg = NULL;
 	int rc;
 
 	/* de-initialize port */
-	rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
+	rc = ahci_deinit_port(ap, &emsg);
 	if (rc)
 		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
-
-	ap->private_data = NULL;
-	dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
-			  pp->cmd_slot, pp->cmd_slot_dma);
-	ata_pad_free(ap, dev);
-	kfree(pp);
-}
-
-static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
-			    unsigned int port_idx)
-{
-	VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
-	base = ahci_port_base_ul(base, port_idx);
-	VPRINTK("base now==0x%lx\n", base);
-
-	port->cmd_addr		= base;
-	port->scr_addr		= base + PORT_SCR;
-
-	VPRINTK("EXIT\n");
 }
 
-static int ahci_host_init(struct ata_probe_ent *probe_ent)
+static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 {
-	struct ahci_host_priv *hpriv = probe_ent->private_data;
-	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	void __iomem *mmio = probe_ent->mmio_base;
-	unsigned int i, using_dac;
 	int rc;
 
-	rc = ahci_reset_controller(mmio, pdev);
-	if (rc)
-		return rc;
-
-	hpriv->cap = readl(mmio + HOST_CAP);
-	hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
-	probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
-
-	VPRINTK("cap 0x%x  port_map 0x%x  n_ports %d\n",
-		hpriv->cap, hpriv->port_map, probe_ent->n_ports);
-
-	using_dac = hpriv->cap & HOST_CAP_64;
 	if (using_dac &&
 	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
@@ -1466,22 +1598,14 @@
 			return rc;
 		}
 	}
-
-	for (i = 0; i < probe_ent->n_ports; i++)
-		ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
-
-	ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
-
-	pci_set_master(pdev);
-
 	return 0;
 }
 
-static void ahci_print_info(struct ata_probe_ent *probe_ent)
+static void ahci_print_info(struct ata_host *host)
 {
-	struct ahci_host_priv *hpriv = probe_ent->private_data;
-	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	void __iomem *mmio = probe_ent->mmio_base;
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 vers, cap, impl, speed;
 	const char *speed_s;
 	u16 cc;
@@ -1500,11 +1624,11 @@
 		speed_s = "?";
 
 	pci_read_config_word(pdev, 0x0a, &cc);
-	if (cc == 0x0101)
+	if (cc == PCI_CLASS_STORAGE_IDE)
 		scc_s = "IDE";
-	else if (cc == 0x0106)
+	else if (cc == PCI_CLASS_STORAGE_SATA)
 		scc_s = "SATA";
-	else if (cc == 0x0104)
+	else if (cc == PCI_CLASS_STORAGE_RAID)
 		scc_s = "RAID";
 	else
 		scc_s = "unknown";
@@ -1548,16 +1672,15 @@
 		);
 }
 
-static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
+	struct ata_port_info pi = ahci_port_info[ent->driver_data];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device *dev = &pdev->dev;
 	struct ahci_host_priv *hpriv;
-	unsigned long base;
-	void __iomem *mmio_base;
-	unsigned int board_idx = (unsigned int) ent->driver_data;
-	int have_msi, pci_dev_busy = 0;
-	int rc;
+	struct ata_host *host;
+	int i, rc;
 
 	VPRINTK("ENTER\n");
 
@@ -1566,144 +1689,68 @@
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	/* JMicron-specific fixup: make sure we're in AHCI mode */
-	/* This is protected from races with ata_jmicron by the pci probe
-	   locking */
-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
-		/* AHCI enable, AHCI on function 0 */
-		pci_write_config_byte(pdev, 0x41, 0xa1);
-		/* Function 1 is the PATA controller */
-		if (PCI_FUNC(pdev->devfn))
-			return -ENODEV;
-	}
-
-	rc = pci_enable_device(pdev);
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	rc = pcim_iomap_regions(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
 
-	if (pci_enable_msi(pdev) == 0)
-		have_msi = 1;
-	else {
+	if (pci_enable_msi(pdev))
 		pci_intx(pdev, 1);
-		have_msi = 0;
-	}
 
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_msi;
-	}
-
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
 
-	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-	memset(hpriv, 0, sizeof(*hpriv));
+	/* save initial config */
+	ahci_save_initial_config(pdev, &pi, hpriv);
 
-	probe_ent->sht		= ahci_port_info[board_idx].sht;
-	probe_ent->host_flags	= ahci_port_info[board_idx].host_flags;
-	probe_ent->pio_mask	= ahci_port_info[board_idx].pio_mask;
-	probe_ent->udma_mask	= ahci_port_info[board_idx].udma_mask;
-	probe_ent->port_ops	= ahci_port_info[board_idx].port_ops;
+	/* prepare host */
+	if (!(pi.flags & AHCI_FLAG_NO_NCQ) && (hpriv->cap & HOST_CAP_NCQ))
+		pi.flags |= ATA_FLAG_NCQ;
 
-       	probe_ent->irq = pdev->irq;
-       	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-	probe_ent->private_data = hpriv;
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
+	if (!host)
+		return -ENOMEM;
+	host->iomap = pcim_iomap_table(pdev);
+	host->private_data = hpriv;
 
-	if (have_msi)
-		hpriv->flags |= AHCI_FLAG_MSI;
+	for (i = 0; i < host->n_ports; i++) {
+		if (hpriv->port_map & (1 << i)) {
+			struct ata_port *ap = host->ports[i];
+			void __iomem *port_mmio = ahci_port_base(ap);
+
+			ap->ioaddr.cmd_addr = port_mmio;
+			ap->ioaddr.scr_addr = port_mmio + PORT_SCR;
+		} else
+			host->ports[i]->ops = &ata_dummy_port_ops;
+	}
 
 	/* initialize adapter */
-	rc = ahci_host_init(probe_ent);
+	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
 	if (rc)
-		goto err_out_hpriv;
-
-	if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
-	    (hpriv->cap & HOST_CAP_NCQ))
-		probe_ent->host_flags |= ATA_FLAG_NCQ;
-
-	ahci_print_info(probe_ent);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_hpriv:
-	kfree(hpriv);
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_msi:
-	if (have_msi)
-		pci_disable_msi(pdev);
-	else
-		pci_intx(pdev, 0);
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
-}
-
-static void ahci_remove_one (struct pci_dev *pdev)
-{
-	struct device *dev = pci_dev_to_dev(pdev);
-	struct ata_host_set *host_set = dev_get_drvdata(dev);
-	struct ahci_host_priv *hpriv = host_set->private_data;
-	unsigned int i;
-	int have_msi;
-
-	for (i = 0; i < host_set->n_ports; i++)
-		ata_port_detach(host_set->ports[i]);
-
-	have_msi = hpriv->flags & AHCI_FLAG_MSI;
-	free_irq(host_set->irq, host_set);
-
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
+		return rc;
 
-		ata_scsi_release(ap->host);
-		scsi_host_put(ap->host);
-	}
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
 
-	kfree(hpriv);
-	pci_iounmap(pdev, host_set->mmio_base);
-	kfree(host_set);
+	ahci_init_controller(host);
+	ahci_print_info(host);
 
-	if (have_msi)
-		pci_disable_msi(pdev);
-	else
-		pci_intx(pdev, 0);
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-	dev_set_drvdata(dev, NULL);
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
+				 &ahci_sht);
 }
 
 static int __init ahci_init(void)
 {
-	return pci_module_init(&ahci_pci_driver);
+	return pci_register_driver(&ahci_pci_driver);
 }
 
 static void __exit ahci_exit(void)
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/ata_generic.c linux-2.6.18.x86_64.p4/drivers/ata/ata_generic.c
--- linux-2.6.18.x86_64.p3/drivers/ata/ata_generic.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/ata_generic.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,230 @@
+/*
+ *  ata_generic.c - Generic PATA/SATA controller driver.
+ *  Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ *
+ *  Elements from ide/pci/generic.c
+ *	    Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
+ *	    Portions (C) Copyright 2002  Red Hat Inc <alan@redhat.com>
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *
+ *  Driver for PCI IDE interfaces implementing the standard bus mastering
+ *  interface functionality. This assumes the BIOS did the drive set up and
+ *  tuning for us. By default we do not grab all IDE class devices as they
+ *  may have other drivers or need fixups to avoid problems. Instead we keep
+ *  a default list of stuff without documentation/driver that appears to
+ *  work.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "ata_generic"
+#define DRV_VERSION "0.2.12"
+
+/*
+ *	A generic parallel ATA driver using libata
+ */
+
+/**
+ *	generic_set_mode	-	mode setting
+ *	@ap: interface to set up
+ *	@unused: returned device on error
+ *
+ *	Use a non standard set_mode function. We don't want to be tuned.
+ *	The BIOS configured everything. Our job is not to fiddle. We
+ *	read the dma enabled bits from the PCI configuration of the device
+ *	and respect them.
+ */
+
+static int generic_set_mode(struct ata_port *ap, struct ata_device **unused)
+{
+	int dma_enabled = 0;
+	int i;
+
+	/* Bits 5 and 6 indicate if DMA is active on master/slave */
+	if (ap->ioaddr.bmdma_addr)
+		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_enabled(dev)) {
+			/* We don't really care */
+			dev->pio_mode = XFER_PIO_0;
+			dev->dma_mode = XFER_MW_DMA_0;
+			/* We do need the right mode information for DMA or PIO
+			   and this comes from the current configuration flags */
+			if (dma_enabled & (1 << (5 + i))) {
+				ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
+				dev->flags &= ~ATA_DFLAG_PIO;
+			} else {
+				ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+				dev->xfer_mode = XFER_PIO_0;
+				dev->xfer_shift = ATA_SHIFT_PIO;
+				dev->flags |= ATA_DFLAG_PIO;
+			}
+		}
+	}
+	return 0;
+}
+
+static struct scsi_host_template generic_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations generic_port_ops = {
+	.set_mode	= generic_set_mode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.data_xfer	= ata_data_xfer,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_unknown,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int all_generic_ide;		/* Set to claim all devices */
+
+/**
+ *	ata_generic_init		-	attach generic IDE
+ *	@dev: PCI device found
+ *	@id: match entry
+ *
+ *	Called each time a matching IDE interface is found. We check if the
+ *	interface is one we wish to claim and if so we perform any chip
+ *	specific hacks then let the ATA layer do the heavy lifting.
+ */
+
+static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	u16 command;
+	static const struct ata_port_info info = {
+		.sht = &generic_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &generic_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	/* Don't use the generic entry unless instructed to do so */
+	if (id->driver_data == 1 && all_generic_ide == 0)
+		return -ENODEV;
+
+	/* Devices that need care */
+	if (dev->vendor == PCI_VENDOR_ID_UMC &&
+	    dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
+	    (!(PCI_FUNC(dev->devfn) & 1)))
+		return -ENODEV;
+
+	if (dev->vendor == PCI_VENDOR_ID_OPTI &&
+	    dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
+	    (!(PCI_FUNC(dev->devfn) & 1)))
+		return -ENODEV;
+
+	/* Don't re-enable devices in generic mode or we will break some
+	   motherboards with disabled and unused IDE controllers */
+	pci_read_config_word(dev, PCI_COMMAND, &command);
+	if (!(command & PCI_COMMAND_IO))
+		return -ENODEV;
+
+	if (dev->vendor == PCI_VENDOR_ID_AL)
+	    	ata_pci_clear_simplex(dev);
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static struct pci_device_id ata_generic[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UMC,    PCI_DEVICE_ID_UMC_UM8673F), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UMC,    PCI_DEVICE_ID_UMC_UM8886A), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_UMC,    PCI_DEVICE_ID_UMC_UM8886BF), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HINT,   PCI_DEVICE_ID_HINT_VXPROII_IDE), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_VIA,    PCI_DEVICE_ID_VIA_82C561), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_OPTI,   PCI_DEVICE_ID_OPTI_82C558), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2),  },
+	/* Must come last. If you add entries adjust this table appropriately */
+	{ PCI_ANY_ID,		PCI_ANY_ID,			   PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+	{ 0, },
+};
+
+static struct pci_driver ata_generic_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ata_generic,
+	.probe 		= ata_generic_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init ata_generic_init(void)
+{
+	return pci_register_driver(&ata_generic_pci_driver);
+}
+
+
+static void __exit ata_generic_exit(void)
+{
+	pci_unregister_driver(&ata_generic_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for generic ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ata_generic);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ata_generic_init);
+module_exit(ata_generic_exit);
+
+module_param(all_generic_ide, int, 0);
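+
+/*
+ * For example, loading the driver with "modprobe ata_generic
+ * all_generic_ide=1" sets the flag above, so the catch-all
+ * PCI_CLASS_STORAGE_IDE entry at the end of ata_generic[] is
+ * claimed as well.
+ */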
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/ata_piix.c linux-2.6.18.x86_64.p4/drivers/ata/ata_piix.c
--- linux-2.6.18.x86_64.p3/drivers/ata/ata_piix.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/ata_piix.c	2007-06-06 10:08:00.000000000 -0400
@@ -40,7 +40,7 @@
  * Documentation
  *	Publically available from Intel web site. Errata documentation
  * is also publically available. As an aide to anyone hacking on this
- * driver the list of errata that are relevant is below.going back to
+ * driver the list of errata that are relevant is below, going back to
  * PIIX4. Older device documentation is now a bit tricky to find.
  *
  * The chipsets all follow very much the same design. The orginal Triton
@@ -93,7 +93,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"2.00"
+#define DRV_VERSION	"2.11"
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
@@ -101,11 +101,13 @@
 	ICH5_PCS		= 0x92,	/* port control and status */
 	PIIX_SCC		= 0x0A, /* sub-class code register */
 
-	PIIX_FLAG_IGNORE_PCS	= (1 << 25), /* ignore PCS present bits */
 	PIIX_FLAG_SCR		= (1 << 26), /* SCR available */
 	PIIX_FLAG_AHCI		= (1 << 27), /* AHCI possible */
 	PIIX_FLAG_CHECKINTR	= (1 << 28), /* make sure PCI INTx enabled */
 
+	PIIX_PATA_FLAGS		= ATA_FLAG_SLAVE_POSS,
+	PIIX_SATA_FLAGS		= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+
 	/* combined mode.  if set, PATA is channel 0.
 	 * if clear, PATA is channel 1.
 	 */
@@ -116,15 +118,17 @@
 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
 
 	/* controller IDs */
-	piix4_pata		= 0,
-	ich5_pata		= 1,
-	ich5_sata		= 2,
-	esb_sata		= 3,
-	ich6_sata		= 4,
-	ich6_sata_ahci		= 5,
-	ich6m_sata_ahci		= 6,
-	ich7m_sata_ahci		= 7,
-	ich8_sata_ahci		= 8,
+	piix_pata_33		= 0,	/* PIIX4 at 33MHz */
+	ich_pata_33		= 1,	/* ICH up to UDMA 33 only */
+	ich_pata_66		= 2,	/* ICH up to 66 MHz */
+	ich_pata_100		= 3,	/* ICH up to UDMA 100 */
+	ich_pata_133		= 4,	/* ICH up to UDMA 133 */
+	ich5_sata		= 5,
+	ich6_sata		= 6,
+	ich6_sata_ahci		= 7,
+	ich6m_sata_ahci		= 8,
+	ich8_sata_ahci		= 9,
+	piix_pata_mwdma		= 10,	/* PIIX3 MWDMA only */
 
 	/* constants for mapping table */
 	P0			= 0,  /* port 0 */
@@ -141,32 +145,61 @@
 struct piix_map_db {
 	const u32 mask;
 	const u16 port_enable;
-	const int present_shift;
 	const int map[][4];
 };
 
 struct piix_host_priv {
 	const int *map;
-	const struct piix_map_db *map_db;
 };
 
 static int piix_init_one (struct pci_dev *pdev,
 				    const struct pci_device_id *ent);
-static void piix_host_stop(struct ata_host_set *host_set);
+static void piix_pata_error_handler(struct ata_port *ap);
 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
-static void piix_pata_error_handler(struct ata_port *ap);
-static void piix_sata_error_handler(struct ata_port *ap);
+static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
+static int ich_pata_cable_detect(struct ata_port *ap);
 
 static unsigned int in_module_init = 1;
 
 static const struct pci_device_id piix_pci_tbl[] = {
-#ifdef ATA_ENABLE_PATA
-	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
-	{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
-	{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
-	{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
-#endif
+	/* Intel PIIX3 for the 430HX etc */
+	{ 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
+	/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
+	/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
+	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel PIIX4 */
+	{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel PIIX4 */
+	{ 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel PIIX */
+	{ 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+	/* Intel ICH (i810, i815, i840) UDMA 66*/
+	{ 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
+	/* Intel ICH0 : UDMA 33*/
+	{ 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
+	/* Intel ICH2M */
+	{ 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
+	{ 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/*  Intel ICH3M */
+	{ 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH3 (E7500/1) UDMA 100 */
+	{ 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
+	{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* Intel ICH5 */
+	{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
+	/* C-ICH (i810E2) */
+	{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ESB (855GME/875P + 6300ESB) UDMA 100  */
+	{ 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ICH6 (and 6) (i915) UDMA 100 */
+	{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ICH7/7-R (i945, i975) UDMA 100*/
+	{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
+	{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 
 	/* NOTE: The following PCI ids must be kept in sync with the
 	 * list in drivers/pci/quirks.c.
@@ -177,9 +210,9 @@
 	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 6300ESB (ICH5 variant with broken PCS present bits) */
-	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 6300ESB pretending RAID */
-	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata },
+	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 82801FB/FW (ICH6/ICH6W) */
 	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
 	/* 82801FR/FRW (ICH6R/ICH6RW) */
@@ -189,27 +222,27 @@
 	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
 	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
-	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci },
-	/* 631x/632xESB (ESB2) */
+	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+	/* Enterprise Southbridge 2 (631xESB/632xESB) */
 	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 	/* SATA Controller 1 IDE (ICH8) */
 	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller 2 IDE (ICH8) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
-	/* Mobile SATA Controller IDE (ICH8M, ditto) */
+	/* Mobile SATA Controller IDE (ICH8M) */
 	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
- 	/* SATA Controller IDE (ICH9) */
- 	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
- 	/* SATA Controller IDE (ICH9) */
- 	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
- 	/* SATA Controller IDE (ICH9) */
- 	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
- 	/* SATA Controller IDE (ICH9M) */
- 	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
- 	/* SATA Controller IDE (ICH9M) */
- 	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
- 	/* SATA Controller IDE (ICH9M) */
- 	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH9) */
+	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH9) */
+	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH9) */
+	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH9M) */
+	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH9M) */
+	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* SATA Controller IDE (ICH9M) */
+	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 
 	{ }	/* terminate list */
 };
@@ -219,8 +252,10 @@
 	.id_table		= piix_pci_tbl,
 	.probe			= piix_init_one,
 	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend		= ata_pci_device_suspend,
 	.resume			= ata_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template piix_sht = {
@@ -239,8 +274,6 @@
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
-	.resume			= ata_scsi_device_resume,
-	.suspend		= ata_scsi_device_suspend,
 };
 
 static const struct ata_port_operations piix_pata_ops = {
@@ -261,19 +294,54 @@
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= piix_pata_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations ich_pata_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= piix_set_piomode,
+	.set_dmamode		= ich_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= piix_pata_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ich_pata_cable_detect,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= piix_host_stop,
 };
 
 static const struct ata_port_operations piix_sata_ops = {
@@ -291,25 +359,24 @@
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
-	.error_handler		= piix_sata_error_handler,
+	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= piix_host_stop,
 };
 
 static const struct piix_map_db ich5_map_db = {
 	.mask = 0x7,
 	.port_enable = 0x3,
-	.present_shift = 4,
 	.map = {
 		/* PM   PS   SM   SS       MAP  */
 		{  P0,  NA,  P1,  NA }, /* 000b */
@@ -326,7 +393,6 @@
 static const struct piix_map_db ich6_map_db = {
 	.mask = 0x3,
 	.port_enable = 0xf,
-	.present_shift = 4,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
 		{  P0,  P2,  P1,  P3 }, /* 00b */
@@ -339,24 +405,10 @@
 static const struct piix_map_db ich6m_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x5,
-	.present_shift = 4,
-	.map = {
-		/* PM   PS   SM   SS       MAP */
-		{  P0,  P2,  RV,  RV }, /* 00b */
-		{  RV,  RV,  RV,  RV },
-		{  P0,  P2, IDE, IDE }, /* 10b */
-		{  RV,  RV,  RV,  RV },
-	},
-};
-
-static const struct piix_map_db ich7m_map_db = {
-	.mask = 0x3,
-	.port_enable = 0x5,
-	.present_shift = 4,
 
 	/* Map 01b isn't specified in the doc but some notebooks use
-	 * it anyway.  ATM, the only case spotted carries subsystem ID
-	 * 1025:0107.  This is the only difference from ich6m.
+	 * it anyway.  MAP 01b have been spotted on both ICH6M and
+	 * ICH7M.
 	 */
 	.map = {
 		/* PM   PS   SM   SS       MAP */
@@ -370,7 +422,6 @@
 static const struct piix_map_db ich8_map_db = {
 	.mask = 0x3,
 	.port_enable = 0x3,
-	.present_shift = 8,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
 		{  P0,  P2,  P1,  P3 }, /* 00b (hardwired when in AHCI) */
@@ -382,81 +433,86 @@
 
 static const struct piix_map_db *piix_map_db_table[] = {
 	[ich5_sata]		= &ich5_map_db,
-	[esb_sata]		= &ich5_map_db,
 	[ich6_sata]		= &ich6_map_db,
 	[ich6_sata_ahci]	= &ich6_map_db,
 	[ich6m_sata_ahci]	= &ich6m_map_db,
-	[ich7m_sata_ahci]	= &ich7m_map_db,
 	[ich8_sata_ahci]	= &ich8_map_db,
 };
 
 static struct ata_port_info piix_port_info[] = {
-	/* piix4_pata */
+	/* piix_pata_33: 0:  PIIX4 at 33MHz */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS,
+		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-#if 0
-		.mwdma_mask	= 0x06, /* mwdma1-2 */
-#else
-		.mwdma_mask	= 0x00, /* mwdma broken */
-#endif
+		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
 		.udma_mask	= ATA_UDMA_MASK_40C,
 		.port_ops	= &piix_pata_ops,
 	},
 
-	/* ich5_pata */
+	/* ich_pata_33: 1 	ICH0 - ICH at 33MHz */
+	{
+		.sht		= &piix_sht,
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask 	= 0x1f,	/* pio 0-4 */
+		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
+		.udma_mask	= ATA_UDMA2, /* UDMA33 */
+		.port_ops	= &ich_pata_ops,
+	},
+	/* ich_pata_66: 2 	ICH controllers up to 66MHz */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask 	= 0x1f,	/* pio 0-4 */
+		.mwdma_mask	= 0x06, /* MWDMA0 is broken on chip */
+		.udma_mask	= ATA_UDMA4,
+		.port_ops	= &ich_pata_ops,
+	},
+
+	/* ich_pata_100: 3 */
+	{
+		.sht		= &piix_sht,
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-#if 0
 		.mwdma_mask	= 0x06, /* mwdma1-2 */
-#else
-		.mwdma_mask	= 0x00, /* mwdma broken */
-#endif
-		.udma_mask	= 0x3f, /* udma0-5 */
-		.port_ops	= &piix_pata_ops,
+		.udma_mask	= ATA_UDMA5, /* udma0-5 */
+		.port_ops	= &ich_pata_ops,
 	},
 
-	/* ich5_sata */
+	/* ich_pata_133: 4 	ICH with full UDMA6 */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
-				  PIIX_FLAG_IGNORE_PCS,
-		.pio_mask	= 0x1f,	/* pio0-4 */
-		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
-		.port_ops	= &piix_sata_ops,
+		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+		.pio_mask 	= 0x1f,	/* pio 0-4 */
+		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
+		.udma_mask	= ATA_UDMA6, /* UDMA133 */
+		.port_ops	= &ich_pata_ops,
 	},
 
-	/* i6300esb_sata */
+	/* ich5_sata: 5 */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
+		.flags		= PIIX_SATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6_sata */
+	/* ich6_sata: 6 */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6_sata_ahci */
+	/* ich6_sata_ahci: 7 */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -464,11 +520,10 @@
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6m_sata_ahci */
+	/* ich6m_sata_ahci: 8 */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -476,11 +531,10 @@
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich7m_sata_ahci */
+	/* ich8_sata_ahci: 9 */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
@@ -488,16 +542,13 @@
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich8_sata_ahci */
+	/* piix_pata_mwdma: 10:  PIIX3 MWDMA only */
 	{
 		.sht		= &piix_sht,
-		.host_flags	= ATA_FLAG_SATA |
-				  PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
-				  PIIX_FLAG_AHCI,
+		.flags		= PIIX_PATA_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
-		.port_ops	= &piix_sata_ops,
+		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+		.port_ops	= &piix_pata_ops,
 	},
 };
 
@@ -512,13 +563,28 @@
 MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static int force_pcs = 0;
-module_param(force_pcs, int, 0444);
-MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
-		 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
+struct ich_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+/*
+ *	List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
+	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
+	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
+	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
+	/* end marker */
+	{ 0, }
+};
 
 /**
- *	piix_pata_cbl_detect - Probe host controller cable detect info
+ *	ich_pata_cable_detect - Probe host controller cable detect info
  *	@ap: Port for which cable detect info is desired
  *
  *	Read 80c cable indicator from ATA PCI device's PCI config
@@ -527,51 +593,46 @@
  *	LOCKING:
  *	None (inherited from caller).
  */
-static void piix_pata_cbl_detect(struct ata_port *ap)
+
+static int ich_pata_cable_detect(struct ata_port *ap)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	const struct ich_laptop *lap = &ich_laptop[0];
 	u8 tmp, mask;
 
-	/* no 80c support in host controller? */
-	if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
-		goto cbl40;
+	/* Check for specials - Acer Aspire 5602WLMi */
+	while (lap->device) {
+		if (lap->device == pdev->device &&
+		    lap->subvendor == pdev->subsystem_vendor &&
+		    lap->subdevice == pdev->subsystem_device) {
+			return ATA_CBL_PATA40_SHORT;
+		}
+		lap++;
+	}
 
 	/* check BIOS cable detect results */
-	mask = ap->hard_port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
+	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
 	pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
 	if ((tmp & mask) == 0)
-		goto cbl40;
-
-	ap->cbl = ATA_CBL_PATA80;
-	return;
-
-cbl40:
-	ap->cbl = ATA_CBL_PATA40;
-	ap->udma_mask &= ATA_UDMA_MASK_40C;
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
 }
 
 /**
  *	piix_pata_prereset - prereset for PATA host controller
  *	@ap: Target port
- *
- *	Prereset including cable detection.
+ *	@deadline: deadline jiffies for the operation
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
-static int piix_pata_prereset(struct ata_port *ap)
+static int piix_pata_prereset(struct ata_port *ap, unsigned long deadline)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
-	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
-		ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
-		ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
-		return 0;
-	}
-
-	piix_pata_cbl_detect(ap);
-
-	return ata_std_prereset(ap);
+	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
+		return -ENOENT;
+	return ata_std_prereset(ap, deadline);
 }
 
 static void piix_pata_error_handler(struct ata_port *ap)
@@ -581,87 +642,6 @@
 }
 
 /**
- *	piix_sata_present_mask - determine present mask for SATA host controller
- *	@ap: Target port
- *
- *	Reads SATA PCI device's PCI config register Port Configuration
- *	and Status (PCS) to determine port and device availability.
- *
- *	LOCKING:
- *	None (inherited from caller).
- *
- *	RETURNS:
- *	determined present_mask
- */
-static unsigned int piix_sata_present_mask(struct ata_port *ap)
-{
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
-	struct piix_host_priv *hpriv = ap->host_set->private_data;
-	const unsigned int *map = hpriv->map;
-	int base = 2 * ap->hard_port_no;
-	unsigned int present_mask = 0;
-	int port, i;
-	u16 pcs;
-
-	pci_read_config_word(pdev, ICH5_PCS, &pcs);
-	DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
-
-	for (i = 0; i < 2; i++) {
-		port = map[base + i];
-		if (port < 0)
-			continue;
-		if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
-		    (pcs & 1 << (hpriv->map_db->present_shift + port)))
-			present_mask |= 1 << i;
-	}
-
-	DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
-		ap->id, pcs, present_mask);
-
-	return present_mask;
-}
-
-/**
- *	piix_sata_softreset - reset SATA host port via ATA SRST
- *	@ap: port to reset
- *	@classes: resulting classes of attached devices
- *
- *	Reset SATA host port via ATA SRST.  On controllers with
- *	reliable PCS present bits, the bits are used to determine
- *	device presence.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
-{
-	unsigned int present_mask;
-	int i, rc;
-
-	present_mask = piix_sata_present_mask(ap);
-
-	rc = ata_std_softreset(ap, classes);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		if (!(present_mask & (1 << i)))
-			classes[i] = ATA_DEV_NONE;
-	}
-
-	return 0;
-}
-
-static void piix_sata_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
-			   ata_std_postreset);
-}
-
-/**
  *	piix_set_piomode - Initialize host controller PATA PIO timings
  *	@ap: Port whose timings we are configuring
  *	@adev: um
@@ -675,12 +655,19 @@
 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
 {
 	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
-	struct pci_dev *dev	= to_pci_dev(ap->host_set->dev);
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
 	unsigned int is_slave	= (adev->devno != 0);
-	unsigned int master_port= ap->hard_port_no ? 0x42 : 0x40;
+	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
 	unsigned int slave_port	= 0x44;
 	u16 master_data;
 	u8 slave_data;
+	u8 udma_enable;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for ICH controllers.
+	 */
 
 	static const	 /* ISP  RTC */
 	u8 timings[][2]	= { { 0, 0 },
@@ -689,20 +676,30 @@
 			    { 2, 1 },
 			    { 2, 3 }, };
 
+	if (pio >= 2)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE enable */
+
+	/* Intel specifies that the PPE functionality is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
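+
+	/*
+	 * For example, a PIO4 hard disk for which ata_pio_need_iordy()
+	 * is true ends up with control == 7 (TIME1 | IE | PPE) here.
+	 */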
+
 	pci_read_config_word(dev, master_port, &master_data);
 	if (is_slave) {
+		/* Enable SITRE (separate slave timing register) */
 		master_data |= 0x4000;
-		/* enable PPE, IE and TIME */
-		master_data |= 0x0070;
+		/* enable PPE1, IE1 and TIME1 as needed */
+		master_data |= (control << 4);
 		pci_read_config_byte(dev, slave_port, &slave_data);
-		slave_data &= (ap->hard_port_no ? 0x0f : 0xf0);
-		slave_data |=
-			(timings[pio][0] << 2) |
-			(timings[pio][1] << (ap->hard_port_no ? 4 : 0));
+		slave_data &= (ap->port_no ? 0x0f : 0xf0);
+		/* Load the timing nibble for this slave */
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
 	} else {
+		/* Master keeps the bits in a different format */
 		master_data &= 0xccf8;
-		/* enable PPE, IE and TIME */
-		master_data |= 0x0007;
+		/* Enable PPE, IE and TIME as appropriate */
+		master_data |= control;
 		master_data |=
 			(timings[pio][0] << 12) |
 			(timings[pio][1] << 8);
@@ -710,13 +707,23 @@
 	pci_write_config_word(dev, master_port, master_data);
 	if (is_slave)
 		pci_write_config_byte(dev, slave_port, slave_data);
+
+	/* Ensure the UDMA bit is off - it will be turned back on if
+	   UDMA is selected */
+
+	if (ap->udma_mask) {
+		pci_read_config_byte(dev, 0x48, &udma_enable);
+		udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+		pci_write_config_byte(dev, 0x48, udma_enable);
+	}
 }
 
 /**
- *	piix_set_dmamode - Initialize host controller PATA PIO timings
+ *	do_pata_set_dmamode - Initialize host controller PATA PIO timings
  *	@ap: Port whose timings we are configuring
- *	@adev: um
+ *	@adev: Drive in question
  *	@udma: udma mode, 0 - 6
+ *	@isich: set if the chip is an ICH device
  *
  *	Set UDMA mode for device, in host controller PCI config space.
  *
@@ -724,70 +731,141 @@
  *	None (inherited from caller).
  */
 
-static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich)
 {
-	unsigned int udma	= adev->dma_mode; /* FIXME: MWDMA too */
-	struct pci_dev *dev	= to_pci_dev(ap->host_set->dev);
-	u8 maslave		= ap->hard_port_no ? 0x42 : 0x40;
-	u8 speed		= udma;
-	unsigned int drive_dn	= (ap->hard_port_no ? 2 : 0) + adev->devno;
-	int a_speed		= 3 << (drive_dn * 4);
-	int u_flag		= 1 << drive_dn;
-	int v_flag		= 0x01 << drive_dn;
-	int w_flag		= 0x10 << drive_dn;
-	int u_speed		= 0;
-	int			sitre;
-	u16			reg4042, reg4a;
-	u8			reg48, reg54, reg55;
-
-	pci_read_config_word(dev, maslave, &reg4042);
-	DPRINTK("reg4042 = 0x%04x\n", reg4042);
-	sitre = (reg4042 & 0x4000) ? 1 : 0;
-	pci_read_config_byte(dev, 0x48, &reg48);
-	pci_read_config_word(dev, 0x4a, &reg4a);
-	pci_read_config_byte(dev, 0x54, &reg54);
-	pci_read_config_byte(dev, 0x55, &reg55);
-
-	switch(speed) {
-		case XFER_UDMA_4:
-		case XFER_UDMA_2:	u_speed = 2 << (drive_dn * 4); break;
-		case XFER_UDMA_6:
-		case XFER_UDMA_5:
-		case XFER_UDMA_3:
-		case XFER_UDMA_1:	u_speed = 1 << (drive_dn * 4); break;
-		case XFER_UDMA_0:	u_speed = 0 << (drive_dn * 4); break;
-		case XFER_MW_DMA_2:
-		case XFER_MW_DMA_1:	break;
-		default:
-			BUG();
-			return;
-	}
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u8 master_port		= ap->port_no ? 0x42 : 0x40;
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno + 2 * ap->port_no;
+	u8 udma_enable		= 0;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	pci_read_config_word(dev, master_port, &master_data);
+	if (ap->udma_mask)
+		pci_read_config_byte(dev, 0x48, &udma_enable);
 
 	if (speed >= XFER_UDMA_0) {
-		if (!(reg48 & u_flag))
-			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
-		if (speed == XFER_UDMA_5) {
-			pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
-		} else {
-			pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+		u16 ideconf;
+		int u_clock, u_speed;
+
+		/*
+	 	 * UDMA is handled by a combination of clock switching and
+		 * selection of dividers
+		 *
+		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
+		 *	       except UDMA0 which is 00
+		 */
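+		/*
+		 * Concretely, per the rule above and the clock selection
+		 * below: UDMA0 -> u_speed 0, UDMA1/3/5 -> u_speed 1,
+		 * UDMA2/4 -> u_speed 2, with a 33MHz base clock for
+		 * UDMA0-2, 66MHz for UDMA3/4 and 100MHz for UDMA5.
+		 */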
+		u_speed = min(2 - (udma & 1), udma);
+		if (udma == 5)
+			u_clock = 0x1000;	/* 100MHz */
+		else if (udma > 2)
+			u_clock = 1;		/* 66MHz */
+		else
+			u_clock = 0;		/* 33MHz */
+
+		udma_enable |= (1 << devid);
+
+		/* Load the CT/RP selection */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(3 << (4 * devid));
+		udma_timing |= u_speed << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+
+		if (isich) {
+			/* Select a 33/66/100MHz clock */
+			pci_read_config_word(dev, 0x54, &ideconf);
+			ideconf &= ~(0x1001 << devid);
+			ideconf |= u_clock << devid;
+			/* For ICH or later we should set bit 10 for better
+			   performance (WR_PingPong_En) */
+			pci_write_config_word(dev, 0x54, ideconf);
 		}
-		if ((reg4a & a_speed) != u_speed)
-			pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
-		if (speed > XFER_UDMA_2) {
-			if (!(reg54 & v_flag))
-				pci_write_config_byte(dev, 0x54, reg54 | v_flag);
-		} else
-			pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
 	} else {
-		if (reg48 & u_flag)
-			pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
-		if (reg4a & a_speed)
-			pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
-		if (reg54 & v_flag)
-			pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
-		if (reg55 & w_flag)
-			pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+		/*
+		 * MWDMA is driven by the PIO timings. We must also enable
+		 * IORDY unconditionally along with TIME1. PPE has already
+		 * been set when the PIO timing was set.
+		 */
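+		/*
+		 * For example, per the needed_pio[] and timings[] tables in
+		 * this function, MWDMA2 reuses the PIO4 timings (ISP=2,
+		 * RTC=3).
+		 */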
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (adev->devno) {	/* Slave */
+			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= (0x0F + 0xE1 * ap->port_no);
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else { 	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, master_port, master_data);
 	}
+	/* Don't scribble on 0x48 if the controller does not support UDMA */
+	if (ap->udma_mask)
+		pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+/**
+ *	piix_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	do_pata_set_dmamode(ap, adev, 0);
+}
+
+/**
+ *	ich_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Drive in question
+ *
+ *	Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	do_pata_set_dmamode(ap, adev, 1);
 }
 
 #define AHCI_PCI_BAR 5
@@ -875,18 +953,6 @@
 		pci_write_config_word(pdev, ICH5_PCS, new_pcs);
 		msleep(150);
 	}
-
-	if (force_pcs == 1) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "force ignoring PCS (0x%x)\n", new_pcs);
-		pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
-		pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
-	} else if (force_pcs == 2) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "force honoring PCS (0x%x)\n", new_pcs);
-		pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
-		pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
-	}
 }
 
 static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -916,7 +982,7 @@
 
 		case IDE:
 			WARN_ON((i & 1) || map[i + 1] != IDE);
-			pinfo[i / 2] = piix_port_info[ich5_pata];
+			pinfo[i / 2] = piix_port_info[ich_pata_100];
 			pinfo[i / 2].private_data = hpriv;
 			i++;
 			printk(" IDE IDE");
@@ -925,7 +991,7 @@
 		default:
 			printk(" P%d", map[i]);
 			if (i & 1)
-				pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
+				pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
 			break;
 		}
 	}
@@ -936,7 +1002,6 @@
 			   "invalid MAP value %u\n", map_value);
 
 	hpriv->map = map;
-	hpriv->map_db = map_db;
 }
 
 /**
@@ -957,10 +1022,11 @@
 static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
+	struct device *dev = &pdev->dev;
 	struct ata_port_info port_info[2];
-	struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
+	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
 	struct piix_host_priv *hpriv;
-	unsigned long host_flags;
+	unsigned long port_flags;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev,
@@ -970,7 +1036,7 @@
 	if (!in_module_init)
 		return -ENODEV;
 
-	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
 		return -ENOMEM;
 
@@ -979,9 +1045,9 @@
 	port_info[0].private_data = hpriv;
 	port_info[1].private_data = hpriv;
 
-	host_flags = port_info[0].host_flags;
+	port_flags = port_info[0].flags;
 
-	if (host_flags & PIIX_FLAG_AHCI) {
+	if (port_flags & PIIX_FLAG_AHCI) {
 		u8 tmp;
 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
 		if (tmp == PIIX_AHCI_DEVICE) {
@@ -992,7 +1058,7 @@
 	}
 
 	/* Initialize SATA map */
-	if (host_flags & ATA_FLAG_SATA) {
+	if (port_flags & ATA_FLAG_SATA) {
 		piix_init_sata_map(pdev, port_info,
 				   piix_map_db_table[ent->driver_data]);
 		piix_init_pcs(pdev, port_info,
@@ -1005,7 +1071,7 @@
 	 * MSI is disabled (and it is disabled, as we don't use
 	 * message-signalled interrupts currently).
 	 */
-	if (host_flags & PIIX_FLAG_CHECKINTR)
+	if (port_flags & PIIX_FLAG_CHECKINTR)
 		pci_intx(pdev, 1);
 
 	if (piix_check_450nx_errata(pdev)) {
@@ -1017,22 +1083,15 @@
 		port_info[1].mwdma_mask = 0;
 		port_info[1].udma_mask = 0;
 	}
-	return ata_pci_init_one(pdev, ppinfo, 2);
-}
-
-static void piix_host_stop(struct ata_host_set *host_set)
-{
-	if (host_set->next == NULL)
-		kfree(host_set->private_data);
-	ata_host_stop(host_set);
+	return ata_pci_init_one(pdev, ppi);
 }
 
 static int __init piix_init(void)
 {
 	int rc;
 
-	DPRINTK("pci_module_init\n");
-	rc = pci_module_init(&piix_pci_driver);
+	DPRINTK("pci_register_driver\n");
+	rc = pci_register_driver(&piix_pci_driver);
 	if (rc)
 		return rc;
 
@@ -1049,4 +1108,3 @@
 
 module_init(piix_init);
 module_exit(piix_exit);
-
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/Kconfig linux-2.6.18.x86_64.p4/drivers/ata/Kconfig
--- linux-2.6.18.x86_64.p3/drivers/ata/Kconfig	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/Kconfig	2007-06-06 10:08:00.000000000 -0400
@@ -1,142 +1,589 @@
-menu "ATA device support"
+#
+# SATA/PATA driver configuration
+#
+
+menuconfig ATA
+	tristate "Serial ATA (prod) and Parallel ATA (experimental) drivers"
+	depends on !(M32R || M68K) || BROKEN
+	depends on !SUN4 || BROKEN
+	select SCSI
+	---help---
+	  If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
+	  any other ATA device under Linux, say Y and make sure that you know
+	  the name of your ATA host adapter (the card inside your computer
+	  that "speaks" the ATA protocol, also called ATA controller),
+	  because you will be asked for it.
+
+if ATA
+
+config ATA_NONSTANDARD
+       bool
+       default n
 
-config SCSI_SATA
-	tristate "Serial ATA (SATA) support"
-	depends on SCSI
+config ATA_ACPI
+	bool
+	depends on ACPI && PCI
+	default y
 	help
-	  This driver family supports Serial ATA host controllers
-	  and devices.
-
-	  If unsure, say N.
+	  This option adds support for ATA-related ACPI objects.
+	  These ACPI objects add the ability to retrieve taskfiles
+	  from the ACPI BIOS and write them to the disk controller.
+	  These objects may be related to performance, security,
+	  power management, or other areas.
+	  You can disable this at kernel boot time by using the
+	  option libata.noacpi=1
 
-config SCSI_SATA_AHCI
+config SATA_AHCI
 	tristate "AHCI SATA support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for AHCI Serial ATA.
 
 	  If unsure, say N.
 
-config SCSI_SATA_SVW
+config SATA_SVW
 	tristate "ServerWorks Frodo / Apple K2 SATA support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for Broadcom/Serverworks/Apple K2
 	  SATA support.
 
 	  If unsure, say N.
 
-config SCSI_ATA_PIIX
-	tristate "Intel PIIX/ICH SATA support"
-	depends on SCSI_SATA && PCI
+config ATA_PIIX
+	tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
+	depends on PCI
 	help
-	  This option enables support for ICH5/6/7/8 Serial ATA.
-	  If PATA support was enabled previously, this enables
-	  support for select Intel PIIX/ICH PATA host controllers.
+	  This option enables support for ICH5/6/7/8 Serial ATA
+	  and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
+	  host controllers.
 
 	  If unsure, say N.
 
-config SCSI_SATA_MV
+config SATA_MV
 	tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the Marvell Serial ATA family.
 	  Currently supports 88SX[56]0[48][01] chips.
 
 	  If unsure, say N.
 
-config SCSI_SATA_NV
+config SATA_NV
 	tristate "NVIDIA SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	depends on PCI
 	help
 	  This option enables support for NVIDIA Serial ATA.
 
 	  If unsure, say N.
 
-config SCSI_PDC_ADMA
+config PDC_ADMA
 	tristate "Pacific Digital ADMA support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for Pacific Digital ADMA controllers
 
 	  If unsure, say N.
 
-config SCSI_SATA_QSTOR
+config SATA_QSTOR
 	tristate "Pacific Digital SATA QStor support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for Pacific Digital Serial ATA QStor.
 
 	  If unsure, say N.
 
-config SCSI_SATA_PROMISE
+config SATA_PROMISE
 	tristate "Promise SATA TX2/TX4 support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for Promise Serial ATA TX2/TX4.
 
 	  If unsure, say N.
 
-config SCSI_SATA_SX4
+config SATA_SX4
 	tristate "Promise SATA SX4 support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for Promise Serial ATA SX4.
 
 	  If unsure, say N.
 
-config SCSI_SATA_SIL
+config SATA_SIL
 	tristate "Silicon Image SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	depends on PCI
 	help
 	  This option enables support for Silicon Image Serial ATA.
 
 	  If unsure, say N.
 
-config SCSI_SATA_SIL24
+config SATA_SIL24
 	tristate "Silicon Image 3124/3132 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	depends on PCI
 	help
 	  This option enables support for Silicon Image 3124/3132 Serial ATA.
 
 	  If unsure, say N.
 
-config SCSI_SATA_SIS
-	tristate "SiS 964/180 SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
-	help
-	  This option enables support for SiS Serial ATA 964/180.
-
+config SATA_SIS
+	tristate "SiS 964/965/966/180 SATA support"
+	depends on PCI
+	select PATA_SIS
+	help
+	  This option enables support for SiS Serial ATA on
+	  SiS 964/965/966/180 and Parallel ATA on SiS 180.
+	  PATA support for the SiS 180 additionally requires the
+	  PATA_SIS driver to be enabled in the configuration.
 	  If unsure, say N.
 
-config SCSI_SATA_ULI
+config SATA_ULI
 	tristate "ULi Electronics SATA support"
-	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	depends on PCI
 	help
 	  This option enables support for ULi Electronics SATA.
 
 	  If unsure, say N.
 
-config SCSI_SATA_VIA
+config SATA_VIA
 	tristate "VIA SATA support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for VIA Serial ATA.
 
 	  If unsure, say N.
 
-config SCSI_SATA_VITESSE
+config SATA_VITESSE
 	tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
-	depends on SCSI_SATA && PCI
+	depends on PCI
 	help
 	  This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
 
 	  If unsure, say N.
 
-config SCSI_SATA_INTEL_COMBINED
+config SATA_INIC162X
+	tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for Initio 162x Serial ATA.
+
+config PATA_ALI
+	tristate "ALi PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the ALi ATA interfaces
+	  found on the many ALi chipsets.
+
+	  If unsure, say N.
+
+config PATA_AMD
+	tristate "AMD/NVidia PATA support"
+	depends on PCI
+	help
+	  This option enables support for the AMD and NVidia PATA
+	  interfaces found on the chipsets for Athlon/Athlon64.
+
+	  If unsure, say N.
+
+config PATA_ARTOP
+	tristate "ARTOP 6210/6260 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for ARTOP PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_ATIIXP
+	tristate "ATI PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the ATI ATA interfaces
+	  found on the many ATI chipsets.
+
+	  If unsure, say N.
+
+config PATA_CMD640_PCI
+	tristate "CMD640 PCI PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the CMD640 PCI IDE
+	  interface chip. Only the primary channel is currently
+	  supported.
+
+	  If unsure, say N.
+
+config PATA_CMD64X
+	tristate "CMD64x PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the CMD64x series chips
+	  except for the CMD640.
+
+	  If unsure, say N.
+
+config PATA_CS5520
+	tristate "CS5510/5520 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the Cyrix 5510/5520
+	  companion chip used with the MediaGX/Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5530
+	tristate "CS5530 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Cyrix/NatSemi/AMD CS5530
+	  companion chip used with the MediaGX/Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CS5535
+	tristate "CS5535 PATA support (Experimental)"
+	depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+	help
+	  This option enables support for the NatSemi/AMD CS5535
+	  companion chip used with the Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_CYPRESS
+	tristate "Cypress CY82C693 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Cypress/Contaq CY82C693
+	  chipset found in some Alpha systems
+
+	  If unsure, say N.
+
+config PATA_EFAR
+	tristate "EFAR SLC90E66 support"
+	depends on PCI
+	help
+	  This option enables support for the EFAR SLC90E66
+	  IDE controller found on some older machines.
+
+	  If unsure, say N.
+
+config ATA_GENERIC
+	tristate "Generic ATA support"
+	depends on PCI
+	help
+	  This option enables support for generic BIOS configured
+	  ATA controllers via the new ATA layer
+
+	  If unsure, say N.
+
+config PATA_HPT366
+	tristate "HPT 366/368 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the HPT 366 and 368
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT37X
+	tristate "HPT 370/370A/371/372/374/302 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the majority of the later HPT
+	  PATA controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_HPT3X2N
+	tristate "HPT 372N/302N PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the N variant HPT PATA
+	  controllers via the new ATA layer
+
+	  If unsure, say N.
+
+config PATA_HPT3X3
+	tristate "HPT 343/363 PATA support (Experimental)"
+	depends on PCI
+	help
+	  This option enables support for the HPT 343/363
+	  PATA controllers via the new ATA layer
+
+	  If unsure, say N.
+
+config PATA_ISAPNP
+	tristate "ISA Plug and Play PATA support (Experimental)"
+	depends on EXPERIMENTAL && ISAPNP
+	help
+	  This option enables support for ISA plug & play ATA
+	  controllers such as those found on old soundcards.
+
+	  If unsure, say N.
+
+config PATA_IT821X
+	tristate "IT8211/2 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the ITE 8211 and 8212
+	  PATA controllers via the new ATA layer, including RAID
+	  mode.
+
+	  If unsure, say N.
+
+config PATA_IT8213
+	tristate "IT8213 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the ITE 8213 PATA
+	  controllers via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_JMICRON
+	tristate "JMicron PATA support"
+	depends on PCI
+	help
+	  Enable support for the JMicron IDE controller, via the new
+	  ATA layer.
+
+	  If unsure, say N.
+
+config PATA_LEGACY
+	tristate "Legacy ISA PATA support (Experimental)"
+	depends on ISA && EXPERIMENTAL
+	help
+	  This option enables support for ISA/VLB bus legacy PATA
+	  ports and allows them to be accessed via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_TRIFLEX
+	tristate "Compaq Triflex PATA support"
+	depends on PCI
+	help
+	  Enable support for the Compaq 'Triflex' IDE controller as found
+	  on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_MARVELL
+	tristate "Marvell PATA support via legacy mode"
+	depends on PCI
+	help
+	  This option enables limited support for the Marvell 88SE6145 ATA
+	  controller.
+
+	  If unsure, say N.
+
+config PATA_MPC52xx
+	tristate "Freescale MPC52xx SoC internal IDE"
+	depends on PPC_MPC52xx
+	help
+	  This option enables support for integrated IDE controller
+	  of the Freescale MPC52xx SoC.
+
+	  If unsure, say N.
+
+config PATA_MPIIX
+	tristate "Intel PATA MPIIX support"
+	depends on PCI
+	help
+	  This option enables support for the Intel MPIIX PATA controller.
+
+	  If unsure, say N.
+
+config PATA_OLDPIIX
+	tristate "Intel PATA old PIIX support"
+	depends on PCI
+	help
+	  This option enables support for early PIIX PATA controllers.
+
+	  If unsure, say N.
+
+config PATA_NETCELL
+	tristate "NETCELL Revolution RAID support"
+	depends on PCI
+	help
+	  This option enables support for the Netcell Revolution RAID
+	  PATA controller.
+
+	  If unsure, say N.
+
+config PATA_NS87410
+	tristate "Nat Semi NS87410 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the National Semiconductor
+	  NS87410 PCI-IDE controller.
+
+	  If unsure, say N.
+
+config PATA_OPTI
+	tristate "OPTI621/6215 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables full PIO support for the early Opti ATA
+	  controllers found on some old motherboards.
+
+	  If unsure, say N.
+
+config PATA_OPTIDMA
+	tristate "OPTI FireStar PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables DMA/PIO support for the later OPTi
+	  controllers found on some old motherboards and in some
+	  laptops.
+
+	  If unsure, say N.
+
+config PATA_PCMCIA
+	tristate "PCMCIA PATA support"
+	depends on PCMCIA
+	help
+	  This option enables support for PCMCIA ATA interfaces, including
+	  compact flash card adapters via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PDC_OLD
+	tristate "Older Promise PATA controller support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the Promise 20246, 20262, 20263,
+	  20265 and 20267 adapters.
+
+	  If unsure, say N.
+
+config PATA_QDI
+	tristate "QDI VLB PATA support"
+	depends on ISA
+	help
+	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
+
+config PATA_RADISYS
+	tristate "RADISYS 82600 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the RADISYS 82600
+	  PATA controllers via the new ATA layer
+
+	  If unsure, say N.
+
+config PATA_RZ1000
+	tristate "PC Tech RZ1000 PATA support"
+	depends on PCI
+	help
+	  This option enables basic support for the PC Tech RZ1000/1
+	  PATA controllers via the new ATA layer
+
+	  If unsure, say N.
+
+config PATA_SC1200
+	tristate "SC1200 PATA support (Very Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the NatSemi/AMD SC1200 SoC
+	  companion chip used with the Geode processor family.
+
+	  If unsure, say N.
+
+config PATA_SERVERWORKS
+	tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
+	depends on PCI
+	help
+	  This option enables support for the Serverworks OSB4/CSB5/CSB6 and
+	  HT1000 PATA controllers, via the new ATA layer.
+
+	  If unsure, say N.
+
+config PATA_PDC2027X
+	tristate "Promise PATA 2027x support"
+	depends on PCI
+	help
+	  This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
+
+	  If unsure, say N.
+
+config PATA_SIL680
+	tristate "CMD / Silicon Image 680 PATA support"
+	depends on PCI
+	help
+	  This option enables support for CMD / Silicon Image 680 PATA.
+
+	  If unsure, say N.
+
+config PATA_SIS
+	tristate "SiS PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for SiS PATA controllers
+
+	  If unsure, say N.
+
+config PATA_VIA
+	tristate "VIA PATA support"
+	depends on PCI
+	help
+	  This option enables support for the VIA PATA interfaces
+	  found on the many VIA chipsets.
+
+	  If unsure, say N.
+
+config PATA_WINBOND
+	tristate "Winbond SL82C105 PATA support"
+	depends on PCI
+	help
+	  This option enables support for SL82C105 PATA devices found in the
+	  Netwinder and some other systems
+
+	  If unsure, say N.
+
+config PATA_WINBOND_VLB
+	tristate "Winbond W83759A VLB PATA support (Experimental)"
+	depends on ISA && EXPERIMENTAL
+	help
+	  Support for the Winbond W83759A controller on Vesa Local Bus
+	  systems.
+
+config PATA_PLATFORM
+	tristate "Generic platform device PATA support"
+	depends on EMBEDDED || ARCH_RPC
+	help
+	  This option enables support for generic directly connected ATA
+	  devices commonly found on embedded systems.
+
+	  If unsure, say N.
+
+config PATA_ICSIDE
+	tristate "Acorn ICS PATA support"
+	depends on ARM && ARCH_ACORN
+	help
+	  On Acorn systems, say Y here if you wish to use the ICS PATA
+	  interface card.  This is not required for ICS partition support.
+	  If you are unsure, say N to this.
+
+config PATA_IXP4XX_CF
+	tristate "IXP4XX Compact Flash support"
+	depends on ARCH_IXP4XX
+	help
+	  This option enables support for a Compact Flash connected on
+	  the ixp4xx expansion bus. This driver was written with
+	  Loft/Avila boards in mind but can work with others.
+
+	  If unsure, say N.
+
+config PATA_SCC
+	tristate "Toshiba's Cell Reference Set IDE support"
+	depends on PCI && PPC_CELLEB
+	help
+	  This option enables support for the built-in IDE controller on
+	  Toshiba Cell Reference Board.
+
+	  If unsure, say N.
+
+config ATA_INTEL_COMBINED
 	bool
-	depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
+	depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
 	default y
 
-endmenu
-
+endif # ATA
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/libata-acpi.c linux-2.6.18.x86_64.p4/drivers/ata/libata-acpi.c
--- linux-2.6.18.x86_64.p3/drivers/ata/libata-acpi.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/libata-acpi.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,706 @@
+/*
+ * libata-acpi.c
+ * Provides ACPI support for PATA/SATA.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Copyright (C) 2006 Randy Dunlap
+ */
+
+#include <linux/ata.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/libata.h>
+#include <linux/pci.h>
+#include "libata.h"
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acnames.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acparser.h>
+#include <acpi/acexcep.h>
+#include <acpi/acmacros.h>
+#include <acpi/actypes.h>
+
+#define SATA_ROOT_PORT(x)	(((x) >> 16) & 0xffff)
+#define SATA_PORT_NUMBER(x)	((x) & 0xffff)	/* or NO_PORT_MULT */
+#define NO_PORT_MULT		0xffff
+#define SATA_ADR_RSVD		0xffffffff
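+
+/*
+ * For example, an _ADR value of 0x0001ffff decodes (per the macros above)
+ * to SATA root port 1 with NO_PORT_MULT, i.e. a device attached directly
+ * to that host port with no port multiplier in between.
+ */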
+
+#define REGS_PER_GTF		7
+struct taskfile_array {
+	u8	tfa[REGS_PER_GTF];	/* regs. 0x1f1 - 0x1f7 */
+};
+
+/*
+ *	Helper - belongs in the PCI layer somewhere eventually
+ */
+static int is_pci_dev(struct device *dev)
+{
+	return (dev->bus == &pci_bus_type);
+}
+
+/**
+ * sata_get_dev_handle - finds acpi_handle and PCI device.function
+ * @dev: device to locate
+ * @handle: returned acpi_handle for @dev
+ * @pcidevfn: return PCI device.func for @dev
+ *
+ * This function is somewhat SATA-specific.  Or at least the
+ * PATA & SATA versions of this function are different,
+ * so it's not entirely generic code.
+ *
+ * Returns 0 on success, <0 on error.
+ */
+static int sata_get_dev_handle(struct device *dev, acpi_handle *handle,
+					acpi_integer *pcidevfn)
+{
+	struct pci_dev	*pci_dev;
+	acpi_integer	addr;
+
+	if (!is_pci_dev(dev))
+		return -ENODEV;
+
+	pci_dev = to_pci_dev(dev);	/* NOTE: PCI-specific */
+	/* Please refer to the ACPI spec for the syntax of _ADR. */
+	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
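+	/* e.g. PCI device 1f, function 2 yields an _ADR of 0x001f0002 */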
+	*pcidevfn = addr;
+	*handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+	if (!*handle)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ * pata_get_dev_handle - finds acpi_handle and PCI device.function
+ * @dev: device to locate
+ * @handle: returned acpi_handle for @dev
+ * @pcidevfn: return PCI device.func for @dev
+ *
+ * The PATA and SATA versions of this function are different.
+ *
+ * Returns 0 on success, <0 on error.
+ */
+static int pata_get_dev_handle(struct device *dev, acpi_handle *handle,
+				acpi_integer *pcidevfn)
+{
+	unsigned int bus, devnum, func;
+	acpi_integer addr;
+	acpi_handle dev_handle, parent_handle;
+	struct acpi_buffer buffer = {.length = ACPI_ALLOCATE_BUFFER,
+					.pointer = NULL};
+	acpi_status status;
+	struct acpi_device_info	*dinfo = NULL;
+	int ret = -ENODEV;
+	struct pci_dev *pdev;
+
+	if (!is_pci_dev(dev))
+		return -ENODEV;
+
+	pdev = to_pci_dev(dev);
+
+	bus = pdev->bus->number;
+	devnum = PCI_SLOT(pdev->devfn);
+	func = PCI_FUNC(pdev->devfn);
+
+	dev_handle = DEVICE_ACPI_HANDLE(dev);
+	parent_handle = DEVICE_ACPI_HANDLE(dev->parent);
+
+	status = acpi_get_object_info(parent_handle, &buffer);
+	if (ACPI_FAILURE(status))
+		goto err;
+
+	dinfo = buffer.pointer;
+	if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
+	    dinfo->address == bus) {
+		/* ACPI spec for _ADR for PCI bus: */
+		addr = (acpi_integer)(devnum << 16 | func);
+		*pcidevfn = addr;
+		*handle = dev_handle;
+	} else {
+		goto err;
+	}
+
+	if (!*handle)
+		goto err;
+	ret = 0;
+err:
+	kfree(dinfo);
+	return ret;
+}
+
+struct walk_info {		/* can be trimmed some */
+	struct device	*dev;
+	struct acpi_device *adev;
+	acpi_handle	handle;
+	acpi_integer	pcidevfn;
+	unsigned int	drivenum;
+	acpi_handle	obj_handle;
+	struct ata_port *ataport;
+	struct ata_device *atadev;
+	u32		sata_adr;
+	int		status;
+	char		basepath[ACPI_PATHNAME_MAX];
+	int		basepath_len;
+};
+
+static acpi_status get_devices(acpi_handle handle,
+				u32 level, void *context, void **return_value)
+{
+	acpi_status		status;
+	struct walk_info	*winfo = context;
+	struct acpi_buffer	namebuf = {ACPI_ALLOCATE_BUFFER, NULL};
+	char			*pathname;
+	struct acpi_buffer	buffer;
+	struct acpi_device_info	*dinfo;
+
+	status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &namebuf);
+	if (status)
+		goto ret;
+	pathname = namebuf.pointer;
+
+	buffer.length = ACPI_ALLOCATE_BUFFER;
+	buffer.pointer = NULL;
+	status = acpi_get_object_info(handle, &buffer);
+	if (ACPI_FAILURE(status))
+		goto out2;
+
+	dinfo = buffer.pointer;
+
+	/* find full device path name for pcidevfn */
+	if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
+	    dinfo->address == winfo->pcidevfn) {
+		if (ata_msg_probe(winfo->ataport))
+			ata_dev_printk(winfo->atadev, KERN_DEBUG,
+				":%s: matches pcidevfn (0x%llx)\n",
+				pathname, winfo->pcidevfn);
+		strlcpy(winfo->basepath, pathname,
+			sizeof(winfo->basepath));
+		winfo->basepath_len = strlen(pathname);
+		goto out;
+	}
+
+	/* if basepath is not yet known, ignore this object */
+	if (!winfo->basepath_len)
+		goto out;
+
+	/* if this object is in scope of basepath, maybe use it */
+	if (strncmp(pathname, winfo->basepath,
+	    winfo->basepath_len) == 0) {
+		if (!(dinfo->valid & ACPI_VALID_ADR))
+			goto out;
+		if (ata_msg_probe(winfo->ataport))
+			ata_dev_printk(winfo->atadev, KERN_DEBUG,
+				"GOT ONE: (%s) root_port = 0x%llx,"
+				" port_num = 0x%llx\n", pathname,
+				SATA_ROOT_PORT(dinfo->address),
+				SATA_PORT_NUMBER(dinfo->address));
+		/* heuristics: */
+		if (SATA_PORT_NUMBER(dinfo->address) != NO_PORT_MULT)
+			if (ata_msg_probe(winfo->ataport))
+				ata_dev_printk(winfo->atadev,
+					KERN_DEBUG, "warning: don't"
+					" know how to handle SATA port"
+					" multiplier\n");
+		if (SATA_ROOT_PORT(dinfo->address) ==
+			winfo->ataport->port_no &&
+		    SATA_PORT_NUMBER(dinfo->address) == NO_PORT_MULT) {
+			if (ata_msg_probe(winfo->ataport))
+				ata_dev_printk(winfo->atadev,
+					KERN_DEBUG,
+					"THIS ^^^^^ is the requested"
+					" SATA drive (handle = 0x%p)\n",
+					handle);
+			winfo->sata_adr = dinfo->address;
+			winfo->obj_handle = handle;
+		}
+	}
+out:
+	kfree(dinfo);
+out2:
+	kfree(pathname);
+
+ret:
+	return status;
+}
+
+/* Get the SATA drive _ADR object. */
+static int get_sata_adr(struct device *dev, acpi_handle handle,
+			acpi_integer pcidevfn, unsigned int drive,
+			struct ata_port *ap,
+			struct ata_device *atadev, u32 *dev_adr)
+{
+	acpi_status	status;
+	struct walk_info *winfo;
+	int		err = -ENOMEM;
+
+	winfo = kzalloc(sizeof(struct walk_info), GFP_KERNEL);
+	if (!winfo)
+		goto out;
+
+	winfo->dev = dev;
+	winfo->atadev = atadev;
+	winfo->ataport = ap;
+	if (acpi_bus_get_device(handle, &winfo->adev) < 0)
+		if (ata_msg_probe(ap))
+			ata_dev_printk(winfo->atadev, KERN_DEBUG,
+				"acpi_bus_get_device failed\n");
+	winfo->handle = handle;
+	winfo->pcidevfn = pcidevfn;
+	winfo->drivenum = drive;
+
+	status = acpi_get_devices(NULL, get_devices, winfo, NULL);
+	if (ACPI_FAILURE(status)) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(winfo->atadev, KERN_DEBUG,
+				"%s: acpi_get_devices failed\n",
+				__FUNCTION__);
+		err = -ENODEV;
+	} else {
+		*dev_adr = winfo->sata_adr;
+		atadev->obj_handle = winfo->obj_handle;
+		err = 0;
+	}
+	kfree(winfo);
+out:
+	return err;
+}
+
+/**
+ * do_drive_get_GTF - get the drive bootup default taskfile settings
+ * @dev: target ATA device
+ * @gtf_length: number of bytes of _GTF data returned at @gtf_address
+ * @gtf_address: buffer containing _GTF taskfile arrays
+ *
+ * This applies to both PATA and SATA drives.
+ *
+ * The _GTF method has no input parameters.
+ * It returns a variable number of register set values (registers
+ * hex 1F1..1F7, taskfiles).
+ * The <variable number> is not known in advance, so have ACPI-CA
+ * allocate the buffer as needed and return it, then free it later.
+ *
+ * The returned @gtf_length and @gtf_address are only valid if the
+ * function return value is 0.
+ */
+static int do_drive_get_GTF(struct ata_device *dev, unsigned int *gtf_length,
+			    unsigned long *gtf_address, unsigned long *obj_loc)
+{
+	struct ata_port *ap = dev->ap;
+	acpi_status status;
+	acpi_handle dev_handle = NULL;
+	acpi_handle chan_handle, drive_handle;
+	acpi_integer pcidevfn = 0;
+	u32 dev_adr;
+	struct acpi_buffer output;
+	union acpi_object *out_obj;
+	struct device *gdev = ap->host->dev;
+	int err = -ENODEV;
+
+	*gtf_length = 0;
+	*gtf_address = 0UL;
+	*obj_loc = 0UL;
+
+	if (libata_noacpi)
+		return 0;
+
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+			       __FUNCTION__, ap->port_no);
+
+	if (!ata_dev_enabled(dev) || (ap->flags & ATA_FLAG_DISABLED)) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG, "%s: ERR: "
+				"ata_dev_present: %d, PORT_DISABLED: %lu\n",
+				__FUNCTION__, ata_dev_enabled(dev),
+				ap->flags & ATA_FLAG_DISABLED);
+		goto out;
+	}
+
+	/* Don't continue if device has no _ADR method.
+	 * _GTF is intended for known motherboard devices. */
+	if (!(ap->flags & ATA_FLAG_ACPI_SATA)) {
+		err = pata_get_dev_handle(gdev, &dev_handle, &pcidevfn);
+		if (err < 0) {
+			if (ata_msg_probe(ap))
+				ata_dev_printk(dev, KERN_DEBUG,
+					"%s: pata_get_dev_handle failed (%d)\n",
+					__FUNCTION__, err);
+			goto out;
+		}
+	} else {
+		err = sata_get_dev_handle(gdev, &dev_handle, &pcidevfn);
+		if (err < 0) {
+			if (ata_msg_probe(ap))
+				ata_dev_printk(dev, KERN_DEBUG,
+					"%s: sata_get_dev_handle failed (%d)\n",
+					__FUNCTION__, err);
+			goto out;
+		}
+	}
+
+	/* Get this drive's _ADR info. if not already known. */
+	if (!dev->obj_handle) {
+		if (!(ap->flags & ATA_FLAG_ACPI_SATA)) {
+			/* get child objects of dev_handle == channel objects,
+			 * plus _their_ children == drive objects */
+			/* channel is ap->port_no */
+			chan_handle = acpi_get_child(dev_handle,
+						ap->port_no);
+			if (ata_msg_probe(ap))
+				ata_dev_printk(dev, KERN_DEBUG,
+					"%s: chan adr=%d: chan_handle=0x%p\n",
+					__FUNCTION__, ap->port_no,
+					chan_handle);
+			if (!chan_handle) {
+				err = -ENODEV;
+				goto out;
+			}
+			/* TBD: could also check ACPI object VALID bits */
+			drive_handle = acpi_get_child(chan_handle, dev->devno);
+			if (!drive_handle) {
+				err = -ENODEV;
+				goto out;
+			}
+			dev_adr = dev->devno;
+			dev->obj_handle = drive_handle;
+		} else {	/* for SATA mode */
+			dev_adr = SATA_ADR_RSVD;
+			err = get_sata_adr(gdev, dev_handle, pcidevfn, 0,
+					ap, dev, &dev_adr);
+		}
+		if (err < 0 || dev_adr == SATA_ADR_RSVD ||
+		    !dev->obj_handle) {
+			if (ata_msg_probe(ap))
+				ata_dev_printk(dev, KERN_DEBUG,
+					"%s: get_sata/pata_adr failed: "
+					"err=%d, dev_adr=%u, obj_handle=0x%p\n",
+					__FUNCTION__, err, dev_adr,
+					dev->obj_handle);
+			goto out;
+		}
+	}
+
+	/* Setting up output buffer */
+	output.length = ACPI_ALLOCATE_BUFFER;
+	output.pointer = NULL;	/* ACPI-CA sets this; save/free it later */
+
+	/* _GTF has no input parameters */
+	err = -EIO;
+	status = acpi_evaluate_object(dev->obj_handle, "_GTF",
+					NULL, &output);
+	if (ACPI_FAILURE(status)) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG,
+				"%s: Run _GTF error: status = 0x%x\n",
+				__FUNCTION__, status);
+		goto out;
+	}
+
+	if (!output.length || !output.pointer) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
+				"length or ptr is NULL (0x%llx, 0x%p)\n",
+				__FUNCTION__,
+				(unsigned long long)output.length,
+				output.pointer);
+		kfree(output.pointer);
+		goto out;
+	}
+
+	out_obj = output.pointer;
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		kfree(output.pointer);
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
+				"error: expected object type of "
+				"ACPI_TYPE_BUFFER, got 0x%x\n",
+				__FUNCTION__, out_obj->type);
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
+	    out_obj->buffer.length % REGS_PER_GTF) {
+		if (ata_msg_drv(ap))
+			ata_dev_printk(dev, KERN_ERR,
+				"%s: unexpected GTF length (%d) or addr (0x%p)\n",
+				__FUNCTION__, out_obj->buffer.length,
+				out_obj->buffer.pointer);
+		err = -ENOENT;
+		goto out;
+	}
+
+	*gtf_length = out_obj->buffer.length;
+	*gtf_address = (unsigned long)out_obj->buffer.pointer;
+	*obj_loc = (unsigned long)out_obj;
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: returning "
+			"gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n",
+			__FUNCTION__, *gtf_length, *gtf_address, *obj_loc);
+	err = 0;
+out:
+	return err;
+}
+
+/**
+ * taskfile_load_raw - send taskfile registers to host controller
+ * @dev: target ATA device
+ * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
+ *
+ * Outputs ATA taskfile to standard ATA host controller using MMIO
+ * or PIO as indicated by the ATA_FLAG_MMIO flag.
+ * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
+ * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
+ * hob_lbal, hob_lbam, and hob_lbah.
+ *
+ * This function waits for idle (!BUSY and !DRQ) after writing
+ * registers.  If the control register has a new value, this
+ * function also waits for idle after writing control and before
+ * writing the remaining registers.
+ *
+ * LOCKING: TBD:
+ * Inherited from caller.
+ */
+static void taskfile_load_raw(struct ata_device *dev,
+			      const struct taskfile_array *gtf)
+{
+	struct ata_port *ap = dev->ap;
+	struct ata_taskfile tf;
+	unsigned int err;
+
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: (0x1f1-1f7): hex: "
+			"%02x %02x %02x %02x %02x %02x %02x\n",
+			__FUNCTION__,
+			gtf->tfa[0], gtf->tfa[1], gtf->tfa[2],
+			gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
+
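+	/* an all-zero _GTF entry carries no command; silently skip it */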
+	if ((gtf->tfa[0] == 0) && (gtf->tfa[1] == 0) && (gtf->tfa[2] == 0)
+	    && (gtf->tfa[3] == 0) && (gtf->tfa[4] == 0) && (gtf->tfa[5] == 0)
+	    && (gtf->tfa[6] == 0))
+		return;
+
+	ata_tf_init(dev, &tf);
+
+	/* convert gtf to tf */
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
+	tf.protocol = ATA_PROT_NODATA;
+	tf.feature = gtf->tfa[0];	/* 0x1f1 */
+	tf.nsect   = gtf->tfa[1];	/* 0x1f2 */
+	tf.lbal    = gtf->tfa[2];	/* 0x1f3 */
+	tf.lbam    = gtf->tfa[3];	/* 0x1f4 */
+	tf.lbah    = gtf->tfa[4];	/* 0x1f5 */
+	tf.device  = gtf->tfa[5];	/* 0x1f6 */
+	tf.command = gtf->tfa[6];	/* 0x1f7 */
+
+	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	if (err && ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_ERR,
+			"%s: ata_exec_internal failed: %u\n",
+			__FUNCTION__, err);
+}
+
+/**
+ * do_drive_set_taskfiles - write the drive taskfile settings from _GTF
+ * @dev: target ATA device
+ * @gtf_length: total number of bytes of _GTF taskfiles
+ * @gtf_address: location of _GTF taskfile arrays
+ *
+ * This applies to both PATA and SATA drives.
+ *
+ * Write {gtf_address, length gtf_length} in groups of
+ * REGS_PER_GTF bytes.
+ */
+static int do_drive_set_taskfiles(struct ata_device *dev,
+				  unsigned int gtf_length,
+				  unsigned long gtf_address)
+{
+	struct ata_port *ap = dev->ap;
+	int err = -ENODEV;
+	int gtf_count = gtf_length / REGS_PER_GTF;
+	int ix;
+	struct taskfile_array	*gtf;
+
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+			       __FUNCTION__, ap->port_no);
+
+	if (libata_noacpi || !(ap->flags & ATA_FLAG_ACPI_SATA))
+		return 0;
+
+	if (!ata_dev_enabled(dev) || (ap->flags & ATA_FLAG_DISABLED))
+		goto out;
+	if (!gtf_count)		/* shouldn't be here */
+		goto out;
+
+	if (gtf_length % REGS_PER_GTF) {
+		if (ata_msg_drv(ap))
+			ata_dev_printk(dev, KERN_ERR,
+				"%s: unexpected GTF length (%d)\n",
+				__FUNCTION__, gtf_length);
+		goto out;
+	}
+
+	for (ix = 0; ix < gtf_count; ix++) {
+		gtf = (struct taskfile_array *)
+			(gtf_address + ix * REGS_PER_GTF);
+
+		/* send all TaskFile registers (0x1f1-0x1f7) *in*that*order* */
+		taskfile_load_raw(dev, gtf);
+	}
+
+	err = 0;
+out:
+	return err;
+}
+
+/**
+ * ata_acpi_exec_tfs - get then write drive taskfile settings
+ * @ap: the ata_port for the drive
+ *
+ * This applies to both PATA and SATA drives.
+ */
+int ata_acpi_exec_tfs(struct ata_port *ap)
+{
+	int ix;
+	int ret = 0;
+	unsigned int gtf_length;
+	unsigned long gtf_address;
+	unsigned long obj_loc;
+
+	if (libata_noacpi)
+		return 0;
+	/*
+	 * TBD - implement PATA support.  For now,
+	 * we should not run _GTF on PATA devices since some
+	 * PATA devices require execution of _GTM/_STM before _GTF.
+	 */
+	if (!(ap->flags & ATA_FLAG_ACPI_SATA))
+		return 0;
+
+	for (ix = 0; ix < ATA_MAX_DEVICES; ix++) {
+		struct ata_device *dev = &ap->device[ix];
+
+		if (!ata_dev_enabled(dev))
+			continue;
+
+		ret = do_drive_get_GTF(dev, &gtf_length, &gtf_address,
+				       &obj_loc);
+		if (ret < 0) {
+			if (ata_msg_probe(ap))
+				ata_port_printk(ap, KERN_DEBUG,
+					"%s: get_GTF error (%d)\n",
+					__FUNCTION__, ret);
+			break;
+		}
+
+		ret = do_drive_set_taskfiles(dev, gtf_length, gtf_address);
+		kfree((void *)obj_loc);
+		if (ret < 0) {
+			if (ata_msg_probe(ap))
+				ata_port_printk(ap, KERN_DEBUG,
+					"%s: set_taskfiles error (%d)\n",
+					__FUNCTION__, ret);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * ata_acpi_push_id - send Identify data to drive
+ * @dev: target ATA device
+ *
+ * _SDD ACPI object: for SATA mode only
+ * Must be after Identify (Packet) Device -- uses its data
+ * At the moment this function never returns a failure.  It is an optional
+ * method and if it fails for whatever reason, we should still
+ * just keep going.
+ */
+int ata_acpi_push_id(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->ap;
+	acpi_handle handle;
+	acpi_integer pcidevfn;
+	int err;
+	struct device *gdev = ap->host->dev;
+	u32 dev_adr;
+	acpi_status status;
+	struct acpi_object_list input;
+	union acpi_object in_params[1];
+
+	if (libata_noacpi)
+		return 0;
+
+	if (ata_msg_probe(ap))
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
+			       __FUNCTION__, dev->devno, ap->port_no);
+
+	/* Don't continue if not a SATA device. */
+	if (!(ap->flags & ATA_FLAG_ACPI_SATA)) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG,
+				"%s: Not a SATA device\n", __FUNCTION__);
+		goto out;
+	}
+
+	/* Don't continue if device has no _ADR method.
+	 * _SDD is intended for known motherboard devices. */
+	err = sata_get_dev_handle(gdev, &handle, &pcidevfn);
+	if (err < 0) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG,
+				"%s: sata_get_dev_handle failed (%d)\n",
+				__FUNCTION__, err);
+		goto out;
+	}
+
+	/* Get this drive's _ADR info, if not already known */
+	if (!dev->obj_handle) {
+		dev_adr = SATA_ADR_RSVD;
+		err = get_sata_adr(gdev, handle, pcidevfn, dev->devno, ap, dev,
+					&dev_adr);
+		if (err < 0 || dev_adr == SATA_ADR_RSVD ||
+			!dev->obj_handle) {
+			if (ata_msg_probe(ap))
+				ata_dev_printk(dev, KERN_DEBUG,
+					"%s: get_sata_adr failed: "
+					"err=%d, dev_adr=%u, obj_handle=0x%p\n",
+					__FUNCTION__, err, dev_adr,
+					dev->obj_handle);
+			goto out;
+		}
+	}
+
+	/* Give the drive Identify data to the drive via the _SDD method */
+	/* _SDD: set up input parameters */
+	input.count = 1;
+	input.pointer = in_params;
+	in_params[0].type = ACPI_TYPE_BUFFER;
+	in_params[0].buffer.length = sizeof(dev->id[0]) * ATA_ID_WORDS;
+	in_params[0].buffer.pointer = (u8 *)dev->id;
+	/* Output buffer: _SDD has no output */
+
+	/* It's OK for _SDD to be missing too. */
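+	/* dev->id is kept in CPU byte order; byte-swap to the on-the-wire
+	 * little-endian layout around the _SDD call, then restore it. */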
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+	status = acpi_evaluate_object(dev->obj_handle, "_SDD", &input, NULL);
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+	err = ACPI_FAILURE(status) ? -EIO : 0;
+	if (err < 0) {
+		if (ata_msg_probe(ap))
+			ata_dev_printk(dev, KERN_DEBUG,
+				       "%s _SDD error: status = 0x%x\n",
+				       __FUNCTION__, status);
+	}
+
+	/* always return success */
+out:
+	return 0;
+}
+
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/libata-core.c linux-2.6.18.x86_64.p4/drivers/ata/libata-core.c
--- linux-2.6.18.x86_64.p3/drivers/ata/libata-core.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/libata-core.c	2007-06-06 10:08:00.000000000 -0400
@@ -50,7 +50,6 @@
 #include <linux/jiffies.h>
 #include <linux/scatterlist.h>
 #include <scsi/scsi.h>
-#include "../scsi/scsi_priv.h"
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
@@ -60,6 +59,9 @@
 
 #include "libata.h"
 
+#define DRV_VERSION	"2.21"	/* must be exactly four chars */
+
+
 /* debounce timing parameters in msecs { interval, duration, timeout } */
 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
 const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
@@ -70,7 +72,7 @@
 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_device *dev);
 
-static unsigned int ata_unique_id = 1;
+unsigned int ata_print_id = 1;
 static struct workqueue_struct *ata_wq;
 
 struct workqueue_struct *ata_aux_wq;
@@ -87,10 +89,18 @@
 module_param_named(fua, libata_fua, int, 0444);
 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
 
+static int ata_ignore_hpa = 0;
+module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
+MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
+
 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
 module_param(ata_probe_timeout, int, 0444);
 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 
+int libata_noacpi = 1;
+module_param_named(noacpi, libata_noacpi, int, 0444);
+MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
 MODULE_LICENSE("GPL");
@@ -200,7 +210,8 @@
 
 /**
  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
- *	@qc: command to examine and configure
+ *	@tf: command to examine and configure
+ *	@dev: device tf belongs to
  *
  *	Examine the device configuration and tf->flags to calculate
  *	the proper read/write commands and protocol to use.
@@ -208,10 +219,8 @@
  *	LOCKING:
  *	caller.
  */
-int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 {
-	struct ata_taskfile *tf = &qc->tf;
-	struct ata_device *dev = qc->dev;
 	u8 cmd;
 
 	int index, fua, lba48, write;
@@ -223,7 +232,7 @@
 	if (dev->flags & ATA_DFLAG_PIO) {
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
-	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
 		/* Unable to use DMA due to host limitation */
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
@@ -241,6 +250,172 @@
 }
 
 /**
+ *	ata_tf_read_block - Read block address from ATA taskfile
+ *	@tf: ATA taskfile of interest
+ *	@dev: ATA device @tf belongs to
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Read block address from @tf.  This function can handle all
+ *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
+ *	flags select the address format to use.
+ *
+ *	RETURNS:
+ *	Block address read from @tf.
+ */
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+{
+	u64 block = 0;
+
+	if (tf->flags & ATA_TFLAG_LBA) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			block |= (u64)tf->hob_lbah << 40;
+			block |= (u64)tf->hob_lbam << 32;
+			block |= tf->hob_lbal << 24;
+		} else
+			block |= (tf->device & 0xf) << 24;
+
+		block |= tf->lbah << 16;
+		block |= tf->lbam << 8;
+		block |= tf->lbal;
+	} else {
+		u32 cyl, head, sect;
+
+		cyl = tf->lbam | (tf->lbah << 8);
+		head = tf->device & 0xf;
+		sect = tf->lbal;
+
+		block = (cyl * dev->heads + head) * dev->sectors + sect;
+	}
+
+	return block;
+}
+
+/**
+ *	ata_build_rw_tf - Build ATA taskfile for given read/write request
+ *	@tf: Target ATA taskfile
+ *	@dev: ATA device @tf belongs to
+ *	@block: Block address
+ *	@n_block: Number of blocks
+ *	@tf_flags: RW/FUA etc...
+ *	@tag: tag
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	Build ATA taskfile @tf for read/write request described by
+ *	@block, @n_block, @tf_flags and @tag on @dev.
+ *
+ *	RETURNS:
+ *
+ *	0 on success, -ERANGE if the request is too large for @dev,
+ *	-EINVAL if the request is invalid.
+ */
+int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+		    u64 block, u32 n_block, unsigned int tf_flags,
+		    unsigned int tag)
+{
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->flags |= tf_flags;
+
+	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
+		/* yay, NCQ */
+		if (!lba_48_ok(block, n_block))
+			return -ERANGE;
+
+		tf->protocol = ATA_PROT_NCQ;
+		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			tf->command = ATA_CMD_FPDMA_WRITE;
+		else
+			tf->command = ATA_CMD_FPDMA_READ;
+
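+		/* the NCQ tag is carried in bits 7:3 of the sector count */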
+		tf->nsect = tag << 3;
+		tf->hob_feature = (n_block >> 8) & 0xff;
+		tf->feature = n_block & 0xff;
+
+		tf->hob_lbah = (block >> 40) & 0xff;
+		tf->hob_lbam = (block >> 32) & 0xff;
+		tf->hob_lbal = (block >> 24) & 0xff;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
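+		/* device register: bit 6 set for LBA; bit 7 carries FUA on FPDMA writes */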
+		tf->device = 1 << 6;
+		if (tf->flags & ATA_TFLAG_FUA)
+			tf->device |= 1 << 7;
+	} else if (dev->flags & ATA_DFLAG_LBA) {
+		tf->flags |= ATA_TFLAG_LBA;
+
+		if (lba_28_ok(block, n_block)) {
+			/* use LBA28 */
+			tf->device |= (block >> 24) & 0xf;
+		} else if (lba_48_ok(block, n_block)) {
+			if (!(dev->flags & ATA_DFLAG_LBA48))
+				return -ERANGE;
+
+			/* use LBA48 */
+			tf->flags |= ATA_TFLAG_LBA48;
+
+			tf->hob_nsect = (n_block >> 8) & 0xff;
+
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else
+			/* request too large even for LBA48 */
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		tf->nsect = n_block & 0xff;
+
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device |= ATA_LBA;
+	} else {
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		/* The request -may- be too large for CHS addressing. */
+		if (!lba_28_ok(block, n_block))
+			return -ERANGE;
+
+		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+			return -EINVAL;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+
+		/* Check whether the converted CHS can fit.
+		   Cylinder: 0-65535
+		   Head: 0-15
+		   Sector: 1-255*/
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			return -ERANGE;
+
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
+
+	return 0;
+}
+
+/**
  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
  *	@pio_mask: pio_mask
  *	@mwdma_mask: mwdma_mask
@@ -387,9 +562,13 @@
 		"PIO2",
 		"PIO3",
 		"PIO4",
+		"PIO5",
+		"PIO6",
 		"MWDMA0",
 		"MWDMA1",
 		"MWDMA2",
+		"MWDMA3",
+		"MWDMA4",
 		"UDMA/16",
 		"UDMA/25",
 		"UDMA/33",
@@ -423,56 +602,14 @@
 {
 	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
+					     ATA_DNXFER_QUIET);
 		dev->class++;
 	}
 }
 
 /**
- *	ata_pio_devchk - PATA device presence detection
- *	@ap: ATA channel to examine
- *	@device: Device to examine (starting at zero)
- *
- *	This technique was originally described in
- *	Hale Landis's ATADRVR (www.ata-atapi.com), and
- *	later found its way into the ATA/ATAPI spec.
- *
- *	Write a pattern to the ATA shadow registers,
- *	and if a device is present, it will respond by
- *	correctly storing and echoing back the
- *	ATA shadow register contents.
- *
- *	LOCKING:
- *	caller.
- */
-
-static unsigned int ata_pio_devchk(struct ata_port *ap,
-				   unsigned int device)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	u8 nsect, lbal;
-
-	ap->ops->dev_select(ap, device);
-
-	outb(0x55, ioaddr->nsect_addr);
-	outb(0xaa, ioaddr->lbal_addr);
-
-	outb(0xaa, ioaddr->nsect_addr);
-	outb(0x55, ioaddr->lbal_addr);
-
-	outb(0x55, ioaddr->nsect_addr);
-	outb(0xaa, ioaddr->lbal_addr);
-
-	nsect = inb(ioaddr->nsect_addr);
-	lbal = inb(ioaddr->lbal_addr);
-
-	if ((nsect == 0x55) && (lbal == 0xaa))
-		return 1;	/* we found a device */
-
-	return 0;		/* nothing found */
-}
-
-/**
- *	ata_mmio_devchk - PATA device presence detection
+ *	ata_devchk - PATA device presence detection
  *	@ap: ATA channel to examine
  *	@device: Device to examine (starting at zero)
  *
@@ -489,25 +626,24 @@
  *	caller.
  */
 
-static unsigned int ata_mmio_devchk(struct ata_port *ap,
-				    unsigned int device)
+static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	u8 nsect, lbal;
 
 	ap->ops->dev_select(ap, device);
 
-	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
-	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
+	iowrite8(0x55, ioaddr->nsect_addr);
+	iowrite8(0xaa, ioaddr->lbal_addr);
 
-	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
-	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
+	iowrite8(0xaa, ioaddr->nsect_addr);
+	iowrite8(0x55, ioaddr->lbal_addr);
 
-	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
-	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
+	iowrite8(0x55, ioaddr->nsect_addr);
+	iowrite8(0xaa, ioaddr->lbal_addr);
 
-	nsect = readb((void __iomem *) ioaddr->nsect_addr);
-	lbal = readb((void __iomem *) ioaddr->lbal_addr);
+	nsect = ioread8(ioaddr->nsect_addr);
+	lbal = ioread8(ioaddr->lbal_addr);
 
 	if ((nsect == 0x55) && (lbal == 0xaa))
 		return 1;	/* we found a device */
@@ -516,27 +652,6 @@
 }
 
 /**
- *	ata_devchk - PATA device presence detection
- *	@ap: ATA channel to examine
- *	@device: Device to examine (starting at zero)
- *
- *	Dispatch ATA device presence detection, depending
- *	on whether we are using PIO or MMIO to talk to the
- *	ATA shadow registers.
- *
- *	LOCKING:
- *	caller.
- */
-
-static unsigned int ata_devchk(struct ata_port *ap,
-				    unsigned int device)
-{
-	if (ap->flags & ATA_FLAG_MMIO)
-		return ata_mmio_devchk(ap, device);
-	return ata_pio_devchk(ap, device);
-}
-
-/**
  *	ata_dev_classify - determine device type based on ATA-spec signature
  *	@tf: ATA taskfile register set for device to be identified
  *
@@ -597,7 +712,7 @@
  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
  */
 
-static unsigned int
+unsigned int
 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
 {
 	struct ata_taskfile tf;
@@ -613,8 +728,11 @@
 	if (r_err)
 		*r_err = err;
 
-	/* see if device passed diags */
-	if (err == 1)
+	/* see if device passed diags: if master then continue and warn later */
+	if (err == 0 && device == 0)
+		/* diagnostic fail : do nothing _YET_ */
+		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
+	else if (err == 1)
 		/* do nothing */ ;
 	else if ((device == 0) && (err == 0x81))
 		/* do nothing */ ;
@@ -694,6 +812,207 @@
 	*p = '\0';
 }
 
+static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
+{
+	u64 sectors = 0;
+
+	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
+	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
+	sectors |= (tf->hob_lbal & 0xff) << 24;
+	sectors |= (tf->lbah & 0xff) << 16;
+	sectors |= (tf->lbam & 0xff) << 8;
+	sectors |= (tf->lbal & 0xff);
+
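+	/* the taskfile holds the highest addressable LBA; +1 yields the sector count */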
+	return ++sectors;
+}
+
+static u64 ata_tf_to_lba(struct ata_taskfile *tf)
+{
+	u64 sectors = 0;
+
+	sectors |= (tf->device & 0x0f) << 24;
+	sectors |= (tf->lbah & 0xff) << 16;
+	sectors |= (tf->lbam & 0xff) << 8;
+	sectors |= (tf->lbal & 0xff);
+
+	return ++sectors;
+}
+
+/**
+ *	ata_read_native_max_address_ext	-	LBA48 native max query
+ *	@dev: Device to query
+ *
+ *	Perform an LBA48 size query upon the device in question. Return the
+ *	actual LBA48 size or zero if the command fails.
+ */
+
+static u64 ata_read_native_max_address_ext(struct ata_device *dev)
+{
+	unsigned int err;
+	struct ata_taskfile tf;
+
+	ata_tf_init(dev, &tf);
+
+	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	tf.device |= 0x40;
+
+	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	if (err)
+		return 0;
+
+	return ata_tf_to_lba48(&tf);
+}
+
+/**
+ *	ata_read_native_max_address	-	LBA28 native max query
+ *	@dev: Device to query
+ *
+ *	Perform an LBA28 size query upon the device in question. Return the
+ *	actual LBA28 size or zero if the command fails.
+ */
+
+static u64 ata_read_native_max_address(struct ata_device *dev)
+{
+	unsigned int err;
+	struct ata_taskfile tf;
+
+	ata_tf_init(dev, &tf);
+
+	tf.command = ATA_CMD_READ_NATIVE_MAX;
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	tf.device |= 0x40;
+
+	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	if (err)
+		return 0;
+
+	return ata_tf_to_lba(&tf);
+}
+
+/**
+ *	ata_set_native_max_address_ext	-	LBA48 native max set
+ *	@dev: Device to query
+ *	@new_sectors: new max sectors value to set for the device
+ *
+ *	Perform an LBA48 size set max upon the device in question. Return the
+ *	actual LBA48 size or zero if the command fails.
+ */
+
+static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
+{
+	unsigned int err;
+	struct ata_taskfile tf;
+
+	new_sectors--;
+
+	ata_tf_init(dev, &tf);
+
+	tf.command = ATA_CMD_SET_MAX_EXT;
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	tf.device |= 0x40;
+
+	tf.lbal = (new_sectors >> 0) & 0xff;
+	tf.lbam = (new_sectors >> 8) & 0xff;
+	tf.lbah = (new_sectors >> 16) & 0xff;
+
+	tf.hob_lbal = (new_sectors >> 24) & 0xff;
+	tf.hob_lbam = (new_sectors >> 32) & 0xff;
+	tf.hob_lbah = (new_sectors >> 40) & 0xff;
+
+	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	if (err)
+		return 0;
+
+	return ata_tf_to_lba48(&tf);
+}
+
+/**
+ *	ata_set_native_max_address	-	LBA28 native max set
+ *	@dev: Device to query
+ *	@new_sectors: new max sectors value to set for the device
+ *
+ *	Perform an LBA28 size set max upon the device in question. Return the
+ *	actual LBA28 size or zero if the command fails.
+ */
+
+static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
+{
+	unsigned int err;
+	struct ata_taskfile tf;
+
+	new_sectors--;
+
+	ata_tf_init(dev, &tf);
+
+	tf.command = ATA_CMD_SET_MAX;
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+
+	tf.lbal = (new_sectors >> 0) & 0xff;
+	tf.lbam = (new_sectors >> 8) & 0xff;
+	tf.lbah = (new_sectors >> 16) & 0xff;
+	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
+
+	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	if (err)
+		return 0;
+
+	return ata_tf_to_lba(&tf);
+}
+
+/**
+ *	ata_hpa_resize		-	Resize a device with an HPA set
+ *	@dev: Device to resize
+ *
+ *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
+ *	it if required to the full size of the media. The caller must check
+ *	the drive has the HPA feature set enabled.
+ */
+
+static u64 ata_hpa_resize(struct ata_device *dev)
+{
+	u64 sectors = dev->n_sectors;
+	u64 hpa_sectors;
+
+	if (ata_id_has_lba48(dev->id))
+		hpa_sectors = ata_read_native_max_address_ext(dev);
+	else
+		hpa_sectors = ata_read_native_max_address(dev);
+
+	/* if no hpa, both should be equal */
+	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
+				"hpa_sectors = %lld\n",
+		__FUNCTION__, (long long)sectors, (long long)hpa_sectors);
+
+	if (hpa_sectors > sectors) {
+		ata_dev_printk(dev, KERN_INFO,
+			"Host Protected Area detected:\n"
+			"\tcurrent size: %lld sectors\n"
+			"\tnative size: %lld sectors\n",
+			(long long)sectors, (long long)hpa_sectors);
+
+		if (ata_ignore_hpa) {
+			if (ata_id_has_lba48(dev->id))
+				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
+			else
+				hpa_sectors = ata_set_native_max_address(dev,
+								hpa_sectors);
+
+			if (hpa_sectors) {
+				ata_dev_printk(dev, KERN_INFO, "native size "
+					"increased to %lld sectors\n",
+					(long long)hpa_sectors);
+				return hpa_sectors;
+			}
+		}
+	}
+	return sectors;
+}
+
 static u64 ata_id_n_sectors(const u16 *id)
 {
 	if (ata_id_has_lba(id)) {
@@ -710,6 +1029,48 @@
 }
 
 /**
+ *	ata_id_to_dma_mode	-	Identify DMA mode from id block
+ *	@dev: device to identify
+ *	@unknown: mode to assume if we cannot tell
+ *
+ *	Set up the timing values for the device based upon the identify
+ *	reported values for the DMA mode. This function is used by drivers
+ *	which rely upon firmware configured modes, but wish to report the
+ *	mode correctly when possible.
+ *
+ *	In addition we emit messages formatted similarly to those of the
+ *	default ata_dev_set_mode handler, in order to provide consistency
+ *	of presentation.
+ */
+
+void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
+{
+	unsigned int mask;
+	u8 mode;
+
+	/* Pack the DMA modes */
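+	/* id word 63 bits 10:8 report the selected MWDMA mode; word 88
+	 * bits 14:8 the selected UDMA mode (valid when word 53 bit 2 is set) */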
+	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
+	if (dev->id[53] & 0x04)
+		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
+
+	/* Select the mode in use */
+	mode = ata_xfer_mask2mode(mask);
+
+	if (mode != 0) {
+		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+		       ata_mode_string(mask));
+	} else {
+		/* SWDMA perhaps ? */
+		mode = unknown;
+		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+	}
+
+	/* Configure the device reporting */
+	dev->xfer_mode = mode;
+	dev->xfer_shift = ata_xfer_mode2shift(mode);
+}
+
+/**
  *	ata_noop_dev_select - Select device 0/1 on ATA bus
  *	@ap: ATA channel to manipulate
  *	@device: ATA device (numbered from zero) to select
@@ -750,11 +1111,7 @@
 	else
 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
 
-	if (ap->flags & ATA_FLAG_MMIO) {
-		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
-	} else {
-		outb(tmp, ap->ioaddr.device_addr);
-	}
+	iowrite8(tmp, ap->ioaddr.device_addr);
 	ata_pause(ap);		/* needed; also flushes, for mmio */
 }
 
@@ -781,8 +1138,8 @@
 			   unsigned int wait, unsigned int can_sleep)
 {
 	if (ata_msg_probe(ap))
-		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
-				"device %u, wait %u\n", ap->id, device, wait);
+		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
+				"device %u, wait %u\n", device, wait);
 
 	if (wait)
 		ata_wait_idle(ap);
@@ -864,7 +1221,11 @@
 		 * the PIO timing number for the maximum. Turn it into
 		 * a mask.
 		 */
-		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
+		if (mode < 5)	/* Valid PIO range */
+			pio_mask = (2 << mode) - 1;
+		else
+			pio_mask = 1;
 
 		/* But wait.. there's more. Design your standards by
 		 * committee and you too can get a free iordy field to
@@ -876,6 +1237,23 @@
 
 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
 
+	if (ata_id_is_cfa(id)) {
+		/*
+		 *	Process compact flash extended modes
+		 */
+		int pio = id[163] & 0x7;
+		int dma = (id[163] >> 3) & 7;
+
+		if (pio)
+			pio_mask |= (1 << 5);
+		if (pio > 1)
+			pio_mask |= (1 << 6);
+		if (dma)
+			mwdma_mask |= (1 << 3);
+		if (dma > 1)
+			mwdma_mask |= (1 << 4);
+	}
+
 	udma_mask = 0;
 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
@@ -887,7 +1265,7 @@
  *	ata_port_queue_task - Queue port_task
  *	@ap: The ata_port to queue port_task for
  *	@fn: workqueue function to be scheduled
- *	@data: data value to pass to workqueue function
+ *	@data: data for @fn to use
  *	@delay: delay time for workqueue function
  *
  *	Schedule @fn(@data) for execution after @delay jiffies using
@@ -902,7 +1280,7 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
 	int rc;
@@ -911,6 +1289,7 @@
 		return;
 
 	PREPARE_WORK(&ap->port_task, fn, data);
+	ap->port_task_data = data;
 
 	if (!delay)
 		rc = queue_work(ata_wq, &ap->port_task);
@@ -964,7 +1343,7 @@
 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
 }
 
-void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
 {
 	struct completion *waiting = qc->private_data;
 
@@ -972,13 +1351,13 @@
 }
 
 /**
- *	ata_exec_internal - execute libata internal command
+ *	ata_exec_internal_sg - execute libata internal command
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
  *	@cdb: CDB for packet command
  *	@dma_dir: Data tranfer direction of the command
- *	@buf: Data buffer of the command
- *	@buflen: Length of data buffer
+ *	@sg: sg list for the data buffer of the command
+ *	@n_elem: Number of sg entries
  *
  *	Executes libata internal command with timeout.  @tf contains
  *	command on entry and result on return.  Timeout and error
@@ -992,9 +1371,10 @@
  *	RETURNS:
  *	Zero on success, AC_ERR_* mask on failure
  */
-unsigned ata_exec_internal(struct ata_device *dev,
-			   struct ata_taskfile *tf, const u8 *cdb,
-			   int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal_sg(struct ata_device *dev,
+			      struct ata_taskfile *tf, const u8 *cdb,
+			      int dma_dir, struct scatterlist *sg,
+			      unsigned int n_elem)
 {
 	struct ata_port *ap = dev->ap;
 	u8 command = tf->command;
@@ -1050,8 +1430,13 @@
 	qc->flags |= ATA_QCFLAG_RESULT_TF;
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
-		ata_sg_init_one(qc, buf, buflen);
-		qc->nsect = buflen / ATA_SECT_SIZE;
+		unsigned int i, buflen = 0;
+
+		for (i = 0; i < n_elem; i++)
+			buflen += sg[i].length;
+
+		ata_sg_init(qc, sg, n_elem);
+		qc->nbytes = buflen;
 	}
 
 	qc->private_data = &wait;
@@ -1093,12 +1478,16 @@
 	if (ap->ops->post_internal_cmd)
 		ap->ops->post_internal_cmd(qc);
 
-	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
-		if (ata_msg_warn(ap))
-			ata_dev_printk(dev, KERN_WARNING,
-				"zero err_mask for failed "
-				"internal command, assuming AC_ERR_OTHER\n");
-		qc->err_mask |= AC_ERR_OTHER;
+	/* perform minimal error analysis */
+	if (qc->flags & ATA_QCFLAG_FAILED) {
+		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
+			qc->err_mask |= AC_ERR_DEV;
+
+		if (!qc->err_mask)
+			qc->err_mask |= AC_ERR_OTHER;
+
+		if (qc->err_mask & ~AC_ERR_OTHER)
+			qc->err_mask &= ~AC_ERR_OTHER;
 	}
 
 	/* finish up */
@@ -1134,6 +1523,41 @@
 }
 
 /**
+ *	ata_exec_internal - execute libata internal command
+ *	@dev: Device to which the command is sent
+ *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
+ *	@dma_dir: Data transfer direction of the command
+ *	@buf: Data buffer of the command
+ *	@buflen: Length of data buffer
+ *
+ *	Wrapper around ata_exec_internal_sg() which takes simple
+ *	buffer instead of sg list.
+ *
+ *	LOCKING:
+ *	None.  Should be called with kernel context, might sleep.
+ *
+ *	RETURNS:
+ *	Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal(struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen)
+{
+	struct scatterlist *psg = NULL, sg;
+	unsigned int n_elem = 0;
+
+	if (dma_dir != DMA_NONE) {
+		WARN_ON(!buf);
+		sg_init_one(&sg, buf, buflen);
+		psg = &sg;
+		n_elem++;
+	}
+
+	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
+}
+
+/**
  *	ata_do_simple_cmd - execute simple internal command
  *	@dev: Device to which the command is sent
  *	@cmd: Opcode to execute
@@ -1170,34 +1594,48 @@
 
 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 {
-	int pio;
-	int speed = adev->pio_mode - XFER_PIO_0;
-
-	if (speed < 2)
+	/* Controller doesn't support IORDY. Probably a pointless check
+	   as the caller should know this */
+	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
 		return 0;
-	if (speed > 2)
+	/* For PIO3 and higher, IORDY is mandatory */
+	if (adev->pio_mode > XFER_PIO_2)
+		return 1;
+	/* We turn it on when possible */
+	if (ata_id_has_iordy(adev->id))
 		return 1;
+	return 0;
+}
 
-	/* If we have no drive specific rule, then PIO 2 is non IORDY */
+/**
+ *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
+ *	@adev: ATA device
+ *
+ *	Compute the highest mode possible if we are not using iordy. Return
+ *	-1 if no iordy mode is available.
+ */
 
+static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
+{
+	/* If we have no drive specific rule, then PIO 2 is non IORDY */
 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
-		pio = adev->id[ATA_ID_EIDE_PIO];
+		u16 pio = adev->id[ATA_ID_EIDE_PIO];
 		/* Is the speed faster than the drive allows non IORDY ? */
 		if (pio) {
 			/* This is cycle times not frequency - watch the logic! */
 			if (pio > 240)	/* PIO2 is 240nS per cycle */
-				return 1;
-			return 0;
+				return 3 << ATA_SHIFT_PIO;
+			return 7 << ATA_SHIFT_PIO;
 		}
 	}
-	return 0;
+	return 3 << ATA_SHIFT_PIO;
 }
 
 /**
  *	ata_dev_read_id - Read ID data from the specified device
  *	@dev: target device
  *	@p_class: pointer to class of the target device (may be changed)
- *	@post_reset: is this read ID post-reset?
+ *	@flags: ATA_READID_* flags
  *	@id: buffer to read IDENTIFY data into
  *
  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
@@ -1212,21 +1650,20 @@
  *	0 on success, -errno otherwise.
  */
 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
-		    int post_reset, u16 *id)
+		    unsigned int flags, u16 *id)
 {
 	struct ata_port *ap = dev->ap;
 	unsigned int class = *p_class;
 	struct ata_taskfile tf;
 	unsigned int err_mask = 0;
 	const char *reason;
+	int may_fallback = 1, tried_spinup = 0;
 	int rc;
 
 	if (ata_msg_ctl(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
-			       __FUNCTION__, ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
 
 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
-
  retry:
 	ata_tf_init(dev, &tf);
 
@@ -1245,14 +1682,50 @@
 
 	tf.protocol = ATA_PROT_PIO;
 
+	/* Some devices choke if TF registers contain garbage.  Make
+	 * sure those are properly initialized.
+	 */
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+
+	/* Device presence detection is unreliable on some
+	 * controllers.  Always poll IDENTIFY if available.
+	 */
+	tf.flags |= ATA_TFLAG_POLLING;
+
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
 	if (err_mask) {
+		if (err_mask & AC_ERR_NODEV_HINT) {
+			DPRINTK("ata%u.%d: NODEV after polling detection\n",
+				ap->print_id, dev->devno);
+			return -ENOENT;
+		}
+
+		/* Device or controller might have reported the wrong
+		 * device class.  Give a shot at the other IDENTIFY if
+		 * the current one is aborted by the device.
+		 */
+		if (may_fallback &&
+		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+			may_fallback = 0;
+
+			if (class == ATA_DEV_ATA)
+				class = ATA_DEV_ATAPI;
+			else
+				class = ATA_DEV_ATA;
+			goto retry;
+		}
+
 		rc = -EIO;
 		reason = "I/O error";
 		goto err_out;
 	}
 
+	/* Falling back doesn't make sense if ID data was read
+	 * successfully at least once.
+	 */
+	may_fallback = 0;
+
 	swap_buf_le16(id, ATA_ID_WORDS);
 
 	/* sanity check */
@@ -1267,7 +1740,33 @@
 			goto err_out;
 	}
 
-	if (post_reset && class == ATA_DEV_ATA) {
+	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
+		tried_spinup = 1;
+		/*
+		 * Drive powered-up in standby mode, and requires a specific
+		 * SET_FEATURES spin-up subcommand before it will accept
+		 * anything other than the original IDENTIFY command.
+		 */
+		ata_tf_init(dev, &tf);
+		tf.command = ATA_CMD_SET_FEATURES;
+		tf.feature = SETFEATURES_SPINUP;
+		tf.protocol = ATA_PROT_NODATA;
+		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+		if (err_mask) {
+			rc = -EIO;
+			reason = "SPINUP failed";
+			goto err_out;
+		}
+		/*
+		 * If the drive initially returned incomplete IDENTIFY info,
+		 * we now must reissue the IDENTIFY command.
+		 */
+		if (id[2] == 0x37c8)
+			goto retry;
+	}
+
+	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
 		/*
 		 * The exact sequence expected by certain pre-ATA4 drives is:
 		 * SRST RESET
@@ -1287,7 +1786,7 @@
 			/* current CHS translation info (id[53-58]) might be
 			 * changed. reread the identify device info.
 			 */
-			post_reset = 0;
+			flags &= ~ATA_READID_POSTRESET;
 			goto retry;
 		}
 	}
@@ -1318,9 +1817,12 @@
 		desc[0] = '\0';
 		return;
 	}
-
+	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
+		snprintf(desc, desc_sz, "NCQ (not used)");
+		return;
+	}
 	if (ap->flags & ATA_FLAG_NCQ) {
-		hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
+		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
 		dev->flags |= ATA_DFLAG_NCQ;
 	}
 
@@ -1330,23 +1832,9 @@
 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
 }
 
-static void ata_set_port_max_cmd_len(struct ata_port *ap)
-{
-	int i;
-
-	if (ap->host) {
-		ap->host->max_cmd_len = 0;
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			ap->host->max_cmd_len = max_t(unsigned int,
-						      ap->host->max_cmd_len,
-						      ap->device[i].cdb_len);
-	}
-}
-
 /**
  *	ata_dev_configure - Configure the specified ATA/ATAPI device
  *	@dev: Target device to configure
- *	@print_info: Enable device info printout
  *
  *	Configure @dev according to @dev->id.  Generic and low-level
  *	driver specific fixups are also applied.
@@ -1357,23 +1845,35 @@
  *	RETURNS:
  *	0 on success, -errno otherwise
  */
-int ata_dev_configure(struct ata_device *dev, int print_info)
+int ata_dev_configure(struct ata_device *dev)
 {
 	struct ata_port *ap = dev->ap;
+	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
 	const u16 *id = dev->id;
 	unsigned int xfer_mask;
+	char revbuf[7];		/* XYZ-99\0 */
+	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
+	char modelbuf[ATA_ID_PROD_LEN+1];
 	int rc;
 
 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
-		ata_dev_printk(dev, KERN_INFO,
-			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
-			       __FUNCTION__, ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
+			       __FUNCTION__);
 		return 0;
 	}
 
 	if (ata_msg_probe(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
-			       __FUNCTION__, ap->id, dev->devno);
+		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
+
+	/* set _SDD */
+	rc = ata_acpi_push_id(dev);
+	if (rc) {
+		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
+			rc);
+	}
+
+	/* retrieve and execute the ATA task file of _GTF */
+	ata_acpi_exec_tfs(ap);
 
 	/* print device capabilities */
 	if (ata_msg_probe(ap))
@@ -1405,8 +1905,28 @@
 
 	/* ATA-specific feature tests */
 	if (dev->class == ATA_DEV_ATA) {
+		if (ata_id_is_cfa(id)) {
+			if (id[162] & 1) /* CPRM may make this media unusable */
+				ata_dev_printk(dev, KERN_WARNING,
+					       "supports DRM functions and may "
+					       "not be fully accessible.\n");
+			snprintf(revbuf, 7, "CFA");
+		}
+		else
+			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
+
 		dev->n_sectors = ata_id_n_sectors(id);
 
+		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
+		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
+				sizeof(fwrevbuf));
+
+		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
+				sizeof(modelbuf));
+
+		if (dev->id[59] & 0x100)
+			dev->multi_count = dev->id[59] & 0xff;
+
 		if (ata_id_has_lba(id)) {
 			const char *lba_desc;
 			char ncq_desc[20];
@@ -1416,19 +1936,29 @@
 			if (ata_id_has_lba48(id)) {
 				dev->flags |= ATA_DFLAG_LBA48;
 				lba_desc = "LBA48";
+
+				if (dev->n_sectors >= (1UL << 28) &&
+				    ata_id_has_flush_ext(id))
+					dev->flags |= ATA_DFLAG_FLUSH_EXT;
 			}
 
+			if (ata_id_hpa_enabled(dev->id))
+				dev->n_sectors = ata_hpa_resize(dev);
+
 			/* config NCQ */
 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
 
 			/* print device info to dmesg */
-			if (ata_msg_drv(ap) && print_info)
-				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
-					"max %s, %Lu sectors: %s %s\n",
-					ata_id_major_version(id),
-					ata_mode_string(xfer_mask),
+			if (ata_msg_drv(ap) && print_info) {
+				ata_dev_printk(dev, KERN_INFO,
+					"%s: %s, %s, max %s\n",
+					revbuf, modelbuf, fwrevbuf,
+					ata_mode_string(xfer_mask));
+				ata_dev_printk(dev, KERN_INFO,
+					"%Lu sectors, multi %u: %s %s\n",
 					(unsigned long long)dev->n_sectors,
-					lba_desc, ncq_desc);
+					dev->multi_count, lba_desc, ncq_desc);
+			}
 		} else {
 			/* CHS */
 
@@ -1445,22 +1975,17 @@
 			}
 
 			/* print device info to dmesg */
-			if (ata_msg_drv(ap) && print_info)
-				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
-					"max %s, %Lu sectors: CHS %u/%u/%u\n",
-					ata_id_major_version(id),
-					ata_mode_string(xfer_mask),
-					(unsigned long long)dev->n_sectors,
-					dev->cylinders, dev->heads,
-					dev->sectors);
-		}
-
-		if (dev->id[59] & 0x100) {
-			dev->multi_count = dev->id[59] & 0xff;
-			if (ata_msg_drv(ap) && print_info)
+			if (ata_msg_drv(ap) && print_info) {
+				ata_dev_printk(dev, KERN_INFO,
+					"%s: %s, %s, max %s\n",
+					revbuf,	modelbuf, fwrevbuf,
+					ata_mode_string(xfer_mask));
 				ata_dev_printk(dev, KERN_INFO,
-					"ata%u: dev %u multi count %u\n",
-					ap->id, dev->devno, dev->multi_count);
+					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
+					(unsigned long long)dev->n_sectors,
+					dev->multi_count, dev->cylinders,
+					dev->heads, dev->sectors);
+			}
 		}
 
 		dev->cdb_len = 16;
@@ -1492,7 +2017,22 @@
 				       cdb_intr_string);
 	}
 
-	ata_set_port_max_cmd_len(ap);
+	/* determine max_sectors */
+	dev->max_sectors = ATA_MAX_SECTORS;
+	if (dev->flags & ATA_DFLAG_LBA48)
+		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
+	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+		/* Let the user know. We don't want to disallow opens for
+		   rescue purposes, or in case the vendor is just a blithering
+		   idiot */
+		if (print_info) {
+			ata_dev_printk(dev, KERN_WARNING,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+			ata_dev_printk(dev, KERN_WARNING,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+		}
+	}
 
 	/* limit bridge transfers to udma5, 200 sectors */
 	if (ata_dev_knobble(dev)) {
@@ -1503,8 +2043,16 @@
 		dev->max_sectors = ATA_MAX_SECTORS;
 	}
 
+	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
+		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+					 dev->max_sectors);
+
+	/* limit ATAPI DMA to R/W commands only */
+	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
+		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
+
 	if (ap->ops->dev_config)
-		ap->ops->dev_config(ap, dev);
+		ap->ops->dev_config(dev);
 
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
@@ -1519,6 +2067,56 @@
 }
 
 /**
+ *	ata_cable_40wire	-	return 40 wire cable type
+ *	@ap: port
+ *
+ *	Helper method for drivers which want to hardwire 40 wire cable
+ *	detection.
+ */
+
+int ata_cable_40wire(struct ata_port *ap)
+{
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	ata_cable_80wire	-	return 80 wire cable type
+ *	@ap: port
+ *
+ *	Helper method for drivers which want to hardwire 80 wire cable
+ *	detection.
+ */
+
+int ata_cable_80wire(struct ata_port *ap)
+{
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	ata_cable_unknown	-	return unknown PATA cable.
+ *	@ap: port
+ *
+ *	Helper method for drivers which have no PATA cable detection.
+ */
+
+int ata_cable_unknown(struct ata_port *ap)
+{
+	return ATA_CBL_PATA_UNK;
+}
+
+/**
+ *	ata_cable_sata	-	return SATA cable type
+ *	@ap: port
+ *
+ *	Helper method for drivers which have SATA cables
+ */
+
+int ata_cable_sata(struct ata_port *ap)
+{
+	return ATA_CBL_SATA;
+}
+
+/**
  *	ata_bus_probe - Reset and probe ATA bus
  *	@ap: Bus to probe
  *
@@ -1537,7 +2135,7 @@
 {
 	unsigned int classes[ATA_MAX_DEVICES];
 	int tries[ATA_MAX_DEVICES];
-	int i, rc, down_xfermask;
+	int i, rc;
 	struct ata_device *dev;
 
 	ata_port_probe(ap);
@@ -1546,8 +2144,6 @@
 		tries[i] = ATA_PROBE_MAX_TRIES;
 
  retry:
-	down_xfermask = 0;
-
 	/* reset and determine device classes */
 	ap->ops->phy_reset(ap);
 
@@ -1571,8 +2167,11 @@
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		ap->device[i].pio_mode = XFER_PIO_0;
 
-	/* read IDENTIFY page and configure devices */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+	/* read IDENTIFY page and configure devices. We have to do the identify
+	   specific sequence bass-ackwards so that PDIAG- is released by
+	   the slave device */
+
+	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
 		dev = &ap->device[i];
 
 		if (tries[i])
@@ -1581,21 +2180,35 @@
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
+		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
+				     dev->id);
 		if (rc)
 			goto fail;
+	}
+
+	/* Now ask for the cable type as PDIAG- should have been released */
+	if (ap->ops->cable_detect)
+		ap->cbl = ap->ops->cable_detect(ap);
+
+	/* After the identify sequence we can now set up the devices. We do
+	   this in the normal order so that the user doesn't get confused */
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+		if (!ata_dev_enabled(dev))
+			continue;
 
-		rc = ata_dev_configure(dev, 1);
+		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
+		rc = ata_dev_configure(dev);
+		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
 		if (rc)
 			goto fail;
 	}
 
 	/* configure transfer mode */
 	rc = ata_set_mode(ap, &dev);
-	if (rc) {
-		down_xfermask = 1;
+	if (rc)
 		goto fail;
-	}
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		if (ata_dev_enabled(&ap->device[i]))
@@ -1607,25 +2220,29 @@
 	return -ENODEV;
 
  fail:
+	tries[dev->devno]--;
+
 	switch (rc) {
 	case -EINVAL:
-	case -ENODEV:
+		/* eeek, something went very wrong, give up */
 		tries[dev->devno] = 0;
 		break;
+
+	case -ENODEV:
+		/* give it just one more chance */
+		tries[dev->devno] = min(tries[dev->devno], 1);
 	case -EIO:
-		sata_down_spd_limit(ap);
-		/* fall through */
-	default:
-		tries[dev->devno]--;
-		if (down_xfermask &&
-		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
-			tries[dev->devno] = 0;
+		if (tries[dev->devno] == 1) {
+			/* This is the last chance, better to slow
+			 * down than lose it.
+			 */
+			sata_down_spd_limit(ap);
+			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		}
 	}
 
-	if (!tries[dev->devno]) {
-		ata_down_xfermask_limit(dev, 1);
+	if (!tries[dev->devno])
 		ata_dev_disable(dev);
-	}
 
 	goto retry;
 }
@@ -1637,7 +2254,7 @@
  *	Modify @ap data structure such that the system
  *	thinks that the entire port is enabled.
  *
- *	LOCKING: host_set lock, or some other form of
+ *	LOCKING: host lock, or some other form of
  *	serialization.
  */
 
@@ -1655,7 +2272,7 @@
  *	LOCKING:
  *	None.
  */
-static void sata_print_link_status(struct ata_port *ap)
+void sata_print_link_status(struct ata_port *ap)
 {
 	u32 sstatus, scontrol, tmp;
 
@@ -1775,7 +2392,7 @@
  *	never attempt to probe or communicate with devices
  *	on this port.
  *
- *	LOCKING: host_set lock, or some other form of
+ *	LOCKING: host lock, or some other form of
  *	serialization.
  */
 
@@ -1906,10 +2523,11 @@
  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
  */
 /*
- * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
- * for PIO 5, which is a nonstandard extension and UDMA6, which
- * is currently supported only by Maxtor drives.
+ * for UDMA6, which is currently supported only by Maxtor drives.
+ *
+ * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
  */
 
 static const struct ata_timing ata_timing[] = {
@@ -1919,6 +2537,8 @@
 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
 
+	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
+	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
@@ -1933,7 +2553,8 @@
 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
 
-/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
+	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
+	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
 
@@ -2027,7 +2648,7 @@
 	 * DMA cycle timing is slower/equal than the fastest PIO timing.
 	 */
 
-	if (speed > XFER_PIO_4) {
+	if (speed > XFER_PIO_6) {
 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
 	}
@@ -2046,13 +2667,19 @@
 		t->recover = t->cycle - t->active;
 	}
 
+	/* In a few cases quantisation may produce enough errors to
+	   leave t->cycle too low for the sum of active and recovery;
+	   if so, we must correct this */
+	if (t->active + t->recover > t->cycle)
+		t->cycle = t->active + t->recover;
+
 	return 0;
 }
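The new clamp at the end of ata_timing_compute() guards against quantisation rounding t->cycle below t->active + t->recover. The same correction in standalone form, with invented numbers, just to show the arithmetic (this snippet is illustrative only and not part of the patch):

#include <stdio.h>

struct timing { unsigned int active, recover, cycle; };

/* cycle must be at least active + recover, as in the hunk above */
static void fix_cycle(struct timing *t)
{
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;
}

int main(void)
{
	/* invented example: quantisation left cycle at 115 ns */
	struct timing t = { .active = 70, .recover = 50, .cycle = 115 };

	fix_cycle(&t);
	printf("cycle = %u ns\n", t.cycle);	/* prints 120 */
	return 0;
}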
 
 /**
  *	ata_down_xfermask_limit - adjust dev xfer masks downward
  *	@dev: Device to adjust xfer masks
- *	@force_pio0: Force PIO0
+ *	@sel: ATA_DNXFER_* selector
  *
  *	Adjust xfer masks of @dev downward.  Note that this function
  *	does not apply the change.  Invoking ata_set_mode() afterwards
@@ -2064,41 +2691,83 @@
  *	RETURNS:
  *	0 on success, negative errno on failure
  */
-int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
+int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
 {
-	unsigned long xfer_mask;
-	int highbit;
+	char buf[32];
+	unsigned int orig_mask, xfer_mask;
+	unsigned int pio_mask, mwdma_mask, udma_mask;
+	int quiet, highbit;
 
-	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
-				      dev->udma_mask);
+	quiet = !!(sel & ATA_DNXFER_QUIET);
+	sel &= ~ATA_DNXFER_QUIET;
 
-	if (!xfer_mask)
-		goto fail;
-	/* don't gear down to MWDMA from UDMA, go directly to PIO */
-	if (xfer_mask & ATA_MASK_UDMA)
-		xfer_mask &= ~ATA_MASK_MWDMA;
+	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
+						  dev->mwdma_mask,
+						  dev->udma_mask);
+	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
+
+	switch (sel) {
+	case ATA_DNXFER_PIO:
+		highbit = fls(pio_mask) - 1;
+		pio_mask &= ~(1 << highbit);
+		break;
 
-	highbit = fls(xfer_mask) - 1;
-	xfer_mask &= ~(1 << highbit);
-	if (force_pio0)
-		xfer_mask &= 1 << ATA_SHIFT_PIO;
-	if (!xfer_mask)
-		goto fail;
+	case ATA_DNXFER_DMA:
+		if (udma_mask) {
+			highbit = fls(udma_mask) - 1;
+			udma_mask &= ~(1 << highbit);
+			if (!udma_mask)
+				return -ENOENT;
+		} else if (mwdma_mask) {
+			highbit = fls(mwdma_mask) - 1;
+			mwdma_mask &= ~(1 << highbit);
+			if (!mwdma_mask)
+				return -ENOENT;
+		}
+		break;
+
+	case ATA_DNXFER_40C:
+		udma_mask &= ATA_UDMA_MASK_40C;
+		break;
+
+	case ATA_DNXFER_FORCE_PIO0:
+		pio_mask &= 1;
+	case ATA_DNXFER_FORCE_PIO:
+		mwdma_mask = 0;
+		udma_mask = 0;
+		break;
+
+	default:
+		BUG();
+	}
+
+	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
+
+	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
+		return -ENOENT;
+
+	if (!quiet) {
+		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+			snprintf(buf, sizeof(buf), "%s:%s",
+				 ata_mode_string(xfer_mask),
+				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
+		else
+			snprintf(buf, sizeof(buf), "%s",
+				 ata_mode_string(xfer_mask));
+
+		ata_dev_printk(dev, KERN_WARNING,
+			       "limiting speed to %s\n", buf);
+	}
 
 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
 			    &dev->udma_mask);
 
-	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
-		       ata_mode_string(xfer_mask));
-
 	return 0;
-
- fail:
-	return -EINVAL;
 }
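With the ATA_DNXFER_* selectors a caller can now say which axis to back off instead of using the old all-or-nothing force_pio0 flag. A hedged sketch of a caller-side step-down -- the surrounding policy is invented; the function, the selectors and the ATA_DNXFER_QUIET modifier are from this patch:

/* Illustrative only: drop one DMA step; if nothing is left to drop on the
 * DMA axis, quietly trim the PIO mask instead.
 */
static void demote_xfer(struct ata_device *dev)
{
	if (ata_down_xfermask_limit(dev, ATA_DNXFER_DMA) == 0)
		return;		/* one UDMA/MWDMA step removed */

	ata_down_xfermask_limit(dev, ATA_DNXFER_PIO | ATA_DNXFER_QUIET);

	/* the new masks only take effect after a later ata_set_mode() */
}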
 
 static int ata_dev_set_mode(struct ata_device *dev)
 {
+	struct ata_eh_context *ehc = &dev->ap->eh_context;
 	unsigned int err_mask;
 	int rc;
 
@@ -2107,13 +2776,19 @@
 		dev->flags |= ATA_DFLAG_PIO;
 
 	err_mask = ata_dev_set_xfermode(dev);
+	/* Old CFA may refuse this command, which is just fine */
+	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
+		err_mask &= ~AC_ERR_DEV;
+
 	if (err_mask) {
 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
 			       "(err_mask=0x%x)\n", err_mask);
 		return -EIO;
 	}
 
+	ehc->i.flags |= ATA_EHI_POST_SETMODE;
 	rc = ata_dev_revalidate(dev, 0);
+	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
 	if (rc)
 		return rc;
 
@@ -2126,12 +2801,13 @@
 }
 
 /**
- *	ata_set_mode - Program timings and issue SET FEATURES - XFER
+ *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
  *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
  *
- *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
- *	ata_set_mode() fails, pointer to the failing device is
+ *	Standard implementation of the function used to tune and set
+ *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
+ *	ata_dev_set_mode() fails, pointer to the failing device is
  *	returned in @r_failed_dev.
  *
  *	LOCKING:
@@ -2140,24 +2816,12 @@
  *	RETURNS:
  *	0 on success, negative errno otherwise
  */
-int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+
+int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 {
 	struct ata_device *dev;
 	int i, rc = 0, used_dma = 0, found = 0;
 
-	/* has private set_mode? */
-	if (ap->ops->set_mode) {
-		/* FIXME: make ->set_mode handle no device case and
-		 * return error code and failing device on failure.
-		 */
-		for (i = 0; i < ATA_MAX_DEVICES; i++) {
-			if (ata_dev_ready(&ap->device[i])) {
-				ap->ops->set_mode(ap);
-				break;
-			}
-		}
-		return 0;
-	}
 
 	/* step 1: calculate xfer_mask */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
@@ -2217,8 +2881,8 @@
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		dev = &ap->device[i];
 
-		/* don't udpate suspended devices' xfer mode */
-		if (!ata_dev_ready(dev))
+		/* don't update suspended devices' xfer mode */
+		if (!ata_dev_enabled(dev))
 			continue;
 
 		rc = ata_dev_set_mode(dev);
@@ -2229,12 +2893,8 @@
 	/* Record simplex status. If we selected DMA then the other
 	 * host channels are not permitted to do so.
 	 */
-	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
-		ap->host_set->simplex_claimed = 1;
-
-	/* step5: chip specific finalisation */
-	if (ap->ops->post_set_mode)
-		ap->ops->post_set_mode(ap);
+	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
+		ap->host->simplex_claimed = ap;
 
  out:
 	if (rc)
@@ -2243,6 +2903,29 @@
 }
 
 /**
+ *	ata_set_mode - Program timings and issue SET FEATURES - XFER
+ *	@ap: port on which timings will be programmed
+ *	@r_failed_dev: out parameter for failed device
+ *
+ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
+ *	ata_set_mode() fails, pointer to the failing device is
+ *	returned in @r_failed_dev.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
+ */
+int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+	/* has private set_mode? */
+	if (ap->ops->set_mode)
+		return ap->ops->set_mode(ap, r_failed_dev);
+	return ata_do_set_mode(ap, r_failed_dev);
+}
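Since ->set_mode now carries the @r_failed_dev out parameter and an errno return, a driver with private mode handling is expected to honour the same contract; most can simply wrap ata_do_set_mode(). A sketch assuming a hypothetical "mydrv" (only the hook signature and ata_do_set_mode() come from this patch):

static int mydrv_set_mode(struct ata_port *ap, struct ata_device **r_failed)
{
	int rc;

	/* let the generic code negotiate and program the devices */
	rc = ata_do_set_mode(ap, r_failed);
	if (rc)
		return rc;	/* *r_failed already points at the culprit */

	/* chip-specific timing register fix-ups would go here */
	return 0;
}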
+
+/**
  *	ata_tf_to_host - issue ATA taskfile to host controller
  *	@ap: port to which command is being issued
  *	@tf: ATA taskfile register set
@@ -2252,7 +2935,7 @@
  *	other threads.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 static inline void ata_tf_to_host(struct ata_port *ap,
@@ -2271,11 +2954,14 @@
  *	Sleep until ATA Status register bit BSY clears,
  *	or a timeout occurs.
  *
- *	LOCKING: None.
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
  */
-
-unsigned int ata_busy_sleep (struct ata_port *ap,
-			     unsigned long tmout_pat, unsigned long tmout)
+int ata_busy_sleep(struct ata_port *ap,
+		   unsigned long tmout_pat, unsigned long tmout)
 {
 	unsigned long timer_start, timeout;
 	u8 status;
@@ -2283,69 +2969,122 @@
 	status = ata_busy_wait(ap, ATA_BUSY, 300);
 	timer_start = jiffies;
 	timeout = timer_start + tmout_pat;
-	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
 		msleep(50);
 		status = ata_busy_wait(ap, ATA_BUSY, 3);
 	}
 
-	if (status & ATA_BUSY)
+	if (status != 0xff && (status & ATA_BUSY))
 		ata_port_printk(ap, KERN_WARNING,
-				"port is slow to respond, this delay is known to "
-				"occur on vacant SATA ports\n");
+				"port is slow to respond, please be patient "
+				"(Status 0x%x)\n", status);
 
 	timeout = timer_start + tmout;
-	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+	while (status != 0xff && (status & ATA_BUSY) &&
+	       time_before(jiffies, timeout)) {
 		msleep(50);
 		status = ata_chk_status(ap);
 	}
 
+	if (status == 0xff)
+		return -ENODEV;
+
 	if (status & ATA_BUSY) {
 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
-				"(%lu secs)\n", tmout / HZ);
-		return 1;
+				"(%lu secs, Status 0x%x)\n",
+				tmout / HZ, status);
+		return -EBUSY;
 	}
 
 	return 0;
 }
 
-static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+/**
+ *	ata_wait_ready - sleep until BSY clears, or timeout
+ *	@ap: port containing status register to be polled
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Sleep until ATA Status register bit BSY clears, or timeout
+ *	occurs.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
+{
+	unsigned long start = jiffies;
+	int warned = 0;
+
+	while (1) {
+		u8 status = ata_chk_status(ap);
+		unsigned long now = jiffies;
+
+		if (!(status & ATA_BUSY))
+			return 0;
+		if (!ata_port_online(ap) && status == 0xff)
+			return -ENODEV;
+		if (time_after(now, deadline))
+			return -EBUSY;
+
+		if (!warned && time_after(now, start + 5 * HZ) &&
+		    (deadline - now > 3 * HZ)) {
+			ata_port_printk(ap, KERN_WARNING,
+				"port is slow to respond, please be patient "
+				"(Status 0x%x)\n", status);
+			warned = 1;
+		}
+
+		msleep(50);
+	}
+}
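ata_wait_ready() replaces the old two-stage ata_busy_sleep() timeouts with an absolute jiffies deadline, so several wait points can share one time budget. A small sketch of the calling pattern -- the wrapper is invented; the 40 s budget mirrors the ata_bus_reset() change further down:

static int wait_after_reset(struct ata_port *ap)
{
	unsigned long deadline = jiffies + 40 * HZ;	/* whole-sequence budget */
	int rc;

	rc = ata_wait_ready(ap, deadline);
	if (rc == -ENODEV)
		return 0;	/* vacant port (status 0xff); not an error here */
	return rc;		/* 0 when ready, -EBUSY on timeout */
}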
+
+static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
+			      unsigned long deadline)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int dev0 = devmask & (1 << 0);
 	unsigned int dev1 = devmask & (1 << 1);
-	unsigned long timeout;
+	int rc, ret = 0;
 
 	/* if device 0 was found in ata_devchk, wait for its
 	 * BSY bit to clear
 	 */
-	if (dev0)
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	if (dev0) {
+		rc = ata_wait_ready(ap, deadline);
+		if (rc) {
+			if (rc != -ENODEV)
+				return rc;
+			ret = rc;
+		}
+	}
 
 	/* if device 1 was found in ata_devchk, wait for
 	 * register access, then wait for BSY to clear
 	 */
-	timeout = jiffies + ATA_TMOUT_BOOT;
 	while (dev1) {
 		u8 nsect, lbal;
 
 		ap->ops->dev_select(ap, 1);
-		if (ap->flags & ATA_FLAG_MMIO) {
-			nsect = readb((void __iomem *) ioaddr->nsect_addr);
-			lbal = readb((void __iomem *) ioaddr->lbal_addr);
-		} else {
-			nsect = inb(ioaddr->nsect_addr);
-			lbal = inb(ioaddr->lbal_addr);
-		}
+		nsect = ioread8(ioaddr->nsect_addr);
+		lbal = ioread8(ioaddr->lbal_addr);
 		if ((nsect == 1) && (lbal == 1))
 			break;
-		if (time_after(jiffies, timeout)) {
-			dev1 = 0;
-			break;
-		}
+		if (time_after(jiffies, deadline))
+			return -EBUSY;
 		msleep(50);	/* give drive a breather */
 	}
-	if (dev1)
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	if (dev1) {
+		rc = ata_wait_ready(ap, deadline);
+		if (rc) {
+			if (rc != -ENODEV)
+				return rc;
+			ret = rc;
+		}
+	}
 
 	/* is all this really necessary? */
 	ap->ops->dev_select(ap, 0);
@@ -2353,29 +3092,23 @@
 		ap->ops->dev_select(ap, 1);
 	if (dev0)
 		ap->ops->dev_select(ap, 0);
+
+	return ret;
 }
 
-static unsigned int ata_bus_softreset(struct ata_port *ap,
-				      unsigned int devmask)
+static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+			     unsigned long deadline)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 
-	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
+	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
 
 	/* software reset.  causes dev0 to be selected */
-	if (ap->flags & ATA_FLAG_MMIO) {
-		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-		udelay(20);	/* FIXME: flush */
-		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
-		udelay(20);	/* FIXME: flush */
-		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-	} else {
-		outb(ap->ctl, ioaddr->ctl_addr);
-		udelay(10);
-		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
-		udelay(10);
-		outb(ap->ctl, ioaddr->ctl_addr);
-	}
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+	udelay(20);	/* FIXME: flush */
+	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+	udelay(20);	/* FIXME: flush */
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
 
 	/* spec mandates ">= 2ms" before checking status.
 	 * We wait 150ms, because that was the magic delay used for
@@ -2387,20 +3120,16 @@
 	 *
 	 * Old drivers/ide uses the 2mS rule and then waits for ready
 	 */
-	msleep(1000);
+	msleep(150);
 
 	/* Before we perform post reset processing we want to see if
 	 * the bus shows 0xFF because the odd clown forgets the D7
 	 * pulldown resistor.
 	 */
-	if (ata_check_status(ap) == 0xFF) {
-		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
-		return AC_ERR_OTHER;
-	}
-
-	ata_bus_post_reset(ap, devmask);
+	if (ata_check_status(ap) == 0xFF)
+		return -ENODEV;
 
-	return 0;
+	return ata_bus_post_reset(ap, devmask, deadline);
 }
 
 /**
@@ -2417,7 +3146,7 @@
  *
  *	LOCKING:
  *	PCI/etc. bus probe sem.
- *	Obtains host_set lock.
+ *	Obtains host lock.
  *
  *	SIDE EFFECTS:
  *	Sets ATA_FLAG_DISABLED if bus reset fails.
@@ -2429,8 +3158,9 @@
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
 	u8 err;
 	unsigned int dev0, dev1 = 0, devmask = 0;
+	int rc;
 
-	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
+	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
 
 	/* determine if device 0/1 are present */
 	if (ap->flags & ATA_FLAG_SATA_RESET)
@@ -2450,9 +3180,11 @@
 	ap->ops->dev_select(ap, 0);
 
 	/* issue bus reset */
-	if (ap->flags & ATA_FLAG_SRST)
-		if (ata_bus_softreset(ap, devmask))
+	if (ap->flags & ATA_FLAG_SRST) {
+		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
+		if (rc && rc != -ENODEV)
 			goto err_out;
+	}
 
 	/*
 	 * determine by signature whether we have ATA or ATAPI devices
@@ -2462,8 +3194,7 @@
 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
 
 	/* re-enable interrupts */
-	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
-		ata_irq_on(ap);
+	ap->ops->irq_on(ap);
 
 	/* is double-select really necessary? */
 	if (ap->device[1].class != ATA_DEV_NONE)
@@ -2478,10 +3209,7 @@
 
 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
 		/* set up device control for ATA_FLAG_SATA_RESET */
-		if (ap->flags & ATA_FLAG_MMIO)
-			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-		else
-			outb(ap->ctl, ioaddr->ctl_addr);
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
 	}
 
 	DPRINTK("EXIT\n");
@@ -2498,29 +3226,37 @@
  *	sata_phy_debounce - debounce SATA phy status
  *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
+ *	@deadline: deadline jiffies for the operation
  *
  *	Make sure SStatus of @ap reaches stable state, determined by
  *	holding the same value where DET is not 1 for @duration polled
  *	every @interval, before @timeout.  Timeout constraints the
- *	beginning of the stable state.  Because, after hot unplugging,
- *	DET gets stuck at 1 on some controllers, this functions waits
+ *	beginning of the stable state.  Because DET gets stuck at 1 on
+ *	some controllers after hot unplugging, this function waits
  *	until timeout then returns 0 if DET is stable at 1.
  *
+ *	@timeout is further limited by @deadline.  The sooner of the
+ *	two is used.
+ *
  *	LOCKING:
  *	Kernel thread context (may sleep)
  *
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
+int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
+		      unsigned long deadline)
 {
 	unsigned long interval_msec = params[0];
-	unsigned long duration = params[1] * HZ / 1000;
-	unsigned long timeout = jiffies + params[2] * HZ / 1000;
-	unsigned long last_jiffies;
+	unsigned long duration = msecs_to_jiffies(params[1]);
+	unsigned long last_jiffies, t;
 	u32 last, cur;
 	int rc;
 
+	t = jiffies + msecs_to_jiffies(params[2]);
+	if (time_before(t, deadline))
+		deadline = t;
+
 	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
 		return rc;
 	cur &= 0xf;
@@ -2536,7 +3272,7 @@
 
 		/* DET stable? */
 		if (cur == last) {
-			if (cur == 1 && time_before(jiffies, timeout))
+			if (cur == 1 && time_before(jiffies, deadline))
 				continue;
 			if (time_after(jiffies, last_jiffies + duration))
 				return 0;
@@ -2547,8 +3283,8 @@
 		last = cur;
 		last_jiffies = jiffies;
 
-		/* check timeout */
-		if (time_after(jiffies, timeout))
+		/* check deadline */
+		if (time_after(jiffies, deadline))
 			return -EBUSY;
 	}
 }
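The debounce parameters are still the { interval, duration, timeout } triple in milliseconds; only the timeout is now additionally capped by the caller's deadline. A sketch with invented numbers (real callers pass the predefined sata_deb_timing_* tables referenced elsewhere in this patch):

/* Illustrative only: poll SStatus every 20 ms, require 100 ms of stability,
 * give up after 2 s or at @deadline, whichever comes first.
 */
static int debounce_link(struct ata_port *ap, unsigned long deadline)
{
	static const unsigned long params[] = { 20, 100, 2000 };

	return sata_phy_debounce(ap, params, deadline);
}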
@@ -2557,6 +3293,7 @@
  *	sata_phy_resume - resume SATA phy
  *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
+ *	@deadline: deadline jiffies for the operation
  *
  *	Resume SATA phy of @ap and debounce it.
  *
@@ -2566,7 +3303,8 @@
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
+int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
+		    unsigned long deadline)
 {
 	u32 scontrol;
 	int rc;
@@ -2584,43 +3322,19 @@
 	 */
 	msleep(200);
 
-	return sata_phy_debounce(ap, params);
-}
-
-static void ata_wait_spinup(struct ata_port *ap)
-{
-	struct ata_eh_context *ehc = &ap->eh_context;
-	unsigned long end, secs;
-	int rc;
-
-	/* first, debounce phy if SATA */
-	if (ap->cbl == ATA_CBL_SATA) {
-		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
-
-		/* if debounced successfully and offline, no need to wait */
-		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
-			return;
-	}
-
-	/* okay, let's give the drive time to spin up */
-	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
-	secs = ((end - jiffies) + HZ - 1) / HZ;
-
-	if (time_after(jiffies, end))
-		return;
-
-	if (secs > 5)
-		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
-				"(%lu secs)\n", secs);
-
-	schedule_timeout_uninterruptible(end - jiffies);
+	return sata_phy_debounce(ap, params, deadline);
 }
 
 /**
  *	ata_std_prereset - prepare for reset
  *	@ap: ATA port to be reset
+ *	@deadline: deadline jiffies for the operation
  *
- *	@ap is about to be reset.  Initialize it.
+ *	@ap is about to be reset.  Initialize it.  Failure from
+ *	prereset makes libata abort the whole reset sequence and give up
+ *	that port, so prereset should be best-effort.  It does its
+ *	best to prepare for the reset sequence but if things go wrong, it
+ *	should just whine, not fail.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
@@ -2628,41 +3342,41 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_std_prereset(struct ata_port *ap)
+int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
 {
 	struct ata_eh_context *ehc = &ap->eh_context;
 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	int rc;
 
-	/* handle link resume & hotplug spinup */
+	/* handle link resume */
 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
 	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
 		ehc->i.action |= ATA_EH_HARDRESET;
 
-	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
-	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
-		ata_wait_spinup(ap);
-
 	/* if we're about to do hardreset, nothing more to do */
 	if (ehc->i.action & ATA_EH_HARDRESET)
 		return 0;
 
 	/* if SATA, resume phy */
 	if (ap->cbl == ATA_CBL_SATA) {
-		rc = sata_phy_resume(ap, timing);
-		if (rc && rc != -EOPNOTSUPP) {
-			/* phy resume failed */
+		rc = sata_phy_resume(ap, timing, deadline);
+		/* whine about phy resume failure but proceed */
+		if (rc && rc != -EOPNOTSUPP)
 			ata_port_printk(ap, KERN_WARNING, "failed to resume "
 					"link for reset (errno=%d)\n", rc);
-			return rc;
-		}
 	}
 
 	/* Wait for !BSY if the controller can wait for the first D2H
 	 * Reg FIS and we don't know that no device is attached.
 	 */
-	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
-		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
+		rc = ata_wait_ready(ap, deadline);
+		if (rc && rc != -ENODEV) {
+			ata_port_printk(ap, KERN_WARNING, "device not ready "
+					"(errno=%d), forcing hardreset\n", rc);
+			ehc->i.action |= ATA_EH_HARDRESET;
+		}
+	}
 
 	return 0;
 }
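Because prereset is now best-effort and deadline-driven, a driver that needs extra preparation normally wraps ata_std_prereset() rather than replacing it. A hedged sketch (the driver and its preparation step are hypothetical; the hook signature comes from this patch):

static int mydrv_prereset(struct ata_port *ap, unsigned long deadline)
{
	/* chip-specific preparation would go here; keep it best-effort --
	 * warn on trouble, do not fail, so the reset sequence still runs */

	return ata_std_prereset(ap, deadline);
}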
@@ -2671,6 +3385,7 @@
  *	ata_std_softreset - reset host port via ATA SRST
  *	@ap: port to reset
  *	@classes: resulting classes of attached devices
+ *	@deadline: deadline jiffies for the operation
  *
  *	Reset host port using ATA SRST.
  *
@@ -2680,10 +3395,12 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
+int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
+		      unsigned long deadline)
 {
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-	unsigned int devmask = 0, err_mask;
+	unsigned int devmask = 0;
+	int rc;
 	u8 err;
 
 	DPRINTK("ENTER\n");
@@ -2704,11 +3421,11 @@
 
 	/* issue bus reset */
 	DPRINTK("about to softreset, devmask=%x\n", devmask);
-	err_mask = ata_bus_softreset(ap, devmask);
-	if (err_mask) {
-		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
-				err_mask);
-		return -EIO;
+	rc = ata_bus_softreset(ap, devmask, deadline);
+	/* if the link is occupied, -ENODEV is also an error */
+	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
+		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+		return rc;
 	}
 
 	/* determine by signature whether we have ATA or ATAPI devices */
@@ -2722,9 +3439,10 @@
 }
 
 /**
- *	sata_std_hardreset - reset host port via SATA phy reset
+ *	sata_port_hardreset - reset port via SATA phy reset
  *	@ap: port to reset
- *	@class: resulting class of attached device
+ *	@timing: timing parameters { interval, duration, timeout } in msec
+ *	@deadline: deadline jiffies for the operation
  *
  *	SATA phy-reset host port using DET bits of SControl register.
  *
@@ -2734,10 +3452,9 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
+int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
+			unsigned long deadline)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
-	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	u32 scontrol;
 	int rc;
 
@@ -2750,24 +3467,24 @@
 		 * and Sil3124.
 		 */
 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
-			return rc;
+			goto out;
 
 		scontrol = (scontrol & 0x0f0) | 0x304;
 
 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
-			return rc;
+			goto out;
 
 		sata_set_spd(ap);
 	}
 
 	/* issue phy wake/reset */
 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
-		return rc;
+		goto out;
 
 	scontrol = (scontrol & 0x0f0) | 0x301;
 
 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
-		return rc;
+		goto out;
 
 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
 	 * 10.4.2 says at least 1 ms.
@@ -2775,7 +3492,42 @@
 	msleep(1);
 
 	/* bring phy back */
-	sata_phy_resume(ap, timing);
+	rc = sata_phy_resume(ap, timing, deadline);
+ out:
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
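Splitting the COMRESET phase out as sata_port_hardreset() lets drivers reuse the SControl handling and then do their own post-reset work. A hedged sketch of a driver hardreset built on it -- the controller-specific steps are placeholders; the exported functions and the timing helper are from this patch:

static int mydrv_hardreset(struct ata_port *ap, unsigned int *class,
			   unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	rc = sata_port_hardreset(ap, timing, deadline);
	if (rc)
		return rc;

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return 0;
	}

	/* controller-specific post-reset work and classification (e.g.
	 * reading the controller's own D2H FIS area) would go here */
	return 0;
}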
+
+/**
+ *	sata_std_hardreset - reset host port via SATA phy reset
+ *	@ap: port to reset
+ *	@class: resulting class of attached device
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	SATA phy-reset host port using DET bits of SControl register,
+ *	wait for !BSY and classify the attached device.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
+		       unsigned long deadline)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* do hardreset */
+	rc = sata_port_hardreset(ap, timing, deadline);
+	if (rc) {
+		ata_port_printk(ap, KERN_ERR,
+				"COMRESET failed (errno=%d)\n", rc);
+		return rc;
+	}
 
 	/* TODO: phy layer with polling, timeouts, etc. */
 	if (ata_port_offline(ap)) {
@@ -2784,10 +3536,15 @@
 		return 0;
 	}
 
-	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
+	/* wait a while before checking status, see SRST for more info */
+	msleep(150);
+
+	rc = ata_wait_ready(ap, deadline);
+	/* if the link is occupied, -ENODEV is also an error */
+	if (rc) {
 		ata_port_printk(ap, KERN_ERR,
-				"COMRESET failed (device not ready)\n");
-		return -EIO;
+				"COMRESET failed (errno=%d)\n", rc);
+		return rc;
 	}
 
 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
@@ -2824,11 +3581,8 @@
 		sata_scr_write(ap, SCR_ERROR, serror);
 
 	/* re-enable interrupts */
-	if (!ap->ops->error_handler) {
-		/* FIXME: hack. create a hook instead */
-		if (ap->ioaddr.ctl_addr)
-			ata_irq_on(ap);
-	}
+	if (!ap->ops->error_handler)
+		ap->ops->irq_on(ap);
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
@@ -2843,12 +3597,8 @@
 	}
 
 	/* set up device control */
-	if (ap->ioaddr.ctl_addr) {
-		if (ap->flags & ATA_FLAG_MMIO)
-			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
-		else
-			outb(ap->ctl, ap->ioaddr.ctl_addr);
-	}
+	if (ap->ioaddr.ctl_addr)
+		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
 
 	DPRINTK("EXIT\n");
 }
@@ -2873,8 +3623,8 @@
 			       const u16 *new_id)
 {
 	const u16 *old_id = dev->id;
-	unsigned char model[2][41], serial[2][21];
-	u64 new_n_sectors;
+	unsigned char model[2][ATA_ID_PROD_LEN + 1];
+	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
 
 	if (dev->class != new_class) {
 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
@@ -2882,11 +3632,10 @@
 		return 0;
 	}
 
-	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
-	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
-	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
-	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
-	new_n_sectors = ata_id_n_sectors(new_id);
+	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
+	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
+	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
+	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
 
 	if (strcmp(model[0], model[1])) {
 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
@@ -2900,21 +3649,13 @@
 		return 0;
 	}
 
-	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
-		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
-			       "%llu != %llu\n",
-			       (unsigned long long)dev->n_sectors,
-			       (unsigned long long)new_n_sectors);
-		return 0;
-	}
-
 	return 1;
 }
 
 /**
- *	ata_dev_revalidate - Revalidate ATA device
- *	@dev: device to revalidate
- *	@post_reset: is this revalidation after reset?
+ *	ata_dev_reread_id - Re-read IDENTIFY data
+ *	@dev: target ATA device
+ *	@readid_flags: read ID flags
  *
  *	Re-read IDENTIFY page and make sure @dev is still attached to
  *	the port.
@@ -2925,92 +3666,164 @@
  *	RETURNS:
  *	0 on success, negative errno otherwise
  */
-int ata_dev_revalidate(struct ata_device *dev, int post_reset)
+int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
 {
 	unsigned int class = dev->class;
 	u16 *id = (void *)dev->ap->sector_buf;
 	int rc;
 
-	if (!ata_dev_enabled(dev)) {
-		rc = -ENODEV;
+	/* read ID data */
+	rc = ata_dev_read_id(dev, &class, readid_flags, id);
+	if (rc)
+		return rc;
+
+	/* is the device still there? */
+	if (!ata_dev_same_device(dev, class, id))
+		return -ENODEV;
+
+	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
+	return 0;
+}
+
+/**
+ *	ata_dev_revalidate - Revalidate ATA device
+ *	@dev: device to revalidate
+ *	@readid_flags: read ID flags
+ *
+ *	Re-read IDENTIFY page, make sure @dev is still attached to the
+ *	port and reconfigure it according to the new IDENTIFY page.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, negative errno otherwise
+ */
+int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
+{
+	u64 n_sectors = dev->n_sectors;
+	int rc;
+
+	if (!ata_dev_enabled(dev))
+		return -ENODEV;
+
+	/* re-read ID */
+	rc = ata_dev_reread_id(dev, readid_flags);
+	if (rc)
 		goto fail;
-	}
 
-	/* read ID data */
-	rc = ata_dev_read_id(dev, &class, post_reset, id);
+	/* configure device according to the new ID */
+	rc = ata_dev_configure(dev);
 	if (rc)
 		goto fail;
 
-	/* is the device still there? */
-	if (!ata_dev_same_device(dev, class, id)) {
+	/* verify n_sectors hasn't changed */
+	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
+		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
+			       "%llu != %llu\n",
+			       (unsigned long long)n_sectors,
+			       (unsigned long long)dev->n_sectors);
 		rc = -ENODEV;
 		goto fail;
 	}
 
-	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
-
-	/* configure device according to the new ID */
-	rc = ata_dev_configure(dev, 0);
-	if (rc == 0)
-		return 0;
+	return 0;
 
  fail:
 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
 	return rc;
 }
 
-static const char * const ata_dma_blacklist [] = {
-	"WDC AC11000H", NULL,
-	"WDC AC22100H", NULL,
-	"WDC AC32500H", NULL,
-	"WDC AC33100H", NULL,
-	"WDC AC31600H", NULL,
-	"WDC AC32100H", "24.09P07",
-	"WDC AC23200L", "21.10N21",
-	"Compaq CRD-8241B",  NULL,
-	"CRD-8400B", NULL,
-	"CRD-8480B", NULL,
-	"CRD-8482B", NULL,
- 	"CRD-84", NULL,
-	"SanDisk SDP3B", NULL,
-	"SanDisk SDP3B-64", NULL,
-	"SANYO CD-ROM CRD", NULL,
-	"HITACHI CDR-8", NULL,
-	"HITACHI CDR-8335", NULL,
-	"HITACHI CDR-8435", NULL,
-	"Toshiba CD-ROM XM-6202B", NULL,
-	"TOSHIBA CD-ROM XM-1702BC", NULL,
-	"CD-532E-A", NULL,
-	"E-IDE CD-ROM CR-840", NULL,
-	"CD-ROM Drive/F5A", NULL,
-	"WPI CDD-820", NULL,
-	"SAMSUNG CD-ROM SC-148C", NULL,
-	"SAMSUNG CD-ROM SC", NULL,
-	"SanDisk SDP3B-64", NULL,
-	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
-	"_NEC DV5800A", NULL,
-	"SAMSUNG CD-ROM SN-124", "N001"
+struct ata_blacklist_entry {
+	const char *model_num;
+	const char *model_rev;
+	unsigned long horkage;
 };
 
-static int ata_strim(char *s, size_t len)
-{
-	len = strnlen(s, len);
+static const struct ata_blacklist_entry ata_device_blacklist [] = {
+	/* Devices with DMA related problems under Linux */
+	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
+	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
+	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
+	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
+	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
+	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
+	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
+	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
+	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
+	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
+	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
+	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
+	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
+	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
+	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
+	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
+
+	/* Weird ATAPI devices */
+	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 |
+						ATA_HORKAGE_DMA_RW_ONLY },
+
+	/* Devices we expect to fail diagnostics */
+
+	/* Devices where NCQ should be avoided */
+	/* NCQ is slow */
+	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
+	/* http://thread.gmane.org/gmane.linux.ide/14907 */
+	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
+	/* NCQ is broken */
+	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
+	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
+	/* NCQ hard hangs device under heavier load, needs hard power cycle */
+	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
+	/* Blacklist entries taken from Silicon Image 3124/3132
+	   Windows driver .inf file - also several Linux problem reports */
+	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
+	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
+	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
+
+	/* Devices with NCQ limits */
+
+	/* End Marker */
+	{ }
+};
 
-	/* ATAPI specifies that empty space is blank-filled; remove blanks */
-	while ((len > 0) && (s[len - 1] == ' ')) {
-		len--;
-		s[len] = 0;
+unsigned long ata_device_blacklisted(const struct ata_device *dev)
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
+	const struct ata_blacklist_entry *ad = ata_device_blacklist;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
+
+	while (ad->model_num) {
+		if (!strcmp(ad->model_num, model_num)) {
+			if (ad->model_rev == NULL)
+				return ad->horkage;
+			if (!strcmp(ad->model_rev, model_rev))
+				return ad->horkage;
+		}
+		ad++;
 	}
-	return len;
+	return 0;
 }
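The blacklist is now a table of { model, firmware revision, horkage flags } terminated by an empty entry, so a new quirk is a one-line addition and callers just test the returned flag mask. A sketch of both sides -- the drive model and revision are invented, and entries like this would really be added to ata_device_blacklist above; the structure, the ATA_HORKAGE_* flags and ata_device_blacklisted() are from this patch:

/* Illustrative only: shown as a separate array purely for the format. */
static const struct ata_blacklist_entry more_quirks[] = {
	/* this firmware revision misbehaves with NCQ; a NULL revision
	 * would match every revision of the model */
	{ "EXAMPLE DRIVE X1",	"FW1.23",	ATA_HORKAGE_NONCQ },
	{ }	/* end marker */
};

/* Caller side: test the mask when deciding whether to use a feature. */
static int may_use_ncq(const struct ata_device *dev)
{
	return !(ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ);
}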
 
 static int ata_dma_blacklisted(const struct ata_device *dev)
 {
-	unsigned char model_num[40];
-	unsigned char model_rev[16];
-	unsigned int nlen, rlen;
-	int i;
-
 	/* We don't support polling DMA.
 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
@@ -3018,23 +3831,7 @@
 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
 	    (dev->flags & ATA_DFLAG_CDB_INTR))
 		return 1;
-
-	ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
-			  sizeof(model_num));
-	ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
-			  sizeof(model_rev));
-	nlen = ata_strim(model_num, sizeof(model_num));
-	rlen = ata_strim(model_rev, sizeof(model_rev));
-
-	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
-		if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
-			if (ata_dma_blacklist[i+1] == NULL)
-				return 1;
-			if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
-				return 1;
-		}
-	}
-	return 0;
+	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
 }
 
 /**
@@ -3046,61 +3843,72 @@
  *	known limits including host controller limits, device
  *	blacklist, etc...
  *
- *	FIXME: The current implementation limits all transfer modes to
- *	the fastest of the lowested device on the port.  This is not
- *	required on most controllers.
- *
  *	LOCKING:
  *	None.
  */
 static void ata_dev_xfermask(struct ata_device *dev)
 {
 	struct ata_port *ap = dev->ap;
-	struct ata_host_set *hs = ap->host_set;
+	struct ata_host *host = ap->host;
 	unsigned long xfer_mask;
-	int i;
 
+	/* controller modes available */
 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
 				      ap->mwdma_mask, ap->udma_mask);
 
-	/* Apply cable rule here.  Don't apply it early because when
-	 * we handle hot plug the cable type can itself change.
-	 */
-	if (ap->cbl == ATA_CBL_PATA40)
-		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+	/* drive modes available */
+	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
+				       dev->mwdma_mask, dev->udma_mask);
+	xfer_mask &= ata_id_xfermask(dev->id);
 
-	/* FIXME: Use port-wide xfermask for now */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *d = &ap->device[i];
-
-		if (ata_dev_absent(d))
-			continue;
-
-		if (ata_dev_disabled(d)) {
-			/* to avoid violating device selection timing */
-			xfer_mask &= ata_pack_xfermask(d->pio_mask,
-						       UINT_MAX, UINT_MAX);
-			continue;
-		}
-
-		xfer_mask &= ata_pack_xfermask(d->pio_mask,
-					       d->mwdma_mask, d->udma_mask);
-		xfer_mask &= ata_id_xfermask(d->id);
-		if (ata_dma_blacklisted(d))
-			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	/*
+	 *	CFA Advanced TrueIDE timings are not allowed on a shared
+	 *	cable
+	 */
+	if (ata_dev_pair(dev)) {
+		/* No PIO5 or PIO6 */
+		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
+		/* No MWDMA3 or MWDMA 4 */
+		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
 	}
 
-	if (ata_dma_blacklisted(dev))
+	if (ata_dma_blacklisted(dev)) {
+		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 		ata_dev_printk(dev, KERN_WARNING,
 			       "device is on DMA blacklist, disabling DMA\n");
+	}
 
-	if (hs->flags & ATA_HOST_SIMPLEX) {
-		if (hs->simplex_claimed)
-			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	if ((host->flags & ATA_HOST_SIMPLEX) &&
+	    host->simplex_claimed && host->simplex_claimed != ap) {
+		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
+			       "other device, disabling DMA\n");
 	}
 
+	if (ap->flags & ATA_FLAG_NO_IORDY)
+		xfer_mask &= ata_pio_mask_no_iordy(dev);
+
 	if (ap->ops->mode_filter)
-		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
+		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
+
+	/* Apply the cable rule here.  Don't apply it early because when
+	 * we handle hot plug the cable type can itself change.
+	 * Check this last so that we know whether the transfer rate was
+	 * limited solely by the cable.
+	 * Unknown or 80-wire cables reported on the host side are checked
+	 * against the drive side as well.  Cases where we know a 40-wire
+	 * cable is safely being used as 80-wire are not checked here.
+	 */
+	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+		/* UDMA/44 or higher would be available */
+		if ((ap->cbl == ATA_CBL_PATA40) ||
+		    (ata_drive_40wire(dev->id) &&
+		     (ap->cbl == ATA_CBL_PATA_UNK ||
+		      ap->cbl == ATA_CBL_PATA80))) {
+			ata_dev_printk(dev, KERN_WARNING,
+				 "limited to UDMA/33 due to 40-wire cable\n");
+			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+		}
 
 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
 			    &dev->mwdma_mask, &dev->udma_mask);
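The cable clamp just above is plain bit masking on the packed xfer mask: bit n of the UDMA field selects UDMA mode n, so 0xF8 (bits 3..7) covers UDMA/44 and faster, and clearing those bits leaves at most UDMA/33, which is safe on a 40-wire cable. A standalone restatement of that mask operation (ATA_SHIFT_UDMA is passed in rather than assumed, since its value comes from the headers and not from this hunk):

/* Illustrative only: clear UDMA modes 3..7 from a packed xfer mask. */
static unsigned long clamp_to_udma33(unsigned long xfer_mask,
				     unsigned int shift_udma)
{
	return xfer_mask & ~(0xF8UL << shift_udma);
}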
@@ -3186,10 +3994,9 @@
  *	Unmap all mapped DMA memory associated with this command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-
-static void ata_sg_clean(struct ata_queued_cmd *qc)
+void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
@@ -3246,7 +4053,7 @@
  *	associated with the current disk command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  */
 static void ata_fill_sg(struct ata_queued_cmd *qc)
@@ -3298,7 +4105,7 @@
  *	supplied PACKET command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS: 0 when ATAPI DMA can be used
  *               nonzero otherwise
@@ -3308,6 +4115,26 @@
 	struct ata_port *ap = qc->ap;
 	int rc = 0; /* Assume ATAPI DMA is OK by default */
 
+	/* some drives can only do ATAPI DMA on read/write */
+	if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
+		struct scsi_cmnd *cmd = qc->scsicmd;
+		u8 *scsicmd = cmd->cmnd;
+
+		switch (scsicmd[0]) {
+		case READ_10:
+		case WRITE_10:
+		case READ_12:
+		case WRITE_12:
+		case READ_6:
+		case WRITE_6:
+			/* ATAPI DMA may be OK */
+			break;
+		default:
+			/* turn off atapi dma */
+			return 1;
+		}
+	}
+
 	if (ap->ops->check_atapi_dma)
 		rc = ap->ops->check_atapi_dma(qc);
 
@@ -3320,7 +4147,7 @@
  *	Prepare ATA taskfile for submission.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_qc_prep(struct ata_queued_cmd *qc)
 {
@@ -3342,24 +4169,20 @@
  *	to point to a single memory buffer, @buf of byte length @buflen.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 {
-	struct scatterlist *sg;
-
 	qc->flags |= ATA_QCFLAG_SINGLE;
 
-	memset(&qc->sgent, 0, sizeof(qc->sgent));
 	qc->__sg = &qc->sgent;
 	qc->n_elem = 1;
 	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 	qc->nbytes = buflen;
 
-	sg = qc->__sg;
-	sg_init_one(sg, buf, buflen);
+	sg_init_one(&qc->sgent, buf, buflen);
 }
 
 /**
@@ -3373,7 +4196,7 @@
  *	elements.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@@ -3392,7 +4215,7 @@
  *	DMA-map the memory buffer associated with queued_cmd @qc.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, negative on error.
@@ -3461,7 +4284,7 @@
  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, negative on error.
@@ -3475,7 +4298,7 @@
 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
 	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
-	VPRINTK("ENTER, ata%u\n", ap->id);
+	VPRINTK("ENTER, ata%u\n", ap->print_id);
 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
 
 	/* we must lengthen transfers to end on a 32-bit boundary */
@@ -3562,53 +4385,7 @@
 }
 
 /**
- *	ata_mmio_data_xfer - Transfer data by MMIO
- *	@adev: device for this I/O
- *	@buf: data buffer
- *	@buflen: buffer length
- *	@write_data: read/write
- *
- *	Transfer data from/to the device data register by MMIO.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
-			unsigned int buflen, int write_data)
-{
-	struct ata_port *ap = adev->ap;
-	unsigned int i;
-	unsigned int words = buflen >> 1;
-	u16 *buf16 = (u16 *) buf;
-	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
-
-	/* Transfer multiple of 2 bytes */
-	if (write_data) {
-		for (i = 0; i < words; i++)
-			writew(le16_to_cpu(buf16[i]), mmio);
-	} else {
-		for (i = 0; i < words; i++)
-			buf16[i] = cpu_to_le16(readw(mmio));
-	}
-
-	/* Transfer trailing 1 byte, if any. */
-	if (unlikely(buflen & 0x01)) {
-		u16 align_buf[1] = { 0 };
-		unsigned char *trailing_buf = buf + buflen - 1;
-
-		if (write_data) {
-			memcpy(align_buf, trailing_buf, 1);
-			writew(le16_to_cpu(align_buf[0]), mmio);
-		} else {
-			align_buf[0] = cpu_to_le16(readw(mmio));
-			memcpy(trailing_buf, align_buf, 1);
-		}
-	}
-}
-
-/**
- *	ata_pio_data_xfer - Transfer data by PIO
+ *	ata_data_xfer - Transfer data by PIO
  *	@adev: device to target
  *	@buf: data buffer
  *	@buflen: buffer length
@@ -3619,18 +4396,17 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-
-void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
-		       unsigned int buflen, int write_data)
+void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
+		   unsigned int buflen, int write_data)
 {
 	struct ata_port *ap = adev->ap;
 	unsigned int words = buflen >> 1;
 
 	/* Transfer multiple of 2 bytes */
 	if (write_data)
-		outsw(ap->ioaddr.data_addr, buf, words);
+		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
 	else
-		insw(ap->ioaddr.data_addr, buf, words);
+		ioread16_rep(ap->ioaddr.data_addr, buf, words);
 
 	/* Transfer trailing 1 byte, if any. */
 	if (unlikely(buflen & 0x01)) {
@@ -3639,16 +4415,16 @@
 
 		if (write_data) {
 			memcpy(align_buf, trailing_buf, 1);
-			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
 		} else {
-			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
+			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
 			memcpy(trailing_buf, align_buf, 1);
 		}
 	}
 }
 
 /**
- *	ata_pio_data_xfer_noirq - Transfer data by PIO
+ *	ata_data_xfer_noirq - Transfer data by PIO
  *	@adev: device to target
  *	@buf: data buffer
  *	@buflen: buffer length
@@ -3660,22 +4436,21 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-
-void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
-				    unsigned int buflen, int write_data)
+void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
+			 unsigned int buflen, int write_data)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	ata_pio_data_xfer(adev, buf, buflen, write_data);
+	ata_data_xfer(adev, buf, buflen, write_data);
 	local_irq_restore(flags);
 }
 
 
 /**
- *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ *	ata_pio_sector - Transfer a sector of data.
  *	@qc: Command on going
  *
- *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ *	Transfer qc->sect_size bytes of data from/to the ATA device.
  *
  *	LOCKING:
  *	Inherited from caller.
@@ -3690,11 +4465,11 @@
 	unsigned int offset;
 	unsigned char *buf;
 
-	if (qc->cursect == (qc->nsect - 1))
+	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;
 
 	page = sg[qc->cursg].page;
-	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
+	offset = sg[qc->cursg].offset + qc->cursg_ofs;
 
 	/* get the current page and offset */
 	page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -3710,29 +4485,29 @@
 		buf = kmap_atomic(page, KM_IRQ0);
 
 		/* do the actual data transfer */
-		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
 
 		kunmap_atomic(buf, KM_IRQ0);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
-		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
+		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
 	}
 
-	qc->cursect++;
-	qc->cursg_ofs++;
+	qc->curbytes += qc->sect_size;
+	qc->cursg_ofs += qc->sect_size;
 
-	if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
+	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
 		qc->cursg++;
 		qc->cursg_ofs = 0;
 	}
 }
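ata_pio_sector() now advances qc->curbytes and qc->cursg_ofs in units of qc->sect_size bytes instead of counting hard-coded 512-byte sectors, so transfers whose per-DRQ block is not 512 bytes go through the same code. A trivial standalone illustration of the new last-sector test (numbers invented; not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int sect_size = 512, nbytes = 3 * 512, curbytes = 0;

	while (curbytes < nbytes) {
		int last = (curbytes == nbytes - sect_size);

		printf("sector at byte %u%s\n", curbytes, last ? " (last)" : "");
		curbytes += sect_size;
	}
	return 0;
}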
 
 /**
- *	ata_pio_sectors - Transfer one or many 512-byte sectors.
+ *	ata_pio_sectors - Transfer one or many sectors.
  *	@qc: Command on going
  *
- *	Transfer one or many ATA_SECT_SIZE of data from/to the
+ *	Transfer one or many sectors of data from/to the
  *	ATA device for the DRQ request.
  *
  *	LOCKING:
@@ -3747,7 +4522,8 @@
 
 		WARN_ON(qc->dev->multi_count == 0);
 
-		nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
+			    qc->dev->multi_count);
 		while (nsect--)
 			ata_pio_sector(qc);
 	} else
@@ -3923,7 +4699,7 @@
 	if (do_write != i_write)
 		goto err_out;
 
-	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
+	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
 
 	__atapi_pio_bytes(qc, bytes);
 
@@ -3970,7 +4746,7 @@
  *	Finish @qc which is running on standard HSM.
  *
  *	LOCKING:
- *	If @in_wq is zero, spin_lock_irqsave(host_set lock).
+ *	If @in_wq is zero, spin_lock_irqsave(host lock).
  *	Otherwise, none on entry and grabs host lock.
  */
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
@@ -3982,13 +4758,13 @@
 		if (in_wq) {
 			spin_lock_irqsave(ap->lock, flags);
 
-			/* EH might have kicked in while host_set lock
-			 * is released.
+			/* EH might have kicked in while host lock is
+			 * released.
 			 */
 			qc = ata_qc_from_tag(ap, qc->tag);
 			if (qc) {
 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
-					ata_irq_on(ap);
+					ap->ops->irq_on(ap);
 					ata_qc_complete(qc);
 				} else
 					ata_port_freeze(ap);
@@ -4004,7 +4780,7 @@
 	} else {
 		if (in_wq) {
 			spin_lock_irqsave(ap->lock, flags);
-			ata_irq_on(ap);
+			ap->ops->irq_on(ap);
 			ata_qc_complete(qc);
 			spin_unlock_irqrestore(ap->lock, flags);
 		} else
@@ -4040,7 +4816,7 @@
 
 fsm_start:
 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
-		ap->id, qc->tf.protocol, ap->hsm_task_state, status);
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
 
 	switch (ap->hsm_task_state) {
 	case HSM_ST_FIRST:
@@ -4073,8 +4849,8 @@
 		 * let the EH abort the command or reset the device.
 		 */
 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
-			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
-			       ap->id, status);
+			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
+					"error, dev_stat 0x%X\n", status);
 			qc->err_mask |= AC_ERR_HSM;
 			ap->hsm_task_state = HSM_ST_ERR;
 			goto fsm_start;
@@ -4131,8 +4907,9 @@
 			 * let the EH abort the command or reset the device.
 			 */
 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
-				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
-				       ap->id, status);
+				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
+						"device error, dev_stat 0x%X\n",
+						status);
 				qc->err_mask |= AC_ERR_HSM;
 				ap->hsm_task_state = HSM_ST_ERR;
 				goto fsm_start;
@@ -4152,8 +4929,12 @@
 					/* device stops HSM for abort/error */
 					qc->err_mask |= AC_ERR_DEV;
 				else
-					/* HSM violation. Let EH handle this */
-					qc->err_mask |= AC_ERR_HSM;
+					/* HSM violation. Let EH handle this.
+					 * Phantom devices also trigger this
+					 * condition.  Mark hint.
+					 */
+					qc->err_mask |= AC_ERR_HSM |
+							AC_ERR_NODEV_HINT;
 
 				ap->hsm_task_state = HSM_ST_ERR;
 				goto fsm_start;
@@ -4214,7 +4995,7 @@
 
 		/* no more data to transfer */
 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
-			ap->id, qc->dev->devno, status);
+			ap->print_id, qc->dev->devno, status);
 
 		WARN_ON(qc->err_mask);
 
@@ -4348,7 +5129,7 @@
  *	in case something prevents using it.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_qc_free(struct ata_queued_cmd *qc)
 {
@@ -4392,6 +5173,14 @@
 	qc->complete_fn(qc);
 }
 
+static void fill_result_tf(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	qc->result_tf.flags = qc->tf.flags;
+	ap->ops->tf_read(ap, &qc->result_tf);
+}
+
 /**
  *	ata_qc_complete - Complete an active ATA command
  *	@qc: Command to complete
@@ -4401,7 +5190,7 @@
  *	command has completed, with either an ok or not-ok status.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_qc_complete(struct ata_queued_cmd *qc)
 {
@@ -4429,7 +5218,7 @@
 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
 			if (!ata_tag_internal(qc->tag)) {
 				/* always fill result TF for failed qc */
-				ap->ops->tf_read(ap, &qc->result_tf);
+				fill_result_tf(qc);
 				ata_qc_schedule_eh(qc);
 				return;
 			}
@@ -4437,7 +5226,7 @@
 
 		/* read result TF if requested */
 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
-			ap->ops->tf_read(ap, &qc->result_tf);
+			fill_result_tf(qc);
 
 		__ata_qc_complete(qc);
 	} else {
@@ -4446,7 +5235,7 @@
 
 		/* read result TF if failed or requested */
 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
-			ap->ops->tf_read(ap, &qc->result_tf);
+			fill_result_tf(qc);
 
 		__ata_qc_complete(qc);
 	}
@@ -4464,7 +5253,7 @@
  *	and commands are completed accordingly.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Number of completed commands on success, -errno otherwise.
@@ -4535,7 +5324,7 @@
  *	writing the taskfile to hardware, starting the command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_qc_issue(struct ata_queued_cmd *qc)
 {
@@ -4596,7 +5385,7 @@
  *	May be used as the qc_issue() entry in ata_port_operations.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, AC_ERR_* mask on failure
@@ -4612,6 +5401,7 @@
 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
 		switch (qc->tf.protocol) {
 		case ATA_PROT_PIO:
+		case ATA_PROT_NODATA:
 		case ATA_PROT_ATAPI:
 		case ATA_PROT_ATAPI_NODATA:
 			qc->tf.flags |= ATA_TFLAG_POLLING;
@@ -4626,6 +5416,14 @@
 		}
 	}
 
+	/* Some controllers show flaky interrupt behavior after
+	 * setting xfer mode.  Use polling instead.
+	 */
+	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
+		     qc->tf.feature == SETFEATURES_XFER) &&
+	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
+		qc->tf.flags |= ATA_TFLAG_POLLING;
+
 	/* select the device */
 	ata_dev_select(ap, qc->dev->devno, 1, 0);
 
@@ -4725,7 +5523,7 @@
  *	handled via polling with interrupts disabled (nIEN bit).
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	One if interrupt was handled, zero if not (shared irq).
@@ -4734,10 +5532,11 @@
 inline unsigned int ata_host_intr (struct ata_port *ap,
 				   struct ata_queued_cmd *qc)
 {
+	struct ata_eh_info *ehi = &ap->eh_info;
 	u8 status, host_stat = 0;
 
 	VPRINTK("ata%u: protocol %d task_state %d\n",
-		ap->id, qc->tf.protocol, ap->hsm_task_state);
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
 
 	/* Check whether we are expecting interrupt in this state */
 	switch (ap->hsm_task_state) {
@@ -4758,7 +5557,8 @@
 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
 			/* check status of DMA engine */
 			host_stat = ap->ops->bmdma_status(ap);
-			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+			VPRINTK("ata%u: host_stat 0x%X\n",
+				ap->print_id, host_stat);
 
 			/* if it's not our irq... */
 			if (!(host_stat & ATA_DMA_INTR))
@@ -4794,6 +5594,11 @@
 	ap->ops->irq_clear(ap);
 
 	ata_hsm_move(ap, qc, status, 0);
+
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
+		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
 	return 1;	/* irq handled */
 
 idle_irq:
@@ -4801,7 +5606,7 @@
 
 #ifdef ATA_IRQ_TRAP
 	if ((ap->stats.idle_irq % 1000) == 0) {
-		ata_irq_ack(ap, 0); /* debug trap */
+		ap->ops->irq_ack(ap, 0); /* debug trap */
 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
 		return 1;
 	}
@@ -4812,33 +5617,32 @@
 /**
  *	ata_interrupt - Default ATA host interrupt handler
  *	@irq: irq line (unused)
- *	@dev_instance: pointer to our ata_host_set information structure
- *	@regs: unused
+ *	@dev_instance: pointer to our ata_host information structure
  *
  *	Default interrupt handler for PCI IDE devices.  Calls
  *	ata_host_intr() for each port that is not disabled.
  *
  *	LOCKING:
- *	Obtains host_set lock during operation.
+ *	Obtains host lock during operation.
  *
  *	RETURNS:
  *	IRQ_NONE or IRQ_HANDLED.
  */
 
-irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int i;
 	unsigned int handled = 0;
 	unsigned long flags;
 
 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
-	spin_lock_irqsave(&host_set->lock, flags);
+	spin_lock_irqsave(&host->lock, flags);
 
-	for (i = 0; i < host_set->n_ports; i++) {
+	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap;
 
-		ap = host_set->ports[i];
+		ap = host->ports[i];
 		if (ap &&
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
@@ -4850,7 +5654,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	return IRQ_RETVAL(handled);
 }
@@ -5001,7 +5805,7 @@
 	if (!ata_try_flush_cache(dev))
 		return 0;
 
-	if (ata_id_has_flush_ext(dev->id))
+	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
 		cmd = ATA_CMD_FLUSH_EXT;
 	else
 		cmd = ATA_CMD_FLUSH;
@@ -5015,15 +5819,16 @@
 	return 0;
 }
 
-static int ata_host_set_request_pm(struct ata_host_set *host_set,
-				   pm_message_t mesg, unsigned int action,
-				   unsigned int ehi_flags, int wait)
+#ifdef CONFIG_PM
+static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+			       unsigned int action, unsigned int ehi_flags,
+			       int wait)
 {
 	unsigned long flags;
 	int i, rc;
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
 		/* Previous resume operation might still be in
 		 * progress.  Wait for PM_PENDING to clear.
@@ -5063,11 +5868,11 @@
 }
 
 /**
- *	ata_host_set_suspend - suspend host_set
- *	@host_set: host_set to suspend
+ *	ata_host_suspend - suspend host
+ *	@host: host to suspend
  *	@mesg: PM message
  *
- *	Suspend @host_set.  Actual operation is performed by EH.  This
+ *	Suspend @host.  Actual operation is performed by EH.  This
  *	function requests EH to perform PM operations and waits for EH
  *	to finish.
  *
@@ -5077,59 +5882,34 @@
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
-	int i, j, rc;
-
-	rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
-	if (rc)
-		goto fail;
-
-	/* EH is quiescent now.  Fail if we have any ready device.
-	 * This happens if hotplug occurs between completion of device
-	 * suspension and here.
-	 */
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
-
-		for (j = 0; j < ATA_MAX_DEVICES; j++) {
-			struct ata_device *dev = &ap->device[j];
-
-			if (ata_dev_ready(dev)) {
-				ata_port_printk(ap, KERN_WARNING,
-						"suspend failed, device %d "
-						"still active\n", dev->devno);
-				rc = -EBUSY;
-				goto fail;
-			}
-		}
-	}
-
-	host_set->dev->power.power_state = mesg;
-	return 0;
+	int rc;
 
- fail:
-	ata_host_set_resume(host_set);
+	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+	if (rc == 0)
+		host->dev->power.power_state = mesg;
 	return rc;
 }
 
 /**
- *	ata_host_set_resume - resume host_set
- *	@host_set: host_set to resume
+ *	ata_host_resume - resume host
+ *	@host: host to resume
  *
- *	Resume @host_set.  Actual operation is performed by EH.  This
+ *	Resume @host.  Actual operation is performed by EH.  This
  *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_host_set_resume(struct ata_host_set *host_set)
+void ata_host_resume(struct ata_host *host)
 {
-	ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
-				ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
-	host_set->dev->power.power_state = PMSG_ON;
+	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
+			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
+	host->dev->power.power_state = PMSG_ON;
 }
+#endif
 
 /**
  *	ata_port_start - Set port up for dma.
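(For reference: with the host_set chain gone, a PCI LLD that needs controller-specific
work around suspend/resume wraps the helpers roughly as in the sketch below.  The my_*
names are placeholders; the generic ata_pci_device_suspend()/ata_pci_device_resume()
further down in this patch are sufficient for simple drivers.)

	static int my_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);
		int rc;

		rc = ata_host_suspend(host, mesg);	/* EH quiesces the ports */
		if (rc)
			return rc;

		/* controller-specific quiesce would go here */

		ata_pci_device_do_suspend(pdev, mesg);
		return 0;
	}

	static int my_pci_resume(struct pci_dev *pdev)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);
		int rc;

		rc = ata_pci_device_do_resume(pdev);
		if (rc)
			return rc;

		/* controller-specific re-init would go here */

		ata_host_resume(host);		/* EH resets and revalidates */
		return 0;
	}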
@@ -5143,54 +5923,25 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-
-int ata_port_start (struct ata_port *ap)
+int ata_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->dev;
 	int rc;
 
-	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
+	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
+				      GFP_KERNEL);
 	if (!ap->prd)
 		return -ENOMEM;
 
 	rc = ata_pad_alloc(ap, dev);
-	if (rc) {
-		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	if (rc)
 		return rc;
-	}
-
-	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
 
+	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
+		(unsigned long long)ap->prd_dma);
 	return 0;
 }
 
-
-/**
- *	ata_port_stop - Undo ata_port_start()
- *	@ap: Port to shut down
- *
- *	Frees the PRD table.
- *
- *	May be used as the port_stop() entry in ata_port_operations.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-void ata_port_stop (struct ata_port *ap)
-{
-	struct device *dev = ap->dev;
-
-	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
-	ata_pad_free(ap, dev);
-}
-
-void ata_host_stop (struct ata_host_set *host_set)
-{
-	if (host_set->mmio_base)
-		iounmap(host_set->mmio_base);
-}
-
 /**
  *	ata_dev_init - Initialize an ata_device structure
  *	@dev: Device structure to initialize
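(For reference: ata_port_start() now allocates the PRD table with dmam_alloc_coherent(),
which is why the explicit ata_port_stop()/ata_host_stop() teardown removed above is no
longer needed; devres frees everything when the device goes away.  A driver-private
->port_start can follow the same pattern.  This is a sketch assuming the devres helpers
that come with this update; struct my_port_priv and MY_CMD_TBL_SZ are made-up names.)

	static int my_port_start(struct ata_port *ap)
	{
		struct device *dev = ap->dev;
		struct my_port_priv *pp;
		int rc;

		/* reuse the generic PRD/pad setup */
		rc = ata_port_start(ap);
		if (rc)
			return rc;

		pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
		if (!pp)
			return -ENOMEM;

		pp->cmd_tbl = dmam_alloc_coherent(dev, MY_CMD_TBL_SZ,
						  &pp->cmd_tbl_dma, GFP_KERNEL);
		if (!pp->cmd_tbl)
			return -ENOMEM;

		ap->private_data = pp;
		return 0;	/* no matching ->port_stop is required */
	}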
@@ -5210,7 +5961,7 @@
 
 	/* High bits of dev->flags are used to record warm plug
 	 * requests which occur asynchronously.  Synchronize using
-	 * host_set lock.
+	 * host lock.
 	 */
 	spin_lock_irqsave(ap->lock, flags);
 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
@@ -5224,37 +5975,36 @@
 }
 
 /**
- *	ata_port_init - Initialize an ata_port structure
- *	@ap: Structure to initialize
- *	@host_set: Collection of hosts to which @ap belongs
- *	@ent: Probe information provided by low-level driver
- *	@port_no: Port number associated with this ata_port
+ *	ata_port_alloc - allocate and initialize basic ATA port resources
+ *	@host: ATA host this allocated port belongs to
  *
- *	Initialize a new ata_port structure.
+ *	Allocate and initialize basic ATA port resources.
+ *
+ *	RETURNS:
+ *	Allocated ATA port on success, NULL on failure.
  *
  *	LOCKING:
- *	Inherited from caller.
+ *	Inherited from calling layer (may sleep).
  */
-void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
-		   const struct ata_probe_ent *ent, unsigned int port_no)
+struct ata_port *ata_port_alloc(struct ata_host *host)
 {
+	struct ata_port *ap;
 	unsigned int i;
 
-	ap->lock = &host_set->lock;
+	DPRINTK("ENTER\n");
+
+	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+	if (!ap)
+		return NULL;
+
+	ap->pflags |= ATA_PFLAG_INITIALIZING;
+	ap->lock = &host->lock;
 	ap->flags = ATA_FLAG_DISABLED;
-	ap->id = ata_unique_id++;
+	ap->print_id = -1;
 	ap->ctl = ATA_DEVCTL_OBS;
-	ap->host_set = host_set;
-	ap->dev = ent->dev;
-	ap->port_no = port_no;
-	ap->hard_port_no =
-		ent->legacy_mode ? ent->hard_port_no : port_no;
-	ap->pio_mask = ent->pio_mask;
-	ap->mwdma_mask = ent->mwdma_mask;
-	ap->udma_mask = ent->udma_mask;
-	ap->flags |= ent->host_flags;
-	ap->flags |= ent->port_flags[port_no];	/* pata fix */
-	ap->ops = ent->port_ops;
+	ap->host = host;
+	ap->dev = host->dev;
+
 	ap->hw_sata_spd_limit = UINT_MAX;
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
@@ -5274,10 +6024,7 @@
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
 
-	/* set cable type */
 	ap->cbl = ATA_CBL_NONE;
-	if (ap->flags & ATA_FLAG_SATA)
-		ap->cbl = ATA_CBL_SATA;
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *dev = &ap->device[i];
@@ -5290,257 +6037,285 @@
 	ap->stats.unhandled_irq = 1;
 	ap->stats.idle_irq = 1;
 #endif
+	return ap;
+}
+
+static void ata_host_release(struct device *gendev, void *res)
+{
+	struct ata_host *host = dev_get_drvdata(gendev);
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (!ap)
+			continue;
+
+		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+
+	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
+		host->ops->host_stop(host);
 
-	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (!ap)
+			continue;
+
+		if (ap->scsi_host)
+			scsi_host_put(ap->scsi_host);
+
+		kfree(ap);
+		host->ports[i] = NULL;
+	}
+
+	dev_set_drvdata(gendev, NULL);
 }
 
 /**
- *	ata_host_init - Initialize an ata_port structure
- *	@ap: Structure to initialize
- *	@host: associated SCSI mid-layer structure
- *	@host_set: Collection of hosts to which @ap belongs
- *	@ent: Probe information provided by low-level driver
- *	@port_no: Port number associated with this ata_port
+ *	ata_host_alloc - allocate and init basic ATA host resources
+ *	@dev: generic device this host is associated with
+ *	@max_ports: maximum number of ATA ports associated with this host
+ *
+ *	Allocate and initialize basic ATA host resources.  LLD calls
+ *	this function to allocate a host, initializes it fully and
+ *	attaches it using ata_host_register().
  *
- *	Initialize a new ata_port structure, and its associated
- *	scsi_host.
+ *	@max_ports ports are allocated and host->n_ports is
+ *	initialized to @max_ports.  The caller is allowed to decrease
+ *	host->n_ports before calling ata_host_register().  The unused
+ *	ports will be automatically freed on registration.
+ *
+ *	RETURNS:
+ *	Allocated ATA host on success, NULL on failure.
  *
  *	LOCKING:
- *	Inherited from caller.
+ *	Inherited from calling layer (may sleep).
  */
-static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
-			  struct ata_host_set *host_set,
-			  const struct ata_probe_ent *ent, unsigned int port_no)
+struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
 {
-	unsigned int i;
+	struct ata_host *host;
+	size_t sz;
+	int i;
 
-	host->max_id = 16;
-	host->max_lun = 1;
-	host->max_channel = 1;
-	host->unique_id = ata_unique_id++;
-	host->max_cmd_len = 12;
+	DPRINTK("ENTER\n");
 
-	ap->lock = &host_set->lock;
-	ap->flags = ATA_FLAG_DISABLED;
-	ap->id = host->unique_id;
-	ap->host = host;
-	ap->ctl = ATA_DEVCTL_OBS;
-	ap->host_set = host_set;
-	ap->dev = ent->dev;
-	ap->port_no = port_no;
-	ap->hard_port_no =
-		ent->legacy_mode ? ent->hard_port_no : port_no;
-	ap->pio_mask = ent->pio_mask;
-	ap->mwdma_mask = ent->mwdma_mask;
-	ap->udma_mask = ent->udma_mask;
-	ap->flags |= ent->host_flags;
-	ap->flags |= ent->port_flags[port_no];	/* pata fix */
-	ap->ops = ent->port_ops;
-	ap->hw_sata_spd_limit = UINT_MAX;
-	ap->active_tag = ATA_TAG_POISON;
-	ap->last_ctl = 0xFF;
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+		return NULL;
 
-#if defined(ATA_VERBOSE_DEBUG)
-	/* turn on all debugging levels */
-	ap->msg_enable = 0x00FF;
-#elif defined(ATA_DEBUG)
-	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
-#else
-	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
-#endif
+	/* alloc a container for our list of ATA ports (buses) */
+	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
+	if (!host)
+		goto err_out;
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
-	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
-	INIT_LIST_HEAD(&ap->eh_done_q);
-	init_waitqueue_head(&ap->eh_wait_q);
+	devres_add(dev, host);
+	dev_set_drvdata(dev, host);
 
-	/* set cable type */
-	ap->cbl = ATA_CBL_NONE;
-	if (ap->flags & ATA_FLAG_SATA)
-		ap->cbl = ATA_CBL_SATA;
+	spin_lock_init(&host->lock);
+	host->dev = dev;
+	host->n_ports = max_ports;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		dev->ap = ap;
-		dev->devno = i;
-		ata_dev_init(dev);
+	/* allocate ports bound to this host */
+	for (i = 0; i < max_ports; i++) {
+		struct ata_port *ap;
+
+		ap = ata_port_alloc(host);
+		if (!ap)
+			goto err_out;
+
+		ap->port_no = i;
+		host->ports[i] = ap;
 	}
 
-#ifdef ATA_IRQ_TRAP
-	ap->stats.unhandled_irq = 1;
-	ap->stats.idle_irq = 1;
-#endif
+	devres_remove_group(dev, NULL);
+	return host;
 
-	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+ err_out:
+	devres_release_group(dev, NULL);
+	return NULL;
 }
 
 /**
- *	ata_host_add - Attach low-level ATA driver to system
- *	@ent: Information provided by low-level driver
- *	@host_set: Collections of ports to which we add
- *	@port_no: Port number associated with this host
- *
- *	Attach low-level ATA driver to system.
+ *	ata_host_alloc_pinfo - alloc host and init with port_info array
+ *	@dev: generic device this host is associated with
+ *	@ppi: array of ATA port_info to initialize host with
+ *	@n_ports: number of ATA ports attached to this host
  *
- *	LOCKING:
- *	PCI/etc. bus probe sem.
+ *	Allocate ATA host and initialize it with info from @ppi.  If @ppi
+ *	is NULL terminated, it may contain fewer entries than @n_ports;
+ *	the last entry is then used for the remaining ports.
  *
  *	RETURNS:
- *	New ata_port on success, for NULL on error.
+ *	Allocated ATA host on success, NULL on failure.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
  */
+struct ata_host *ata_host_alloc_pinfo(struct device *dev,
+				      const struct ata_port_info * const * ppi,
+				      int n_ports)
+{
+	const struct ata_port_info *pi;
+	struct ata_host *host;
+	int i, j;
 
-static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
-				      struct ata_host_set *host_set,
-				      unsigned int port_no)
-{
-	struct Scsi_Host *host;
-	struct ata_port *ap;
-	int rc;
+	host = ata_host_alloc(dev, n_ports);
+	if (!host)
+		return NULL;
 
-	DPRINTK("ENTER\n");
+	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
-	if (!ent->port_ops->error_handler &&
-	    !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
-		printk(KERN_ERR "ata%u: no reset mechanism available\n",
-		       port_no);
-		return NULL;
+		if (ppi[j])
+			pi = ppi[j++];
+
+		ap->pio_mask = pi->pio_mask;
+		ap->mwdma_mask = pi->mwdma_mask;
+		ap->udma_mask = pi->udma_mask;
+		ap->flags |= pi->flags;
+		ap->ops = pi->port_ops;
+
+		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
+			host->ops = pi->port_ops;
+		if (!host->private_data && pi->private_data)
+			host->private_data = pi->private_data;
 	}
 
-	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
-	if (!host)
-		return NULL;
+	return host;
+}
 
-	host->transportt = &ata_scsi_transport_template;
+/**
+ *	ata_host_start - start and freeze ports of an ATA host
+ *	@host: ATA host to start ports for
+ *
+ *	Start and then freeze ports of @host.  Started status is
+ *	recorded in host->flags, so this function can be called
+ *	multiple times.  Ports are guaranteed to get started only
+ *	once.  If host->ops isn't initialized yet, it's set to the
+ *	first non-dummy port ops.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 if all ports are started successfully, -errno otherwise.
+ */
+int ata_host_start(struct ata_host *host)
+{
+	int i, rc;
 
-	ap = ata_shost_to_port(host);
+	if (host->flags & ATA_HOST_STARTED)
+		return 0;
 
-	ata_host_init(ap, host, host_set, ent, port_no);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
-	rc = ap->ops->port_start(ap);
-	if (rc)
-		goto err_out;
+		if (!host->ops && !ata_port_is_dummy(ap))
+			host->ops = ap->ops;
 
-	return ap;
+		if (ap->ops->port_start) {
+			rc = ap->ops->port_start(ap);
+			if (rc) {
+				ata_port_printk(ap, KERN_ERR, "failed to "
+						"start port (errno=%d)\n", rc);
+				goto err_out;
+			}
+		}
 
-err_out:
-	scsi_host_put(host);
-	return NULL;
+		ata_eh_freeze_port(ap);
+	}
+
+	host->flags |= ATA_HOST_STARTED;
+	return 0;
+
+ err_out:
+	while (--i >= 0) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+	return rc;
 }
 
 /**
- *	ata_sas_host_init - Initialize a host_set struct
- *	@host_set:	host_set to initialize
- *	@dev:		device host_set is attached to
- *	@flags:	host_set flags
- *	@ops:		port_ops
+ *	ata_sas_host_init - Initialize a host struct
+ *	@host:	host to initialize
+ *	@dev:	device host is attached to
+ *	@flags:	host flags
+ *	@ops:	port_ops
  *
  *	LOCKING:
  *	PCI/etc. bus probe sem.
  *
  */
-
-void ata_host_set_init(struct ata_host_set *host_set,
-		       struct device *dev, unsigned long flags,
-		       const struct ata_port_operations *ops)
+/* KILLME - the only user left is ipr */
+void ata_host_init(struct ata_host *host, struct device *dev,
+		   unsigned long flags, const struct ata_port_operations *ops)
 {
-	spin_lock_init(&host_set->lock);
-	host_set->dev = dev;
-	host_set->flags = flags;
-	host_set->ops = ops;
+	spin_lock_init(&host->lock);
+	host->dev = dev;
+	host->flags = flags;
+	host->ops = ops;
 }
 
 /**
- *	ata_device_add - Register hardware device with ATA and SCSI layers
- *	@ent: Probe information describing hardware device to be registered
- *
- *	This function processes the information provided in the probe
- *	information struct @ent, allocates the necessary ATA and SCSI
- *	host information structures, initializes them, and registers
- *	everything with requisite kernel subsystems.
+ *	ata_host_register - register initialized ATA host
+ *	@host: ATA host to register
+ *	@sht: template for SCSI host
  *
- *	This function requests irqs, probes the ATA bus, and probes
- *	the SCSI bus.
+ *	Register initialized ATA host.  @host is allocated using
+ *	ata_host_alloc() and fully initialized by LLD.  This function
+ *	starts ports, registers @host with ATA and SCSI layers and
+ *	probes attached devices.
  *
  *	LOCKING:
- *	PCI/etc. bus probe sem.
+ *	Inherited from calling layer (may sleep).
  *
  *	RETURNS:
- *	Number of ports registered.  Zero on error (no ports registered).
+ *	0 on success, -errno otherwise.
  */
-int ata_device_add(const struct ata_probe_ent *ent)
+int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
-	unsigned int count = 0, i;
-	struct device *dev = ent->dev;
-	struct ata_host_set *host_set;
-	int rc;
-
-	DPRINTK("ENTER\n");
-	/* alloc a container for our list of ATA ports (buses) */
-	host_set = kzalloc(sizeof(struct ata_host_set) +
-			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
-	if (!host_set)
-		return 0;
-	spin_lock_init(&host_set->lock);
-
-	host_set->dev = dev;
-	host_set->n_ports = ent->n_ports;
-	host_set->irq = ent->irq;
-	host_set->mmio_base = ent->mmio_base;
-	host_set->private_data = ent->private_data;
-	host_set->ops = ent->port_ops;
-	host_set->flags = ent->host_set_flags;
+	int i, rc;
 
-	/* register each port bound to this device */
-	for (i = 0; i < ent->n_ports; i++) {
-		struct ata_port *ap;
-		unsigned long xfer_mode_mask;
+	/* host must have been started */
+	if (!(host->flags & ATA_HOST_STARTED)) {
+		dev_printk(KERN_ERR, host->dev,
+			   "BUG: trying to register unstarted host\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
 
-		ap = ata_host_add(ent, host_set, i);
-		if (!ap)
-			goto err_out;
+	/* Blow away unused ports.  This happens when LLD can't
+	 * determine the exact number of ports to allocate at
+	 * allocation time.
+	 */
+	for (i = host->n_ports; host->ports[i]; i++)
+		kfree(host->ports[i]);
 
-		host_set->ports[i] = ap;
-		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
-				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
-				(ap->pio_mask << ATA_SHIFT_PIO);
+	/* give ports names and add SCSI hosts */
+	for (i = 0; i < host->n_ports; i++)
+		host->ports[i]->print_id = ata_print_id++;
 
-		/* print per-port info to dmesg */
-		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
-				"ctl 0x%lX bmdma 0x%lX irq %lu\n",
-				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
-				ata_mode_string(xfer_mode_mask),
-				ap->ioaddr.cmd_addr,
-				ap->ioaddr.ctl_addr,
-				ap->ioaddr.bmdma_addr,
-				ent->irq);
-
-		ata_chk_status(ap);
-		host_set->ops->irq_clear(ap);
-		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
-		count++;
-	}
-
-	if (!count)
-		goto err_free_ret;
-
-	/* obtain irq, that is shared between channels */
-	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
-			 DRV_NAME, host_set);
-	if (rc) {
-		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
-			   ent->irq, rc);
-		goto err_out;
-	}
+	rc = ata_scsi_add_hosts(host, sht);
+	if (rc)
+		return rc;
 
-	/* perform each probe synchronously */
-	DPRINTK("probe begin\n");
-	for (i = 0; i < count; i++) {
-		struct ata_port *ap;
+	/* set cable, sata_spd_limit and report */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		int irq_line;
 		u32 scontrol;
-		int rc;
+		unsigned long xfer_mask;
 
-		ap = host_set->ports[i];
+		/* set SATA cable type if still unset */
+		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
+			ap->cbl = ATA_CBL_SATA;
 
 		/* init sata_spd_limit to the current value */
 		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
@@ -5549,16 +6324,35 @@
 		}
 		ap->sata_spd_limit = ap->hw_sata_spd_limit;
 
-		rc = scsi_add_host(ap->host, dev);
-		if (rc) {
-			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
-			/* FIXME: do something useful here */
-			/* FIXME: handle unconditional calls to
-			 * scsi_scan_host and ata_host_remove, below,
-			 * at the very least
-			 */
-		}
+		/* report the secondary IRQ for second channel legacy */
+		irq_line = host->irq;
+		if (i == 1 && host->irq2)
+			irq_line = host->irq2;
 
+		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
+					      ap->udma_mask);
+
+		/* print per-port info to dmesg */
+		if (!ata_port_is_dummy(ap))
+			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
+					"ctl 0x%p bmdma 0x%p irq %d\n",
+					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
+					ata_mode_string(xfer_mask),
+					ap->ioaddr.cmd_addr,
+					ap->ioaddr.ctl_addr,
+					ap->ioaddr.bmdma_addr,
+					irq_line);
+		else
+			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
+	}
+
+	/* perform each probe synchronously */
+	DPRINTK("probe begin\n");
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		int rc;
+
+		/* probe */
 		if (ap->ops->error_handler) {
 			struct ata_eh_info *ehi = &ap->eh_info;
 			unsigned long flags;
@@ -5572,6 +6366,7 @@
 			ehi->action |= ATA_EH_SOFTRESET;
 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
+			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
 			ap->pflags |= ATA_PFLAG_LOADING;
 			ata_port_schedule_eh(ap);
 
@@ -5580,9 +6375,9 @@
 			/* wait for EH to finish */
 			ata_port_wait_eh(ap);
 		} else {
-			DPRINTK("ata%u: bus probe begin\n", ap->id);
+			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
 			rc = ata_bus_probe(ap);
-			DPRINTK("ata%u: bus probe end\n", ap->id);
+			DPRINTK("ata%u: bus probe end\n", ap->print_id);
 
 			if (rc) {
 				/* FIXME: do something useful here?
@@ -5597,29 +6392,55 @@
 
 	/* probes are done, now scan each port's disk(s) */
 	DPRINTK("host probe begin\n");
-	for (i = 0; i < count; i++) {
-		struct ata_port *ap = host_set->ports[i];
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
 		ata_scsi_scan_host(ap);
 	}
 
-	dev_set_drvdata(dev, host_set);
+	return 0;
+}
+
+/**
+ *	ata_host_activate - start host, request IRQ and register it
+ *	@host: target ATA host
+ *	@irq: IRQ to request
+ *	@irq_handler: irq_handler used when requesting IRQ
+ *	@irq_flags: irq_flags used when requesting IRQ
+ *	@sht: scsi_host_template to use when registering the host
+ *
+ *	After allocating an ATA host and initializing it, most libata
+ *	LLDs perform three steps to activate the host - start host,
+ *	request IRQ and register it.  This helper takes the necessary
+ *	arguments and performs the three steps in one go.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_host_activate(struct ata_host *host, int irq,
+		      irq_handler_t irq_handler, unsigned long irq_flags,
+		      struct scsi_host_template *sht)
+{
+	int rc;
+
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
+
+	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
+			      dev_driver_string(host->dev), host);
+	if (rc)
+		return rc;
 
-	VPRINTK("EXIT, returning %u\n", ent->n_ports);
-	return ent->n_ports; /* success */
+	rc = ata_host_register(host, sht);
+	/* if failed, just free the IRQ and leave ports alone */
+	if (rc)
+		devm_free_irq(host->dev, irq, host);
 
-err_out:
-	for (i = 0; i < count; i++) {
-		struct ata_port *ap = host_set->ports[i];
-		if (ap) {
-			ap->ops->port_stop(ap);
-			scsi_host_put(ap->host);
-		}
-	}
-err_free_ret:
-	kfree(host_set);
-	VPRINTK("EXIT, returning 0\n");
-	return 0;
+	return rc;
 }
 
 /**
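(For reference: taken together, ata_host_alloc()/ata_host_alloc_pinfo(), ata_host_start(),
ata_host_register() and the ata_host_activate() wrapper replace the old
ata_probe_ent/ata_device_add() model.  A minimal PCI LLD init path on top of the new API
could look roughly like the sketch below; my_port_ops, my_sht and the BAR/ioaddr setup
are placeholders, not something this patch defines.)

	static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		static const struct ata_port_info pi = {
			.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
			.pio_mask	= 0x1f,			/* PIO0-4 */
			.udma_mask	= 0x7f,			/* UDMA0-6 */
			.port_ops	= &my_port_ops,		/* placeholder */
		};
		const struct ata_port_info *ppi[] = { &pi, NULL };
		struct ata_host *host;
		int rc;

		rc = pcim_enable_device(pdev);
		if (rc)
			return rc;

		/* two ports; masks/flags/ops are copied from pi */
		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
		if (!host)
			return -ENOMEM;

		/* ... map BARs and fill host->ports[i]->ioaddr here ... */

		/* start ports, grab the IRQ via devres, register with SCSI */
		return ata_host_activate(host, pdev->irq, ata_interrupt,
					 IRQF_SHARED, &my_sht);
	}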
@@ -5677,77 +6498,24 @@
 
  skip_eh:
 	/* remove the associated SCSI host */
-	scsi_remove_host(ap->host);
+	scsi_remove_host(ap->scsi_host);
 }
 
 /**
- *	ata_host_set_remove - PCI layer callback for device removal
- *	@host_set: ATA host set that was removed
+ *	ata_host_detach - Detach all ports of an ATA host
+ *	@host: Host to detach
  *
- *	Unregister all objects associated with this host set. Free those
- *	objects.
+ *	Detach all ports of @host.
  *
  *	LOCKING:
- *	Inherited from calling layer (may sleep).
- */
-
-void ata_host_set_remove(struct ata_host_set *host_set)
-{
-	unsigned int i;
-
-	for (i = 0; i < host_set->n_ports; i++)
-		ata_port_detach(host_set->ports[i]);
-
-	free_irq(host_set->irq, host_set);
-
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
-
-		ata_scsi_release(ap->host);
-
-		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
-			struct ata_ioports *ioaddr = &ap->ioaddr;
-
-			if (ioaddr->cmd_addr == 0x1f0)
-				release_region(0x1f0, 8);
-			else if (ioaddr->cmd_addr == 0x170)
-				release_region(0x170, 8);
-		}
-
-		scsi_host_put(ap->host);
-	}
-
-	if (host_set->ops->host_stop)
-		host_set->ops->host_stop(host_set);
-
-	kfree(host_set);
-}
-
-/**
- *	ata_scsi_release - SCSI layer callback hook for host unload
- *	@host: libata host to be unloaded
- *
- *	Performs all duties necessary to shut down a libata port...
- *	Kill port kthread, disable port, and release resources.
- *
- *	LOCKING:
- *	Inherited from SCSI layer.
- *
- *	RETURNS:
- *	One.
+ *	Kernel thread context (may sleep).
  */
-
-int ata_scsi_release(struct Scsi_Host *host)
+void ata_host_detach(struct ata_host *host)
 {
-	struct ata_port *ap = ata_shost_to_port(host);
-
-	DPRINTK("ENTER\n");
-
-	ap->ops->port_disable(ap);
-	ap->ops->port_stop(ap);
+	int i;
 
-	DPRINTK("EXIT\n");
-	return 1;
+	for (i = 0; i < host->n_ports; i++)
+		ata_port_detach(host->ports[i]);
 }
 
 /**
@@ -5779,40 +6547,23 @@
 
 #ifdef CONFIG_PCI
 
-void ata_pci_host_stop (struct ata_host_set *host_set)
-{
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
-
-	pci_iounmap(pdev, host_set->mmio_base);
-}
-
 /**
  *	ata_pci_remove_one - PCI layer callback for device removal
  *	@pdev: PCI device that was removed
  *
- *	PCI layer indicates to libata via this hook that
- *	hot-unplug or module unload event has occurred.
- *	Handle this by unregistering all objects associated
- *	with this PCI device.  Free those objects.  Then finally
- *	release PCI resources and disable device.
+ *	PCI layer indicates to libata via this hook that a hot-unplug or
+ *	module unload event has occurred.  Detach all ports.  Resource
+ *	release is handled via devres.
  *
  *	LOCKING:
  *	Inherited from PCI layer (may sleep).
  */
-
-void ata_pci_remove_one (struct pci_dev *pdev)
+void ata_pci_remove_one(struct pci_dev *pdev)
 {
 	struct device *dev = pci_dev_to_dev(pdev);
-	struct ata_host_set *host_set = dev_get_drvdata(dev);
-	struct ata_host_set *host_set2 = host_set->next;
+	struct ata_host *host = dev_get_drvdata(dev);
 
-	ata_host_set_remove(host_set);
-	if (host_set2)
-		ata_host_set_remove(host_set2);
-
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-	dev_set_drvdata(dev, NULL);
+	ata_host_detach(host);
 }
 
 /* move to PCI subsystem */
@@ -5849,57 +6600,60 @@
 	return (tmp == bits->val) ? 1 : 0;
 }
 
-void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM
+void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
 	pci_save_state(pdev);
+	pci_disable_device(pdev);
 
-	if (state.event == PM_EVENT_SUSPEND) {
-		pci_disable_device(pdev);
+	if (mesg.event == PM_EVENT_SUSPEND)
 		pci_set_power_state(pdev, PCI_D3hot);
-	}
 }
 
-void ata_pci_device_do_resume(struct pci_dev *pdev)
+int ata_pci_device_do_resume(struct pci_dev *pdev)
 {
+	int rc;
+
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	pci_enable_device(pdev);
+
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "failed to enable device after resume (%d)\n", rc);
+		return rc;
+	}
+
 	pci_set_master(pdev);
+	return 0;
 }
 
-int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
-	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	int rc = 0;
 
-	rc = ata_host_set_suspend(host_set, state);
+	rc = ata_host_suspend(host, mesg);
 	if (rc)
 		return rc;
 
-	if (host_set->next) {
-		rc = ata_host_set_suspend(host_set->next, state);
-		if (rc) {
-			ata_host_set_resume(host_set);
-			return rc;
-		}
-	}
-
-	ata_pci_device_do_suspend(pdev, state);
+	ata_pci_device_do_suspend(pdev, mesg);
 
 	return 0;
 }
 
 int ata_pci_device_resume(struct pci_dev *pdev)
 {
-	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
-
-	ata_pci_device_do_resume(pdev);
-	ata_host_set_resume(host_set);
-	if (host_set->next)
-		ata_host_set_resume(host_set->next);
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
 
-	return 0;
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc == 0)
+		ata_host_resume(host);
+	return rc;
 }
+#endif /* CONFIG_PM */
+
 #endif /* CONFIG_PCI */
 
 
@@ -5926,7 +6680,7 @@
 	destroy_workqueue(ata_aux_wq);
 }
 
-module_init(ata_init);
+subsys_initcall(ata_init);
 module_exit(ata_exit);
 
 static unsigned long ratelimit_time;
@@ -5997,6 +6751,43 @@
 }
 
 /*
+ * Dummy port_ops
+ */
+static void ata_dummy_noret(struct ata_port *ap)	{ }
+static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
+static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
+
+static u8 ata_dummy_check_status(struct ata_port *ap)
+{
+	return ATA_DRDY;
+}
+
+static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
+{
+	return AC_ERR_SYSTEM;
+}
+
+const struct ata_port_operations ata_dummy_port_ops = {
+	.port_disable		= ata_port_disable,
+	.check_status		= ata_dummy_check_status,
+	.check_altstatus	= ata_dummy_check_status,
+	.dev_select		= ata_noop_dev_select,
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= ata_dummy_qc_issue,
+	.freeze			= ata_dummy_noret,
+	.thaw			= ata_dummy_noret,
+	.error_handler		= ata_dummy_noret,
+	.post_internal_cmd	= ata_dummy_qc_noret,
+	.irq_clear		= ata_dummy_noret,
+	.port_start		= ata_dummy_ret0,
+	.port_stop		= ata_dummy_noret,
+};
+
+const struct ata_port_info ata_dummy_port_info = {
+	.port_ops		= &ata_dummy_port_ops,
+};
+
+/*
  * libata is essentially a library of internal helper functions for
  * low-level ATA host controller drivers.  As such, the API/ABI is
  * likely to change as new drivers are added and updated.
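(For reference: the dummy port_ops/port_info added above let hardware with a dead or
foreign-owned channel still allocate all of its ports; libata prints "DUMMY" for such a
port and fails any command issued to it with AC_ERR_SYSTEM.  A hypothetical two-channel
setup, with my_chan_info standing in for the real channel's port_info:)

	/* inside the driver's PCI ->probe(): channel 1 is handled
	 * elsewhere, so hand libata a dummy port for it
	 */
	const struct ata_port_info *ppi[] = { &my_chan_info,
					      &ata_dummy_port_info };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;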
@@ -6006,12 +6797,17 @@
 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
 EXPORT_SYMBOL_GPL(ata_std_ports);
-EXPORT_SYMBOL_GPL(ata_host_set_init);
-EXPORT_SYMBOL_GPL(ata_device_add);
-EXPORT_SYMBOL_GPL(ata_port_detach);
-EXPORT_SYMBOL_GPL(ata_host_set_remove);
+EXPORT_SYMBOL_GPL(ata_host_init);
+EXPORT_SYMBOL_GPL(ata_host_alloc);
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_host_start);
+EXPORT_SYMBOL_GPL(ata_host_register);
+EXPORT_SYMBOL_GPL(ata_host_activate);
+EXPORT_SYMBOL_GPL(ata_host_detach);
 EXPORT_SYMBOL_GPL(ata_sg_init);
 EXPORT_SYMBOL_GPL(ata_sg_init_one);
 EXPORT_SYMBOL_GPL(ata_hsm_move);
@@ -6022,18 +6818,17 @@
 EXPORT_SYMBOL_GPL(ata_tf_read);
 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
 EXPORT_SYMBOL_GPL(ata_std_dev_select);
+EXPORT_SYMBOL_GPL(sata_print_link_status);
 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
 EXPORT_SYMBOL_GPL(ata_check_status);
 EXPORT_SYMBOL_GPL(ata_altstatus);
 EXPORT_SYMBOL_GPL(ata_exec_command);
 EXPORT_SYMBOL_GPL(ata_port_start);
-EXPORT_SYMBOL_GPL(ata_port_stop);
-EXPORT_SYMBOL_GPL(ata_host_stop);
 EXPORT_SYMBOL_GPL(ata_interrupt);
-EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
-EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
-EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
+EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_data_xfer);
+EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
 EXPORT_SYMBOL_GPL(ata_qc_prep);
 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
@@ -6047,6 +6842,7 @@
 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
 EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(ata_dev_disable);
 EXPORT_SYMBOL_GPL(sata_set_spd);
 EXPORT_SYMBOL_GPL(sata_phy_debounce);
 EXPORT_SYMBOL_GPL(sata_phy_resume);
@@ -6055,22 +6851,22 @@
 EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_std_prereset);
 EXPORT_SYMBOL_GPL(ata_std_softreset);
+EXPORT_SYMBOL_GPL(sata_port_hardreset);
 EXPORT_SYMBOL_GPL(sata_std_hardreset);
 EXPORT_SYMBOL_GPL(ata_std_postreset);
-EXPORT_SYMBOL_GPL(ata_dev_revalidate);
 EXPORT_SYMBOL_GPL(ata_dev_classify);
 EXPORT_SYMBOL_GPL(ata_dev_pair);
 EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_wait_register);
 EXPORT_SYMBOL_GPL(ata_busy_sleep);
+EXPORT_SYMBOL_GPL(ata_wait_ready);
 EXPORT_SYMBOL_GPL(ata_port_queue_task);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
-EXPORT_SYMBOL_GPL(ata_scsi_release);
 EXPORT_SYMBOL_GPL(ata_host_intr);
 EXPORT_SYMBOL_GPL(sata_scr_valid);
 EXPORT_SYMBOL_GPL(sata_scr_read);
@@ -6078,10 +6874,14 @@
 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
 EXPORT_SYMBOL_GPL(ata_port_online);
 EXPORT_SYMBOL_GPL(ata_port_offline);
-EXPORT_SYMBOL_GPL(ata_host_set_suspend);
-EXPORT_SYMBOL_GPL(ata_host_set_resume);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL_GPL(ata_host_suspend);
+EXPORT_SYMBOL_GPL(ata_host_resume);
+#endif /* CONFIG_PM */
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
+EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
+EXPORT_SYMBOL_GPL(ata_device_blacklisted);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
@@ -6090,21 +6890,21 @@
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_host_stop);
-EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
+EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
+EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#ifdef CONFIG_PM
 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
+#endif /* CONFIG_PM */
 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
 #endif /* CONFIG_PCI */
 
-EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
-EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
-
 EXPORT_SYMBOL_GPL(ata_eng_timeout);
 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
 EXPORT_SYMBOL_GPL(ata_port_abort);
@@ -6114,3 +6914,13 @@
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
 EXPORT_SYMBOL_GPL(ata_do_eh);
+EXPORT_SYMBOL_GPL(ata_irq_on);
+EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
+EXPORT_SYMBOL_GPL(ata_irq_ack);
+EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
+EXPORT_SYMBOL_GPL(ata_dev_try_classify);
+
+EXPORT_SYMBOL_GPL(ata_cable_40wire);
+EXPORT_SYMBOL_GPL(ata_cable_80wire);
+EXPORT_SYMBOL_GPL(ata_cable_unknown);
+EXPORT_SYMBOL_GPL(ata_cable_sata);
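(For reference: the ata_cable_40wire/80wire/unknown/sata exports back the new
->cable_detect() hook, which the EH code below re-queries after reset once PDIAG- has
been released.  PATA drivers with no detection logic can point ->cable_detect directly
at one of the helpers; one with a vendor register might look like the sketch below.
The 0x4a config byte and its bit layout are invented for illustration.)

	static int my_cable_detect(struct ata_port *ap)
	{
		struct pci_dev *pdev = to_pci_dev(ap->host->dev);
		u8 tmp;

		/* hypothetical config byte: one 80-wire bit per channel */
		pci_read_config_byte(pdev, 0x4a, &tmp);
		if (tmp & (1 << ap->port_no))
			return ATA_CBL_PATA80;
		return ATA_CBL_PATA40;
	}

	static const struct ata_port_operations my_pata_ops = {
		/* ... other methods ... */
		.cable_detect	= my_cable_detect,	/* or ata_cable_40wire,
							 * ata_cable_unknown, ... */
	};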
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/libata-eh.c linux-2.6.18.x86_64.p4/drivers/ata/libata-eh.c
--- linux-2.6.18.x86_64.p3/drivers/ata/libata-eh.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/libata-eh.c	2007-06-06 10:08:00.000000000 -0400
@@ -44,10 +44,46 @@
 
 #include "libata.h"
 
+enum {
+	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
+	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
+	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
+};
+
+/* Waiting in ->prereset can never be reliable.  It's sometimes nice
+ * to wait there but it can't be depended upon; otherwise, we wouldn't
+ * be resetting.  Just give it enough time for most drives to spin up.
+ */
+enum {
+	ATA_EH_PRERESET_TIMEOUT		= 10 * HZ,
+};
+
+/* The following table determines how we sequence resets.  Each entry
+ * represents timeout for that try.  The first try can be soft or
+ * hardreset.  All others are hardreset if available.  In most cases
+ * the first reset w/ 10sec timeout should succeed.  Following entries
+ * are mostly for error handling, hotplug and retarded devices.
+ */
+static const unsigned long ata_eh_reset_timeouts[] = {
+	10 * HZ,	/* most drives spin up by 10sec */
+	10 * HZ,	/* > 99% working drives spin up before 20sec */
+	35 * HZ,	/* give > 30 secs of idleness for retarded devices */
+	5 * HZ,		/* and sweet one last chance */
+	/* > 1 min has elapsed, give up */
+};
+
 static void __ata_port_freeze(struct ata_port *ap);
 static void ata_eh_finish(struct ata_port *ap);
+#ifdef CONFIG_PM
 static void ata_eh_handle_port_suspend(struct ata_port *ap);
 static void ata_eh_handle_port_resume(struct ata_port *ap);
+#else /* CONFIG_PM */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{ }
+
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{ }
+#endif /* CONFIG_PM */
 
 static void ata_ering_record(struct ata_ering *ering, int is_io,
 			     unsigned int err_mask)
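(For reference: the reset path is now driven by deadlines.  Each entry in
ata_eh_reset_timeouts above bounds one try, and the prereset/softreset/hardreset methods
receive the absolute deadline instead of sleeping on their own schedule.  A
driver-supplied ->prereset under the new signature would typically end in the generic
helper; my_prereset below is only an illustrative sketch.)

	static int my_prereset(struct ata_port *ap, unsigned long deadline)
	{
		/* controller-specific preparation (PHY wake-up, etc.);
		 * returning -ENOENT here marks the port disabled
		 */

		/* generic part: wait for !BSY until this try's deadline */
		return ata_std_prereset(ap, deadline);
	}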
@@ -65,12 +101,9 @@
 	ent->timestamp = get_jiffies_64();
 }
 
-static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
+static void ata_ering_clear(struct ata_ering *ering)
 {
-	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
-	if (!ent->err_mask)
-		return NULL;
-	return ent;
+	memset(ering, 0, sizeof(*ering));
 }
 
 static int ata_ering_map(struct ata_ering *ering,
@@ -199,7 +232,7 @@
 	/* synchronize with port task */
 	ata_port_flush_task(ap);
 
-	/* synchronize with host_set lock and sort out timeouts */
+	/* synchronize with host lock and sort out timeouts */
 
 	/* For new EH, all qcs are finished in one of three ways -
 	 * normal completion, error completion, and SCSI timeout.
@@ -332,7 +365,7 @@
 	if (ap->pflags & ATA_PFLAG_LOADING)
 		ap->pflags &= ~ATA_PFLAG_LOADING;
 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-		queue_work(ata_aux_wq, &ap->hotplug_task);
+		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
 	if (ap->pflags & ATA_PFLAG_RECOVERED)
 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -376,7 +409,7 @@
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	/* make sure SCSI EH is complete */
-	if (scsi_host_in_recovery(ap->host)) {
+	if (scsi_host_in_recovery(ap->scsi_host)) {
 		msleep(10);
 		goto retry;
 	}
@@ -485,7 +518,7 @@
  *	other commands are drained.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
@@ -512,14 +545,17 @@
  *	all commands are drained.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_port_schedule_eh(struct ata_port *ap)
 {
 	WARN_ON(!ap->ops->error_handler);
 
+	if (ap->pflags & ATA_PFLAG_INITIALIZING)
+		return;
+
 	ap->pflags |= ATA_PFLAG_EH_PENDING;
-	scsi_schedule_eh(ap->host);
+	scsi_schedule_eh(ap->scsi_host);
 
 	DPRINTK("port EH scheduled\n");
 }
@@ -531,7 +567,7 @@
  *	Abort all active qc's of @ap and schedule EH.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Number of aborted qc's.
@@ -574,7 +610,7 @@
  *	is frozen.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 static void __ata_port_freeze(struct ata_port *ap)
 {
@@ -585,7 +621,7 @@
 
 	ap->pflags |= ATA_PFLAG_FROZEN;
 
-	DPRINTK("ata%u port frozen\n", ap->id);
+	DPRINTK("ata%u port frozen\n", ap->print_id);
 }
 
 /**
@@ -595,7 +631,7 @@
  *	Abort and freeze @ap.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Number of aborted commands.
@@ -658,7 +694,7 @@
 
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	DPRINTK("ata%u port thawed\n", ap->id);
+	DPRINTK("ata%u port thawed\n", ap->print_id);
 }
 
 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
@@ -954,26 +990,27 @@
  *	RETURNS:
  *	0 on success, AC_ERR_* mask on failure
  */
-static unsigned int atapi_eh_request_sense(struct ata_device *dev,
-					   unsigned char *sense_buf)
+static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
 {
+	struct ata_device *dev = qc->dev;
+	unsigned char *sense_buf = qc->scsicmd->sense_buffer;
 	struct ata_port *ap = dev->ap;
 	struct ata_taskfile tf;
 	u8 cdb[ATAPI_CDB_LEN];
 
 	DPRINTK("ATAPI request sense\n");
 
-	ata_tf_init(dev, &tf);
-
 	/* FIXME: is this needed? */
 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
 
-	/* XXX: why tf_read here? */
-	ap->ops->tf_read(ap, &tf);
-
-	/* fill these in, for the case where they are -not- overwritten */
+	/* initialize sense_buf with the error register,
+	 * for the case where they are -not- overwritten
+	 */
 	sense_buf[0] = 0x70;
-	sense_buf[2] = tf.feature >> 4;
+	sense_buf[2] = qc->result_tf.feature >> 4;
+
+	/* some devices time out if garbage left in tf */
+	ata_tf_init(dev, &tf);
 
 	memset(cdb, 0, ATAPI_CDB_LEN);
 	cdb[0] = REQUEST_SENSE;
@@ -1027,7 +1064,7 @@
 	}
 	if (serror & SERR_INTERNAL) {
 		err_mask |= AC_ERR_SYSTEM;
-		action |= ATA_EH_SOFTRESET;
+		action |= ATA_EH_HARDRESET;
 	}
 	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
 		ata_ehi_hotplugged(&ehc->i);
@@ -1122,7 +1159,9 @@
 		return ATA_EH_SOFTRESET;
 	}
 
-	if (!(qc->err_mask & AC_ERR_DEV))
+	if (stat & (ATA_ERR | ATA_DF))
+		qc->err_mask |= AC_ERR_DEV;
+	else
 		return 0;
 
 	switch (qc->dev->class) {
@@ -1136,19 +1175,20 @@
 		break;
 
 	case ATA_DEV_ATAPI:
-		tmp = atapi_eh_request_sense(qc->dev,
-					     qc->scsicmd->sense_buffer);
-		if (!tmp) {
-			/* ATA_QCFLAG_SENSE_VALID is used to tell
-			 * atapi_qc_complete() that sense data is
-			 * already valid.
-			 *
-			 * TODO: interpret sense data and set
-			 * appropriate err_mask.
-			 */
-			qc->flags |= ATA_QCFLAG_SENSE_VALID;
-		} else
-			qc->err_mask |= tmp;
+		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
+			tmp = atapi_eh_request_sense(qc);
+			if (!tmp) {
+				/* ATA_QCFLAG_SENSE_VALID is used to
+				 * tell atapi_qc_complete() that sense
+				 * data is already valid.
+				 *
+				 * TODO: interpret sense data and set
+				 * appropriate err_mask.
+				 */
+				qc->flags |= ATA_QCFLAG_SENSE_VALID;
+			} else
+				qc->err_mask |= tmp;
+		}
 	}
 
 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
@@ -1157,87 +1197,99 @@
 	return action;
 }
 
-static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
+static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
 {
-	if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
+	if (err_mask & AC_ERR_ATA_BUS)
 		return 1;
 
-	if (ent->is_io) {
-		if (ent->err_mask & AC_ERR_HSM)
-			return 1;
-		if ((ent->err_mask &
-		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+	if (err_mask & AC_ERR_TIMEOUT)
+		return 2;
+
+	if (is_io) {
+		if (err_mask & AC_ERR_HSM)
 			return 2;
+		if ((err_mask &
+		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+			return 3;
 	}
 
 	return 0;
 }
 
-struct speed_down_needed_arg {
+struct speed_down_verdict_arg {
 	u64 since;
-	int nr_errors[3];
+	int nr_errors[4];
 };
 
-static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
+static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
 {
-	struct speed_down_needed_arg *arg = void_arg;
+	struct speed_down_verdict_arg *arg = void_arg;
+	int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);
 
 	if (ent->timestamp < arg->since)
 		return -1;
 
-	arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
+	arg->nr_errors[cat]++;
 	return 0;
 }
 
 /**
- *	ata_eh_speed_down_needed - Determine wheter speed down is necessary
+ *	ata_eh_speed_down_verdict - Determine speed down verdict
  *	@dev: Device of interest
  *
  *	This function examines error ring of @dev and determines
- *	whether speed down is necessary.  Speed down is necessary if
- *	there have been more than 3 of Cat-1 errors or 10 of Cat-2
- *	errors during last 15 minutes.
+ *	whether NCQ needs to be turned off, transfer speed should be
+ *	stepped down, or falling back to PIO is necessary.
  *
- *	Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
- *	violation for known supported commands.
+ *	Cat-1 is ATA_BUS error for any command.
  *
- *	Cat-2 errors are unclassified DEV error for known supported
+ *	Cat-2 is TIMEOUT for any command or HSM violation for known
+ *	supported commands.
+ *
+ *	Cat-3 is unclassified DEV error for known supported
+ *	commands.
  *
+ *	NCQ needs to be turned off if there have been more than 3
+ *	Cat-2 + Cat-3 errors during last 10 minutes.
+ *
+ *	Speed down is necessary if there have been more than 3 Cat-1 +
+ *	Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
+ *
+ *	Falling back to PIO mode is necessary if there have been more
+ *	than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
+ *
  *	LOCKING:
  *	Inherited from caller.
  *
  *	RETURNS:
- *	1 if speed down is necessary, 0 otherwise
+ *	OR of ATA_EH_SPDN_* flags.
  */
-static int ata_eh_speed_down_needed(struct ata_device *dev)
+static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
 {
-	const u64 interval = 15LLU * 60 * HZ;
-	static const int err_limits[3] = { -1, 3, 10 };
-	struct speed_down_needed_arg arg;
-	struct ata_ering_entry *ent;
-	int err_cat;
-	u64 j64;
+	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
+	u64 j64 = get_jiffies_64();
+	struct speed_down_verdict_arg arg;
+	unsigned int verdict = 0;
 
-	ent = ata_ering_top(&dev->ering);
-	if (!ent)
-		return 0;
+	/* scan past 10 mins of error history */
+	memset(&arg, 0, sizeof(arg));
+	arg.since = j64 - min(j64, j10mins);
+	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
 
-	err_cat = ata_eh_categorize_ering_entry(ent);
-	if (err_cat == 0)
-		return 0;
+	if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
+		verdict |= ATA_EH_SPDN_NCQ_OFF;
+	if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
+		verdict |= ATA_EH_SPDN_SPEED_DOWN;
 
+	/* scan past 5 mins of error history */
 	memset(&arg, 0, sizeof(arg));
+	arg.since = j64 - min(j64, j5mins);
+	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
 
-	j64 = get_jiffies_64();
-	if (j64 >= interval)
-		arg.since = j64 - interval;
-	else
-		arg.since = 0;
+	if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
+		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
 
-	ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
-
-	return arg.nr_errors[err_cat] > err_limits[err_cat];
+	return verdict;
 }
 
 /**
@@ -1255,31 +1307,80 @@
  *	Kernel thread context (may sleep).
  *
  *	RETURNS:
- *	0 on success, -errno otherwise
+ *	Determined recovery action.
  */
-static int ata_eh_speed_down(struct ata_device *dev, int is_io,
-			     unsigned int err_mask)
+static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
+				      unsigned int err_mask)
 {
-	if (!err_mask)
+	unsigned int verdict;
+	unsigned int action = 0;
+
+	/* don't bother if Cat-0 error */
+	if (ata_eh_categorize_error(is_io, err_mask) == 0)
 		return 0;
 
 	/* record error and determine whether speed down is necessary */
 	ata_ering_record(&dev->ering, is_io, err_mask);
+	verdict = ata_eh_speed_down_verdict(dev);
 
-	if (!ata_eh_speed_down_needed(dev))
-		return 0;
+	/* turn off NCQ? */
+	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
+	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
+			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
+		dev->flags |= ATA_DFLAG_NCQ_OFF;
+		ata_dev_printk(dev, KERN_WARNING,
+			       "NCQ disabled due to excessive errors\n");
+		goto done;
+	}
 
-	/* speed down SATA link speed if possible */
-	if (sata_down_spd_limit(dev->ap) == 0)
-		return ATA_EH_HARDRESET;
+	/* speed down? */
+	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
+		/* speed down SATA link speed if possible */
+		if (sata_down_spd_limit(dev->ap) == 0) {
+			action |= ATA_EH_HARDRESET;
+			goto done;
+		}
 
-	/* lower transfer mode */
-	if (ata_down_xfermask_limit(dev, 0) == 0)
-		return ATA_EH_SOFTRESET;
+		/* lower transfer mode */
+		if (dev->spdn_cnt < 2) {
+			static const int dma_dnxfer_sel[] =
+				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
+			static const int pio_dnxfer_sel[] =
+				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
+			int sel;
+
+			if (dev->xfer_shift != ATA_SHIFT_PIO)
+				sel = dma_dnxfer_sel[dev->spdn_cnt];
+			else
+				sel = pio_dnxfer_sel[dev->spdn_cnt];
+
+			dev->spdn_cnt++;
+
+			if (ata_down_xfermask_limit(dev, sel) == 0) {
+				action |= ATA_EH_SOFTRESET;
+				goto done;
+			}
+		}
+	}
+
+	/* Fall back to PIO?  Slowing down to PIO is meaningless for
+	 * SATA.  Consider it only for PATA.
+	 */
+	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
+	    (dev->ap->cbl != ATA_CBL_SATA) &&
+	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
+		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
+			dev->spdn_cnt = 0;
+			action |= ATA_EH_SOFTRESET;
+			goto done;
+		}
+	}
 
-	ata_dev_printk(dev, KERN_ERR,
-		       "speed down requested but no transfer mode left\n");
 	return 0;
+ done:
+	/* device has been slowed down, blow error history */
+	ata_ering_clear(&dev->ering);
+	return action;
 }
 
 /**
@@ -1433,28 +1534,46 @@
 	}
 
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		static const char *dma_str[] = {
+			[DMA_BIDIRECTIONAL]	= "bidi",
+			[DMA_TO_DEVICE]		= "out",
+			[DMA_FROM_DEVICE]	= "in",
+			[DMA_NONE]		= "",
+		};
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
 
 		if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
 			continue;
 
-		ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
-			       "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
-			       qc->tag, qc->tf.command, qc->err_mask,
-			       qc->result_tf.command, qc->result_tf.feature,
-			       ata_err_string(qc->err_mask));
+		ata_dev_printk(qc->dev, KERN_ERR,
+			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+			"tag %d cdb 0x%x data %u %s\n         "
+			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+			"Emask 0x%x (%s)\n",
+			cmd->command, cmd->feature, cmd->nsect,
+			cmd->lbal, cmd->lbam, cmd->lbah,
+			cmd->hob_feature, cmd->hob_nsect,
+			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
+			cmd->device, qc->tag, qc->cdb[0], qc->nbytes,
+			dma_str[qc->dma_dir],
+			res->command, res->feature, res->nsect,
+			res->lbal, res->lbam, res->lbah,
+			res->hob_feature, res->hob_nsect,
+			res->hob_lbal, res->hob_lbam, res->hob_lbah,
+			res->device, qc->err_mask, ata_err_string(qc->err_mask));
 	}
 }
 
 static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
-			unsigned int *classes)
+			unsigned int *classes, unsigned long deadline)
 {
 	int i, rc;
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		classes[i] = ATA_DEV_UNKNOWN;
 
-	rc = reset(ap, classes);
+	rc = reset(ap, classes, deadline);
 	if (rc)
 		return rc;
 
@@ -1492,8 +1611,9 @@
 {
 	struct ata_eh_context *ehc = &ap->eh_context;
 	unsigned int *classes = ehc->classes;
-	int tries = ATA_EH_RESET_TRIES;
 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
+	int try = 0;
+	unsigned long deadline;
 	unsigned int action;
 	ata_reset_fn_t reset;
 	int i, did_followup_srst, rc;
@@ -1513,9 +1633,19 @@
 		ehc->i.action |= ATA_EH_HARDRESET;
 
 	if (prereset) {
-		rc = prereset(ap);
+		rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT);
 		if (rc) {
-			ata_port_printk(ap, KERN_ERR,
+			if (rc == -ENOENT) {
+				ata_port_printk(ap, KERN_DEBUG,
+						"port disabled. ignoring.\n");
+				ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
+
+				for (i = 0; i < ATA_MAX_DEVICES; i++)
+					classes[i] = ATA_DEV_NONE;
+
+				rc = 0;
+			} else
+				ata_port_printk(ap, KERN_ERR,
 					"prereset failed (errno=%d)\n", rc);
 			return rc;
 		}
@@ -1544,15 +1674,20 @@
 	}
 
  retry:
+	deadline = jiffies + ata_eh_reset_timeouts[try++];
+
 	/* shut up during boot probing */
 	if (verbose)
 		ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
 				reset == softreset ? "soft" : "hard");
 
 	/* mark that this EH session started with reset */
-	ehc->i.flags |= ATA_EHI_DID_RESET;
+	if (reset == hardreset)
+		ehc->i.flags |= ATA_EHI_DID_HARDRESET;
+	else
+		ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
-	rc = ata_do_reset(ap, reset, classes);
+	rc = ata_do_reset(ap, reset, classes, deadline);
 
 	did_followup_srst = 0;
 	if (reset == hardreset &&
@@ -1569,7 +1704,7 @@
 		}
 
 		ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
-		rc = ata_do_reset(ap, reset, classes);
+		rc = ata_do_reset(ap, reset, classes, deadline);
 
 		if (rc == 0 && classify &&
 		    classes[0] == ATA_DEV_UNKNOWN) {
@@ -1579,22 +1714,21 @@
 		}
 	}
 
-	if (rc && --tries) {
-		const char *type;
+	if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) {
+		unsigned long now = jiffies;
 
-		if (reset == softreset) {
-			if (did_followup_srst)
-				type = "follow-up soft";
-			else
-				type = "soft";
-		} else
-			type = "hard";
+		if (time_before(now, deadline)) {
+			unsigned long delta = deadline - jiffies;
 
-		ata_port_printk(ap, KERN_WARNING,
-				"%sreset failed, retrying in 5 secs\n", type);
-		ssleep(5);
+			ata_port_printk(ap, KERN_WARNING, "reset failed "
+				"(errno=%d), retrying in %u secs\n",
+				rc, (jiffies_to_msecs(delta) + 999) / 1000);
 
-		if (reset == hardreset)
+			schedule_timeout_uninterruptible(delta);
+		}
+
+		if (reset == hardreset &&
+		    try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
 			sata_down_spd_limit(ap);
 		if (hardreset)
 			reset = hardreset;
@@ -1624,31 +1758,43 @@
 {
 	struct ata_eh_context *ehc = &ap->eh_context;
 	struct ata_device *dev;
+	unsigned int new_mask = 0;
 	unsigned long flags;
 	int i, rc = 0;
 
 	DPRINTK("ENTER\n");
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		unsigned int action;
+	/* For PATA drive side cable detection to work, IDENTIFY must
+	 * be done backwards such that PDIAG- is released by the slave
+	 * device before the master device is identified.
+	 */
+	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
+		unsigned int action, readid_flags = 0;
 
 		dev = &ap->device[i];
 		action = ata_eh_dev_action(dev);
 
-		if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
+		if (ehc->i.flags & ATA_EHI_DID_RESET)
+			readid_flags |= ATA_READID_POSTRESET;
+
+		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
 			if (ata_port_offline(ap)) {
 				rc = -EIO;
-				break;
+				goto err;
 			}
 
 			ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
-			rc = ata_dev_revalidate(dev,
-					ehc->i.flags & ATA_EHI_DID_RESET);
+			rc = ata_dev_revalidate(dev, readid_flags);
 			if (rc)
-				break;
+				goto err;
 
 			ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
 
+			/* Configuration may have changed, reconfigure
+			 * transfer mode.
+			 */
+			ehc->i.flags |= ATA_EHI_SETMODE;
+
 			/* schedule the scsi_rescan_device() here */
 			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
 		} else if (dev->class == ATA_DEV_UNKNOWN &&
@@ -1656,184 +1802,60 @@
 			   ata_class_enabled(ehc->classes[dev->devno])) {
 			dev->class = ehc->classes[dev->devno];
 
-			rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
-			if (rc == 0)
-				rc = ata_dev_configure(dev, 1);
-
-			if (rc) {
+			rc = ata_dev_read_id(dev, &dev->class, readid_flags,
+					     dev->id);
+			switch (rc) {
+			case 0:
+				new_mask |= 1 << i;
+				break;
+			case -ENOENT:
+				/* IDENTIFY was issued to non-existent
+				 * device.  No need to reset.  Just
+				 * thaw and kill the device.
+				 */
+				ata_eh_thaw_port(ap);
 				dev->class = ATA_DEV_UNKNOWN;
 				break;
+			default:
+				dev->class = ATA_DEV_UNKNOWN;
+				goto err;
 			}
-
-			spin_lock_irqsave(ap->lock, flags);
-			ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
-			spin_unlock_irqrestore(ap->lock, flags);
 		}
 	}
 
-	if (rc)
-		*r_failed_dev = dev;
-
-	DPRINTK("EXIT\n");
-	return rc;
-}
-
-/**
- *	ata_eh_suspend - handle suspend EH action
- *	@ap: target host port
- *	@r_failed_dev: result parameter to indicate failing device
- *
- *	Handle suspend EH action.  Disk devices are spinned down and
- *	other types of devices are just marked suspended.  Once
- *	suspended, no EH action to the device is allowed until it is
- *	resumed.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	0 on success, -errno otherwise
- */
-static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
-{
-	struct ata_device *dev;
-	int i, rc = 0;
-
-	DPRINTK("ENTER\n");
+	/* PDIAG- should have been released, ask cable type if post-reset */
+	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ap->ops->cable_detect)
+		ap->cbl = ap->ops->cable_detect(ap);
 
+	/* Configure new devices forward such that user doesn't see
+	 * device detection messages backwards.
+	 */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		unsigned long flags;
-		unsigned int action, err_mask;
-
 		dev = &ap->device[i];
-		action = ata_eh_dev_action(dev);
 
-		if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
+		if (!(new_mask & (1 << i)))
 			continue;
 
-		WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
-
-		ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
-
-		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
-			/* flush cache */
-			rc = ata_flush_cache(dev);
-			if (rc)
-				break;
-
-			/* spin down */
-			err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
-			if (err_mask) {
-				ata_dev_printk(dev, KERN_ERR, "failed to "
-					       "spin down (err_mask=0x%x)\n",
-					       err_mask);
-				rc = -EIO;
-				break;
-			}
-		}
+		ehc->i.flags |= ATA_EHI_PRINTINFO;
+		rc = ata_dev_configure(dev);
+		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
+		if (rc)
+			goto err;
 
 		spin_lock_irqsave(ap->lock, flags);
-		dev->flags |= ATA_DFLAG_SUSPENDED;
+		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
 		spin_unlock_irqrestore(ap->lock, flags);
 
-		ata_eh_done(ap, dev, ATA_EH_SUSPEND);
+		/* new device discovered, configure xfermode */
+		ehc->i.flags |= ATA_EHI_SETMODE;
 	}
 
-	if (rc)
-		*r_failed_dev = dev;
-
-	DPRINTK("EXIT\n");
 	return 0;
-}
-
-/**
- *	ata_eh_prep_resume - prep for resume EH action
- *	@ap: target host port
- *
- *	Clear SUSPENDED in preparation for scheduled resume actions.
- *	This allows other parts of EH to access the devices being
- *	resumed.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- */
-static void ata_eh_prep_resume(struct ata_port *ap)
-{
-	struct ata_device *dev;
-	unsigned long flags;
-	int i;
-
-	DPRINTK("ENTER\n");
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		unsigned int action;
-
-		dev = &ap->device[i];
-		action = ata_eh_dev_action(dev);
-
-		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
-			continue;
-
-		spin_lock_irqsave(ap->lock, flags);
-		dev->flags &= ~ATA_DFLAG_SUSPENDED;
-		spin_unlock_irqrestore(ap->lock, flags);
-	}
-
-	DPRINTK("EXIT\n");
-}
-
-/**
- *	ata_eh_resume - handle resume EH action
- *	@ap: target host port
- *	@r_failed_dev: result parameter to indicate failing device
- *
- *	Handle resume EH action.  Target devices are already reset and
- *	revalidated.  Spinning up is the only operation left.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	0 on success, -errno otherwise
- */
-static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
-{
-	struct ata_device *dev;
-	int i, rc = 0;
-
-	DPRINTK("ENTER\n");
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		unsigned int action, err_mask;
-
-		dev = &ap->device[i];
-		action = ata_eh_dev_action(dev);
 
-		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
-			continue;
-
-		ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
-
-		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
-			err_mask = ata_do_simple_cmd(dev,
-						     ATA_CMD_IDLEIMMEDIATE);
-			if (err_mask) {
-				ata_dev_printk(dev, KERN_ERR, "failed to "
-					       "spin up (err_mask=0x%x)\n",
-					       err_mask);
-				rc = -EIO;
-				break;
-			}
-		}
-
-		ata_eh_done(ap, dev, ATA_EH_RESUME);
-	}
-
-	if (rc)
-		*r_failed_dev = dev;
-
-	DPRINTK("EXIT\n");
-	return 0;
+ err:
+	*r_failed_dev = dev;
+	DPRINTK("EXIT rc=%d\n", rc);
+	return rc;
 }
 
 static int ata_port_nr_enabled(struct ata_port *ap)
@@ -1861,17 +1883,6 @@
 	struct ata_eh_context *ehc = &ap->eh_context;
 	int i;
 
-	/* skip if all possible devices are suspended */
-	for (i = 0; i < ata_port_max_devices(ap); i++) {
-		struct ata_device *dev = &ap->device[i];
-
-		if (!(dev->flags & ATA_DFLAG_SUSPENDED))
-			break;
-	}
-
-	if (i == ata_port_max_devices(ap))
-		return 1;
-
 	/* thaw frozen port, resume link and recover failed devices */
 	if ((ap->pflags & ATA_PFLAG_FROZEN) ||
 	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
@@ -1916,7 +1927,7 @@
 {
 	struct ata_eh_context *ehc = &ap->eh_context;
 	struct ata_device *dev;
-	int down_xfermask, i, rc;
+	int i, rc;
 
 	DPRINTK("ENTER\n");
 
@@ -1926,6 +1937,10 @@
 
 		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
 
+		/* collect port action mask recorded in dev actions */
+		ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK;
+		ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK;
+
 		/* process hotplug request */
 		if (dev->flags & ATA_DFLAG_DETACH)
 			ata_eh_detach_dev(dev);
@@ -1941,16 +1956,12 @@
 	}
 
  retry:
-	down_xfermask = 0;
 	rc = 0;
 
 	/* if UNLOADING, finish immediately */
 	if (ap->pflags & ATA_PFLAG_UNLOADING)
 		goto out;
 
-	/* prep for resume */
-	ata_eh_prep_resume(ap);
-
 	/* skip EH if possible. */
 	if (ata_eh_skip_recovery(ap))
 		ehc->i.action = 0;
@@ -1978,42 +1989,34 @@
 	if (rc)
 		goto dev_fail;
 
-	/* resume devices */
-	rc = ata_eh_resume(ap, &dev);
-	if (rc)
-		goto dev_fail;
-
-	/* configure transfer mode if the port has been reset */
-	if (ehc->i.flags & ATA_EHI_DID_RESET) {
+	/* configure transfer mode if necessary */
+	if (ehc->i.flags & ATA_EHI_SETMODE) {
 		rc = ata_set_mode(ap, &dev);
-		if (rc) {
-			down_xfermask = 1;
+		if (rc)
 			goto dev_fail;
-		}
+		ehc->i.flags &= ~ATA_EHI_SETMODE;
 	}
 
-	/* suspend devices */
-	rc = ata_eh_suspend(ap, &dev);
-	if (rc)
-		goto dev_fail;
-
 	goto out;
 
  dev_fail:
+	ehc->tries[dev->devno]--;
+
 	switch (rc) {
 	case -ENODEV:
-		/* device missing, schedule probing */
+		/* device missing or wrong IDENTIFY data, schedule probing */
 		ehc->i.probe_mask |= (1 << dev->devno);
 	case -EINVAL:
-		ehc->tries[dev->devno] = 0;
-		break;
+		/* give it just one more chance */
+		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
 	case -EIO:
-		sata_down_spd_limit(ap);
-	default:
-		ehc->tries[dev->devno]--;
-		if (down_xfermask &&
-		    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
-			ehc->tries[dev->devno] = 0;
+		if (ehc->tries[dev->devno] == 1) {
+			/* This is the last chance, better to slow
+			 * down than lose it.
+			 */
+			sata_down_spd_limit(ap);
+			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		}
 	}
 
 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
@@ -2128,6 +2131,7 @@
 	ata_eh_finish(ap);
 }
 
+#ifdef CONFIG_PM
 /**
  *	ata_eh_handle_port_suspend - perform port suspend operation
  *	@ap: port to suspend
@@ -2184,22 +2188,13 @@
  *
  *	Resume @ap.
  *
- *	This function also waits upto one second until all devices
- *	hanging off this port requests resume EH action.  This is to
- *	prevent invoking EH and thus reset multiple times on resume.
- *
- *	On DPM resume, where some of devices might not be resumed
- *	together, this may delay port resume upto one second, but such
- *	DPM resumes are rare and 1 sec delay isn't too bad.
- *
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
 static void ata_eh_handle_port_resume(struct ata_port *ap)
 {
-	unsigned long timeout;
 	unsigned long flags;
-	int i, rc = 0;
+	int rc = 0;
 
 	/* are we resuming? */
 	spin_lock_irqsave(ap->lock, flags);
@@ -2210,31 +2205,12 @@
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	/* spurious? */
-	if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
-		goto done;
+	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
 
 	if (ap->ops->port_resume)
 		rc = ap->ops->port_resume(ap);
 
-	/* give devices time to request EH */
-	timeout = jiffies + HZ; /* 1s max */
-	while (1) {
-		for (i = 0; i < ATA_MAX_DEVICES; i++) {
-			struct ata_device *dev = &ap->device[i];
-			unsigned int action = ata_eh_dev_action(dev);
-
-			if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
-			    !(action & ATA_EH_RESUME))
-				break;
-		}
-
-		if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
-			break;
-		msleep(10);
-	}
-
- done:
+	/* report result */
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
 	if (ap->pm_result) {
@@ -2243,3 +2219,4 @@
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
 }
+#endif /* CONFIG_PM */
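
For reference, the libata-eh.c changes above replace the old fixed "reset failed,
retrying in 5 secs" loop with a table-driven one: each reset attempt gets its own
deadline from ata_eh_reset_timeouts[], a failed attempt sleeps out whatever is
left of that budget before retrying, and the SATA link speed is only lowered
going into the final hardreset attempt.  A rough sketch of that cadence -- the
timeout values and the do_reset() placeholder below are illustrative assumptions,
not code from the patch:

	static const unsigned long demo_reset_timeouts[] = {
		10 * HZ,	/* first attempt gets a short budget */
		10 * HZ,
		35 * HZ,	/* final attempt: allow for slow spin-up */
	};
	int try = 0;
	unsigned long deadline;

	do {
		deadline = jiffies + demo_reset_timeouts[try++];

		if (do_reset(deadline) == 0)	/* stands in for ata_do_reset() */
			break;

		/* sleep out the rest of this attempt's budget */
		if (time_before(jiffies, deadline))
			schedule_timeout_uninterruptible(deadline - jiffies);
	} while (try < ARRAY_SIZE(demo_reset_timeouts));
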
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/libata.h linux-2.6.18.x86_64.p4/drivers/ata/libata.h
--- linux-2.6.18.x86_64.p3/drivers/ata/libata.h	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/libata.h	2007-06-06 10:08:00.000000000 -0400
@@ -29,7 +29,6 @@
 #define __LIBATA_H__
 
 #define DRV_NAME	"libata"
-#define DRV_VERSION	"2.00"	/* must be exactly four chars */
 
 struct ata_scsi_args {
 	struct ata_device	*dev;
@@ -39,25 +38,51 @@
 };
 
 /* libata-core.c */
+enum {
+	/* flags for ata_dev_read_id() */
+	ATA_READID_POSTRESET	= (1 << 0), /* reading ID after reset */
+
+	/* selector for ata_down_xfermask_limit() */
+	ATA_DNXFER_PIO		= 0,	/* speed down PIO */
+	ATA_DNXFER_DMA		= 1,	/* speed down DMA */
+	ATA_DNXFER_40C		= 2,	/* apply 40c cable limit */
+	ATA_DNXFER_FORCE_PIO	= 3,	/* force PIO */
+	ATA_DNXFER_FORCE_PIO0	= 4,	/* force PIO0 */
+
+	ATA_DNXFER_QUIET	= (1 << 31),
+};
+
+extern unsigned int ata_print_id;
 extern struct workqueue_struct *ata_aux_wq;
 extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
+extern int libata_noacpi;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
-extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+			   u64 block, u32 n_block, unsigned int tf_flags,
+			   unsigned int tag);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern void ata_dev_disable(struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
 				  int dma_dir, void *buf, unsigned int buflen);
+extern unsigned ata_exec_internal_sg(struct ata_device *dev,
+				     struct ata_taskfile *tf, const u8 *cdb,
+				     int dma_dir, struct scatterlist *sg,
+				     unsigned int n_elem);
 extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
 extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
-			   int post_reset, u16 *id);
-extern int ata_dev_configure(struct ata_device *dev, int print_info);
+			   unsigned int flags, u16 *id);
+extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
+extern int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags);
+extern int ata_dev_configure(struct ata_device *dev);
 extern int sata_down_spd_limit(struct ata_port *ap);
 extern int sata_set_spd_needed(struct ata_port *ap);
-extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
+extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
 extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern void ata_sg_clean(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
@@ -69,16 +94,29 @@
 extern void ata_dev_init(struct ata_device *dev);
 extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
-extern void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
-			  const struct ata_probe_ent *ent, unsigned int port_no);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
 
+/* libata-acpi.c */
+#ifdef CONFIG_ATA_ACPI
+extern int ata_acpi_exec_tfs(struct ata_port *ap);
+extern int ata_acpi_push_id(struct ata_device *dev);
+#else
+static inline int ata_acpi_exec_tfs(struct ata_port *ap)
+{
+	return 0;
+}
+static inline int ata_acpi_push_id(struct ata_device *dev)
+{
+	return 0;
+}
+#endif
 
 /* libata-scsi.c */
-extern struct scsi_transport_template ata_scsi_transport_template;
-
+extern int ata_scsi_add_hosts(struct ata_host *host,
+			      struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(void *);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 			       unsigned int buflen);
 
@@ -108,7 +146,7 @@
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(void *);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
@@ -117,4 +155,8 @@
 extern void ata_port_wait_eh(struct ata_port *ap);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
 
+/* libata-sff.c */
+extern u8 ata_irq_on(struct ata_port *ap);
+
+
 #endif /* __LIBATA_H__ */
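
Two notes on the libata.h interface changes above, since the later hunks depend
on them: ata_dev_read_id() now takes a flags word (ATA_READID_POSTRESET) rather
than a bare post_reset int, and ata_down_xfermask_limit() takes a single "sel"
argument that packs a speed-down method in its low bits with an optional
ATA_DNXFER_QUIET modifier in bit 31.  A minimal sketch of how the callee is
presumably expected to split that argument, plus the call EH makes above (the
decode lines are an illustrative assumption, not code from the patch):

	int quiet = sel & ATA_DNXFER_QUIET;	/* suppress the speed-down warning */

	sel &= ~ATA_DNXFER_QUIET;	/* leaves ATA_DNXFER_PIO ... ATA_DNXFER_FORCE_PIO0 */

	/* e.g. the last-chance slowdown in ata_eh_recover() above: */
	ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
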
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/libata-scsi.c linux-2.6.18.x86_64.p4/drivers/ata/libata-scsi.c
--- linux-2.6.18.x86_64.p3/drivers/ata/libata-scsi.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/libata-scsi.c	2007-06-06 10:08:00.000000000 -0400
@@ -51,7 +51,7 @@
 
 #define SECTOR_SIZE	512
 
-typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
+typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
 
 static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
 					const struct scsi_device *scsidev);
@@ -104,7 +104,7 @@
  * libata transport template.  libata doesn't do real transport stuff.
  * It just needs the eh_timed_out hook.
  */
-struct scsi_transport_template ata_scsi_transport_template = {
+static struct scsi_transport_template ata_scsi_transport_template = {
 	.eh_strategy_handler	= ata_scsi_error,
 	.eh_timed_out		= ata_scsi_timed_out,
 	.user_scan		= ata_scsi_user_scan,
@@ -149,6 +149,45 @@
 }
 
 /**
+ *	ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
+ *	@sdev: SCSI device to get identify data for
+ *	@arg: User buffer area for identify data
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  We don't really care.
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno on error.
+ */
+static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+	u16 __user *dst = arg;
+	char buf[40];
+
+	if (!dev)
+		return -ENOMSG;
+
+	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
+		return -EFAULT;
+
+	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
+	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
+		return -EFAULT;
+
+	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
+	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
+		return -EFAULT;
+
+	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
  *	ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
  *	@scsidev: Device to which we are issuing command
  *	@arg: User provided data for issuing command
@@ -159,15 +198,14 @@
  *	RETURNS:
  *	Zero on success, negative errno on error.
  */
-
 int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 {
 	int rc = 0;
 	u8 scsi_cmd[MAX_COMMAND_SIZE];
-	u8 args[4], *argbuf = NULL;
+	u8 args[4], *argbuf = NULL, *sensebuf = NULL;
 	int argsize = 0;
-	struct scsi_sense_hdr sshdr;
 	enum dma_data_direction data_dir;
+	int cmd_result;
 
 	if (arg == NULL)
 		return -EINVAL;
@@ -175,6 +213,10 @@
 	if (copy_from_user(args, arg, sizeof(args)))
 		return -EFAULT;
 
+	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+	if (!sensebuf)
+		return -ENOMEM;
+
 	memset(scsi_cmd, 0, sizeof(scsi_cmd));
 
 	if (args[3]) {
@@ -191,7 +233,7 @@
 		data_dir = DMA_FROM_DEVICE;
 	} else {
 		scsi_cmd[1]  = (3 << 1); /* Non-data */
-		/* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
+		scsi_cmd[2]  = 0x20;     /* cc but no off.line or data xfer */
 		data_dir = DMA_NONE;
 	}
 
@@ -210,18 +252,46 @@
 
 	/* Good values for timeout and retries?  Values below
 	   from scsi_ioctl_send_command() for default case... */
-	if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
-			     &sshdr, (10*HZ), 5)) {
+	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
+	                          sensebuf, (10*HZ), 5, 0);
+
+	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+		u8 *desc = sensebuf + 8;
+		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+
+		/* If we set cc then ATA pass-through will cause a
+		 * check condition even if no error. Filter that. */
+		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+			struct scsi_sense_hdr sshdr;
+			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
+			                      &sshdr);
+			if (sshdr.sense_key==0 &&
+			    sshdr.asc==0 && sshdr.ascq==0)
+				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
+		}
+
+		/* Send userspace a few ATA registers (same as drivers/ide) */
+		if (sensebuf[0] == 0x72 &&     /* format is "descriptor" */
+		    desc[0] == 0x09 ) {        /* code is "ATA Descriptor" */
+			args[0] = desc[13];    /* status */
+			args[1] = desc[3];     /* error */
+			args[2] = desc[5];     /* sector count (0:7) */
+			if (copy_to_user(arg, args, sizeof(args)))
+				rc = -EFAULT;
+		}
+	}
+
+
+	if (cmd_result) {
 		rc = -EIO;
 		goto error;
 	}
 
-	/* Need code to retrieve data from check condition? */
-
 	if ((argbuf)
 	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
 		rc = -EFAULT;
 error:
+	kfree(sensebuf);
 	kfree(argbuf);
 	return rc;
 }
@@ -263,39 +333,39 @@
 	scsi_cmd[8]  = args[3];
 	scsi_cmd[10] = args[4];
 	scsi_cmd[12] = args[5];
-	scsi_cmd[13] = args[6] & 0x0f;
+	scsi_cmd[13] = args[6] & 0x4f;
 	scsi_cmd[14] = args[0];
 
 	/* Good values for timeout and retries?  Values below
 	   from scsi_ioctl_send_command() for default case... */
 	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
-				  sensebuf, (10*HZ), 5, 0);
+				sensebuf, (10*HZ), 5, 0);
 
 	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
 		u8 *desc = sensebuf + 8;
 		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
-		
-		/* If we set cc the ATA pass-through will cause a
+
+		/* If we set cc then ATA pass-through will cause a
 		 * check condition even if no error. Filter that. */
 		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
 			struct scsi_sense_hdr sshdr;
 			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
-					     &sshdr);
-			if (sshdr.sense_key==0 && 
-			    sshdr.asc==0 && sshdr.ascq==0)
+						&sshdr);
+			if (sshdr.sense_key==0 &&
+				sshdr.asc==0 && sshdr.ascq==0)
 				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
 		}
 
 		/* Send userspace ATA registers */
-		if (sensebuf[0] == 0x72 &&     /* format is "descriptor" */
-		    desc[0] ==0x09) {          /* code is "ATA Descriptor" */
-			args[0] = desc[13];    /* status */
-			args[1] = desc[3];     /* error */
-			args[2] = desc[5];     /* sector count (0:7) */
-			args[3] = desc[7];     /* lbal */
-			args[4] = desc[9];     /* lbam */
-			args[5] = desc[11];    /* lbah */
-			args[6] = desc[12];    /* select */
+		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
+				desc[0] == 0x09) {/* code is "ATA Descriptor" */
+			args[0] = desc[13];	/* status */
+			args[1] = desc[3];	/* error */
+			args[2] = desc[5];	/* sector count (0:7) */
+			args[3] = desc[7];	/* lbal */
+			args[4] = desc[9];	/* lbam */
+			args[5] = desc[11];	/* lbah */
+			args[6] = desc[12];	/* select */
 			if (copy_to_user(arg, args, sizeof(args)))
 				rc = -EFAULT;
 		}
@@ -328,6 +398,9 @@
 			return -EINVAL;
 		return 0;
 
+	case HDIO_GET_IDENTITY:
+		return ata_get_identity(scsidev, arg);
+
 	case HDIO_DRIVE_CMD:
 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
 			return -EACCES;
@@ -361,14 +434,14 @@
  *	current command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Command allocated, or %NULL if none available.
  */
-struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
-				       struct scsi_cmnd *cmd,
-				       void (*done)(struct scsi_cmnd *))
+static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
+					      struct scsi_cmnd *cmd,
+					      void (*done)(struct scsi_cmnd *))
 {
 	struct ata_queued_cmd *qc;
 
@@ -380,7 +453,7 @@
 		if (cmd->use_sg) {
 			qc->__sg = (struct scatterlist *) cmd->request_buffer;
 			qc->n_elem = cmd->use_sg;
-		} else {
+		} else if (cmd->request_bufflen) {
 			qc->__sg = &qc->sgent;
 			qc->n_elem = 1;
 		}
@@ -404,7 +477,7 @@
  *	LOCKING:
  *	inherited from caller
  */
-void ata_dump_status(unsigned id, struct ata_taskfile *tf)
+static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
 {
 	u8 stat = tf->command, err = tf->feature;
 
@@ -438,131 +511,6 @@
 }
 
 /**
- *	ata_scsi_device_suspend - suspend ATA device associated with sdev
- *	@sdev: the SCSI device to suspend
- *	@state: target power management state
- *
- *	Request suspend EH action on the ATA device associated with
- *	@sdev and wait for the operation to complete.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
-{
-	struct ata_port *ap = ata_shost_to_port(sdev->host);
-	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
-	unsigned long flags;
-	unsigned int action;
-	int rc = 0;
-
-	if (!dev)
-		goto out;
-
-	spin_lock_irqsave(ap->lock, flags);
-
-	/* wait for the previous resume to complete */
-	while (dev->flags & ATA_DFLAG_SUSPENDED) {
-		spin_unlock_irqrestore(ap->lock, flags);
-		ata_port_wait_eh(ap);
-		spin_lock_irqsave(ap->lock, flags);
-	}
-
-	/* if @sdev is already detached, nothing to do */
-	if (sdev->sdev_state == SDEV_OFFLINE ||
-	    sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
-		goto out_unlock;
-
-	/* request suspend */
-	action = ATA_EH_SUSPEND;
-	if (state.event != PM_EVENT_SUSPEND)
-		action |= ATA_EH_PM_FREEZE;
-	ap->eh_info.dev_action[dev->devno] |= action;
-	ap->eh_info.flags |= ATA_EHI_QUIET;
-	ata_port_schedule_eh(ap);
-
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	/* wait for EH to do the job */
-	ata_port_wait_eh(ap);
-
-	spin_lock_irqsave(ap->lock, flags);
-
-	/* If @sdev is still attached but the associated ATA device
-	 * isn't suspended, the operation failed.
-	 */
-	if (sdev->sdev_state != SDEV_OFFLINE &&
-	    sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
-	    !(dev->flags & ATA_DFLAG_SUSPENDED))
-		rc = -EIO;
-
- out_unlock:
-	spin_unlock_irqrestore(ap->lock, flags);
- out:
-	if (rc == 0)
-		sdev->sdev_gendev.power.power_state = state;
-	return rc;
-}
-
-/**
- *	ata_scsi_device_resume - resume ATA device associated with sdev
- *	@sdev: the SCSI device to resume
- *
- *	Request resume EH action on the ATA device associated with
- *	@sdev and return immediately.  This enables parallel
- *	wakeup/spinup of devices.
- *
- *	LOCKING:
- *	Kernel thread context (may sleep).
- *
- *	RETURNS:
- *	0.
- */
-int ata_scsi_device_resume(struct scsi_device *sdev)
-{
-	struct ata_port *ap = ata_shost_to_port(sdev->host);
-	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
-	struct ata_eh_info *ehi = &ap->eh_info;
-	unsigned long flags;
-	unsigned int action;
-
-	if (!dev)
-		goto out;
-
-	spin_lock_irqsave(ap->lock, flags);
-
-	/* if @sdev is already detached, nothing to do */
-	if (sdev->sdev_state == SDEV_OFFLINE ||
-	    sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
-		goto out_unlock;
-
-	/* request resume */
-	action = ATA_EH_RESUME;
-	if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
-		__ata_ehi_hotplugged(ehi);
-	else
-		action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
-	ehi->dev_action[dev->devno] |= action;
-
-	/* We don't want autopsy and verbose EH messages.  Disable
-	 * those if we're the only device on this link.
-	 */
-	if (ata_port_max_devices(ap) == 1)
-		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
-
-	ata_port_schedule_eh(ap);
-
- out_unlock:
-	spin_unlock_irqrestore(ap->lock, flags);
- out:
-	sdev->sdev_gendev.power.power_state = PMSG_ON;
-	return 0;
-}
-
-/**
  *	ata_to_sense_error - convert ATA error to SCSI error
  *	@id: ATA device number
  *	@drv_stat: value contained in ATA status register
@@ -577,10 +525,10 @@
  *	format sense blocks.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
-			u8 *ascq, int verbose)
+static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
+			       u8 *asc, u8 *ascq, int verbose)
 {
 	int i;
 
@@ -679,7 +627,7 @@
 }
 
 /*
- *	ata_gen_ata_desc_sense - Generate check condition sense block.
+ *	ata_gen_passthru_sense - Generate check condition sense block.
  *	@qc: Command that completed.
  *
  *	This function is specific to the ATA descriptor format sense
@@ -689,9 +637,9 @@
  *	block. Clear sense key, ASC & ASCQ if there is no error.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	None.
  */
-void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
+static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
@@ -709,7 +657,7 @@
 	 */
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
-		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
+		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
 				   &sb[1], &sb[2], &sb[3], verbose);
 		sb[1] &= 0x0f;
 	}
@@ -721,12 +669,9 @@
 
 	desc[0] = 0x09;
 
-	/*
-	 * Set length of additional sense data.
-	 * Since we only populate descriptor 0, the total
-	 * length is the same (fixed) length as descriptor 0.
-	 */
-	desc[1] = sb[7] = 14;
+	/* set length of additional sense data */
+	sb[7] = 14;
+	desc[1] = 12;
 
 	/*
 	 * Copy registers into sense buffer.
@@ -754,56 +699,56 @@
 }
 
 /**
- *	ata_gen_fixed_sense - generate a SCSI fixed sense block
+ *	ata_gen_ata_sense - generate a SCSI fixed sense block
  *	@qc: Command that we are erroring out
  *
- *	Leverage ata_to_sense_error() to give us the codes.  Fit our
- *	LBA in here if there's room.
+ *	Generate sense block for a failed ATA command @qc.  Descriptor
+ *	format is used to accommodate LBA48 block address.
  *
  *	LOCKING:
- *	inherited from caller
+ *	None.
  */
-void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
+static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
 {
+	struct ata_device *dev = qc->dev;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->result_tf;
 	unsigned char *sb = cmd->sense_buffer;
+	unsigned char *desc = sb + 8;
 	int verbose = qc->ap->ops->error_handler == NULL;
+	u64 block;
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Use ata_to_sense_error() to map status register bits
+	/* sense data is current and format is descriptor */
+	sb[0] = 0x72;
+
+	/* Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
 	if (qc->err_mask ||
 	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
-		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
-				   &sb[2], &sb[12], &sb[13], verbose);
-		sb[2] &= 0x0f;
-	}
-
-	sb[0] = 0x70;
-	sb[7] = 0x0a;
-
-	if (tf->flags & ATA_TFLAG_LBA48) {
-		/* TODO: find solution for LBA48 descriptors */
+		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+				   &sb[1], &sb[2], &sb[3], verbose);
+		sb[1] &= 0x0f;
 	}
 
-	else if (tf->flags & ATA_TFLAG_LBA) {
-		/* A small (28b) LBA will fit in the 32b info field */
-		sb[0] |= 0x80;		/* set valid bit */
-		sb[3] = tf->device & 0x0f;
-		sb[4] = tf->lbah;
-		sb[5] = tf->lbam;
-		sb[6] = tf->lbal;
-	}
+	block = ata_tf_read_block(&qc->result_tf, dev);
 
-	else {
-		/* TODO: C/H/S */
-	}
+	/* information sense data descriptor */
+	sb[7] = 12;
+	desc[0] = 0x00;
+	desc[1] = 10;
+
+	desc[2] |= 0x80;	/* valid */
+	desc[6] = block >> 40;
+	desc[7] = block >> 32;
+	desc[8] = block >> 24;
+	desc[9] = block >> 16;
+	desc[10] = block >> 8;
+	desc[11] = block;
 }
 
 static void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -815,23 +760,10 @@
 static void ata_scsi_dev_config(struct scsi_device *sdev,
 				struct ata_device *dev)
 {
-	unsigned int max_sectors;
-
-	/* TODO: 2048 is an arbitrary number, not the
-	 * hardware maximum.  This should be increased to
-	 * 65534 when Jens Axboe's patch for dynamically
-	 * determining max_sectors is merged.
-	 */
-	max_sectors = ATA_MAX_SECTORS;
-	if (dev->flags & ATA_DFLAG_LBA48)
-		max_sectors = ATA_MAX_SECTORS_LBA48;
-	if (dev->max_sectors)
-		max_sectors = dev->max_sectors;
+	/* configure max sectors */
+	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
 
-	blk_queue_max_sectors(sdev->request_queue, max_sectors);
-
-	/*
-	 * SATA DMA transfers must be multiples of 4 byte, so
+	/* SATA DMA transfers must be multiples of 4 bytes, so
 	 * we need to pad ATAPI transfers using an extra sg.
 	 * Decrement max hw segments accordingly.
 	 */
@@ -870,6 +802,8 @@
 
 	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
 
+	sdev->manage_start_stop = 1;
+
 	if (dev)
 		ata_scsi_dev_config(sdev, dev);
 
@@ -929,28 +863,56 @@
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev;
-	int max_depth;
+	unsigned long flags;
 
-	if (queue_depth < 1)
+	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
 		return sdev->queue_depth;
 
 	dev = ata_scsi_find_dev(ap, sdev);
 	if (!dev || !ata_dev_enabled(dev))
 		return sdev->queue_depth;
 
-	max_depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
-	max_depth = min(ATA_MAX_QUEUE - 1, max_depth);
-	if (queue_depth > max_depth)
-		queue_depth = max_depth;
+	/* NCQ enabled? */
+	spin_lock_irqsave(ap->lock, flags);
+	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
+	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
+		dev->flags |= ATA_DFLAG_NCQ_OFF;
+		queue_depth = 1;
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* limit and apply queue depth */
+	queue_depth = min(queue_depth, sdev->host->can_queue);
+	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
+	queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
+
+	if (sdev->queue_depth == queue_depth)
+		return -EINVAL;
 
 	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
 	return queue_depth;
 }
 
+/* XXX: for spindown warning */
+static void ata_delayed_done_timerfn(unsigned long arg)
+{
+	struct scsi_cmnd *scmd = (void *)arg;
+
+	scmd->scsi_done(scmd);
+}
+
+/* XXX: for spindown warning */
+static void ata_delayed_done(struct scsi_cmnd *scmd)
+{
+	static struct timer_list timer;
+
+	setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
+	mod_timer(&timer, jiffies + 5 * HZ);
+}
+
 /**
  *	ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
  *	@qc: Storage for translated ATA taskfile
- *	@scsicmd: SCSI command to translate
  *
  *	Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
  *	(to start). Perhaps these commands should be preceded by
@@ -958,27 +920,30 @@
  *	[See SAT revision 5 at www.t10.org]
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, non-zero on error.
  */
-
-static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
-					     const u8 *scsicmd)
+static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 {
+	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->tf;
+	const u8 *cdb = scmd->cmnd;
+
+	if (scmd->cmd_len < 5)
+		goto invalid_fld;
 
 	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 	tf->protocol = ATA_PROT_NODATA;
-	if (scsicmd[1] & 0x1) {
+	if (cdb[1] & 0x1) {
 		;	/* ignore IMMED bit, violates sat-r05 */
 	}
-	if (scsicmd[4] & 0x2)
+	if (cdb[4] & 0x2)
 		goto invalid_fld;       /* LOEJ bit set not supported */
-	if (((scsicmd[4] >> 4) & 0xf) != 0)
+	if (((cdb[4] >> 4) & 0xf) != 0)
 		goto invalid_fld;       /* power conditions not supported */
-	if (scsicmd[4] & 0x1) {
+	if (cdb[4] & 0x1) {
 		tf->nsect = 1;	/* 1 sector, lba=0 */
 
 		if (qc->dev->flags & ATA_DFLAG_LBA) {
@@ -997,10 +962,37 @@
 
 		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
 	} else {
-		tf->nsect = 0;	/* time period value (0 implies now) */
-		tf->command = ATA_CMD_STANDBY;
-		/* Consider: ATA STANDBY IMMEDIATE command */
+		/* XXX: This is for backward compatibility, will be
+		 * removed.  Read Documentation/feature-removal-schedule.txt
+		 * for more info.
+		 */
+		if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
+		    (system_state == SYSTEM_HALT ||
+		     system_state == SYSTEM_POWER_OFF)) {
+			static unsigned long warned = 0;
+
+			if (!test_and_set_bit(0, &warned)) {
+				ata_dev_printk(qc->dev, KERN_WARNING,
+					"DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
+					"UPDATE SHUTDOWN UTILITY\n");
+				ata_dev_printk(qc->dev, KERN_WARNING,
+					"For more info, visit "
+					"http://linux-ata.org/shutdown.html\n");
+
+				/* ->scsi_done is not used, use it for
+				 * delayed completion.
+				 */
+				scmd->scsi_done = qc->scsidone;
+				qc->scsidone = ata_delayed_done;
+			}
+			scmd->result = SAM_STAT_GOOD;
+			return 1;
+		}
+
+		/* Issue ATA STANDBY IMMEDIATE command */
+		tf->command = ATA_CMD_STANDBYNOW1;
 	}
+
 	/*
 	 * Standby and Idle condition timers could be implemented but that
 	 * would require libata to implement the Power condition mode page
@@ -1011,7 +1003,7 @@
 	return 0;
 
 invalid_fld:
-	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
 	return 1;
 }
@@ -1020,27 +1012,24 @@
 /**
  *	ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
  *	@qc: Storage for translated ATA taskfile
- *	@scsicmd: SCSI command to translate (ignored)
  *
  *	Sets up an ATA taskfile to issue FLUSH CACHE or
  *	FLUSH CACHE EXT.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, non-zero on error.
  */
-
-static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
+static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
 {
 	struct ata_taskfile *tf = &qc->tf;
 
 	tf->flags |= ATA_TFLAG_DEVICE;
 	tf->protocol = ATA_PROT_NODATA;
 
-	if ((qc->dev->flags & ATA_DFLAG_LBA48) &&
-	    (ata_id_has_flush_ext(qc->dev->id)))
+	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
 		tf->command = ATA_CMD_FLUSH_EXT;
 	else
 		tf->command = ATA_CMD_FLUSH;
@@ -1050,7 +1039,7 @@
 
 /**
  *	scsi_6_lba_len - Get LBA and transfer length
- *	@scsicmd: SCSI command to translate
+ *	@cdb: SCSI command to translate
  *
  *	Calculate LBA and transfer length for 6-byte commands.
  *
@@ -1058,18 +1047,18 @@
  *	@plba: the LBA
  *	@plen: the transfer length
  */
-
-static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
+static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
 {
 	u64 lba = 0;
-	u32 len = 0;
+	u32 len;
 
 	VPRINTK("six-byte command\n");
 
-	lba |= ((u64)scsicmd[2]) << 8;
-	lba |= ((u64)scsicmd[3]);
+	lba |= ((u64)(cdb[1] & 0x1f)) << 16;
+	lba |= ((u64)cdb[2]) << 8;
+	lba |= ((u64)cdb[3]);
 
-	len |= ((u32)scsicmd[4]);
+	len = cdb[4];
 
 	*plba = lba;
 	*plen = len;
@@ -1077,7 +1066,7 @@
 
 /**
  *	scsi_10_lba_len - Get LBA and transfer length
- *	@scsicmd: SCSI command to translate
+ *	@cdb: SCSI command to translate
  *
  *	Calculate LBA and transfer length for 10-byte commands.
  *
@@ -1085,21 +1074,20 @@
  *	@plba: the LBA
  *	@plen: the transfer length
  */
-
-static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
+static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
 {
 	u64 lba = 0;
 	u32 len = 0;
 
 	VPRINTK("ten-byte command\n");
 
-	lba |= ((u64)scsicmd[2]) << 24;
-	lba |= ((u64)scsicmd[3]) << 16;
-	lba |= ((u64)scsicmd[4]) << 8;
-	lba |= ((u64)scsicmd[5]);
+	lba |= ((u64)cdb[2]) << 24;
+	lba |= ((u64)cdb[3]) << 16;
+	lba |= ((u64)cdb[4]) << 8;
+	lba |= ((u64)cdb[5]);
 
-	len |= ((u32)scsicmd[7]) << 8;
-	len |= ((u32)scsicmd[8]);
+	len |= ((u32)cdb[7]) << 8;
+	len |= ((u32)cdb[8]);
 
 	*plba = lba;
 	*plen = len;
@@ -1107,7 +1095,7 @@
 
 /**
  *	scsi_16_lba_len - Get LBA and transfer length
- *	@scsicmd: SCSI command to translate
+ *	@cdb: SCSI command to translate
  *
  *	Calculate LBA and transfer length for 16-byte commands.
  *
@@ -1115,27 +1103,26 @@
  *	@plba: the LBA
  *	@plen: the transfer length
  */
-
-static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
+static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
 {
 	u64 lba = 0;
 	u32 len = 0;
 
 	VPRINTK("sixteen-byte command\n");
 
-	lba |= ((u64)scsicmd[2]) << 56;
-	lba |= ((u64)scsicmd[3]) << 48;
-	lba |= ((u64)scsicmd[4]) << 40;
-	lba |= ((u64)scsicmd[5]) << 32;
-	lba |= ((u64)scsicmd[6]) << 24;
-	lba |= ((u64)scsicmd[7]) << 16;
-	lba |= ((u64)scsicmd[8]) << 8;
-	lba |= ((u64)scsicmd[9]);
-
-	len |= ((u32)scsicmd[10]) << 24;
-	len |= ((u32)scsicmd[11]) << 16;
-	len |= ((u32)scsicmd[12]) << 8;
-	len |= ((u32)scsicmd[13]);
+	lba |= ((u64)cdb[2]) << 56;
+	lba |= ((u64)cdb[3]) << 48;
+	lba |= ((u64)cdb[4]) << 40;
+	lba |= ((u64)cdb[5]) << 32;
+	lba |= ((u64)cdb[6]) << 24;
+	lba |= ((u64)cdb[7]) << 16;
+	lba |= ((u64)cdb[8]) << 8;
+	lba |= ((u64)cdb[9]);
+
+	len |= ((u32)cdb[10]) << 24;
+	len |= ((u32)cdb[11]) << 16;
+	len |= ((u32)cdb[12]) << 8;
+	len |= ((u32)cdb[13]);
 
 	*plba = lba;
 	*plen = len;
@@ -1144,33 +1131,37 @@
 /**
  *	ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
  *	@qc: Storage for translated ATA taskfile
- *	@scsicmd: SCSI command to translate
  *
  *	Converts SCSI VERIFY command to an ATA READ VERIFY command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, non-zero on error.
  */
-
-static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
+static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
 {
+	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_device *dev = qc->dev;
 	u64 dev_sectors = qc->dev->n_sectors;
+	const u8 *cdb = scmd->cmnd;
 	u64 block;
 	u32 n_block;
 
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->protocol = ATA_PROT_NODATA;
 
-	if (scsicmd[0] == VERIFY)
-		scsi_10_lba_len(scsicmd, &block, &n_block);
-	else if (scsicmd[0] == VERIFY_16)
-		scsi_16_lba_len(scsicmd, &block, &n_block);
-	else
+	if (cdb[0] == VERIFY) {
+		if (scmd->cmd_len < 10)
+			goto invalid_fld;
+		scsi_10_lba_len(cdb, &block, &n_block);
+	} else if (cdb[0] == VERIFY_16) {
+		if (scmd->cmd_len < 16)
+			goto invalid_fld;
+		scsi_16_lba_len(cdb, &block, &n_block);
+	} else
 		goto invalid_fld;
 
 	if (!n_block)
@@ -1245,24 +1236,23 @@
 	return 0;
 
 invalid_fld:
-	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
 	return 1;
 
 out_of_range:
-	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
 	/* "Logical Block Address out of range" */
 	return 1;
 
 nothing_to_do:
-	qc->scsicmd->result = SAM_STAT_GOOD;
+	scmd->result = SAM_STAT_GOOD;
 	return 1;
 }
 
 /**
  *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
  *	@qc: Storage for translated ATA taskfile
- *	@scsicmd: SCSI command to translate
  *
  *	Converts any of six SCSI read/write commands into the
  *	ATA counterpart, including starting sector (LBA),
@@ -1273,37 +1263,38 @@
  *	%WRITE_16 are currently supported.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, non-zero on error.
  */
-
-static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
+static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 {
-	struct ata_taskfile *tf = &qc->tf;
-	struct ata_device *dev = qc->dev;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	const u8 *cdb = scmd->cmnd;
+	unsigned int tf_flags = 0;
 	u64 block;
 	u32 n_block;
+	int rc;
 
-	qc->flags |= ATA_QCFLAG_IO;
-	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-
-	if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
-	    scsicmd[0] == WRITE_16)
-		tf->flags |= ATA_TFLAG_WRITE;
+	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
+		tf_flags |= ATA_TFLAG_WRITE;
 
 	/* Calculate the SCSI LBA, transfer length and FUA. */
-	switch (scsicmd[0]) {
+	switch (cdb[0]) {
 	case READ_10:
 	case WRITE_10:
-		scsi_10_lba_len(scsicmd, &block, &n_block);
-		if (unlikely(scsicmd[1] & (1 << 3)))
-			tf->flags |= ATA_TFLAG_FUA;
+		if (unlikely(scmd->cmd_len < 10))
+			goto invalid_fld;
+		scsi_10_lba_len(cdb, &block, &n_block);
+		if (unlikely(cdb[1] & (1 << 3)))
+			tf_flags |= ATA_TFLAG_FUA;
 		break;
 	case READ_6:
 	case WRITE_6:
-		scsi_6_lba_len(scsicmd, &block, &n_block);
+		if (unlikely(scmd->cmd_len < 6))
+			goto invalid_fld;
+		scsi_6_lba_len(cdb, &block, &n_block);
 
 		/* for 6-byte r/w commands, transfer length 0
 		 * means 256 blocks of data, not 0 block.
@@ -1313,9 +1304,11 @@
 		break;
 	case READ_16:
 	case WRITE_16:
-		scsi_16_lba_len(scsicmd, &block, &n_block);
-		if (unlikely(scsicmd[1] & (1 << 3)))
-			tf->flags |= ATA_TFLAG_FUA;
+		if (unlikely(scmd->cmd_len < 16))
+			goto invalid_fld;
+		scsi_16_lba_len(cdb, &block, &n_block);
+		if (unlikely(cdb[1] & (1 << 3)))
+			tf_flags |= ATA_TFLAG_FUA;
 		break;
 	default:
 		DPRINTK("no-byte command\n");
@@ -1333,122 +1326,35 @@
 		 */
 		goto nothing_to_do;
 
-	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
-		/* yay, NCQ */
-		if (!lba_48_ok(block, n_block))
-			goto out_of_range;
-
-		tf->protocol = ATA_PROT_NCQ;
-		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-
-		if (tf->flags & ATA_TFLAG_WRITE)
-			tf->command = ATA_CMD_FPDMA_WRITE;
-		else
-			tf->command = ATA_CMD_FPDMA_READ;
-
-		qc->nsect = n_block;
-
-		tf->nsect = qc->tag << 3;
-		tf->hob_feature = (n_block >> 8) & 0xff;
-		tf->feature = n_block & 0xff;
-
-		tf->hob_lbah = (block >> 40) & 0xff;
-		tf->hob_lbam = (block >> 32) & 0xff;
-		tf->hob_lbal = (block >> 24) & 0xff;
-		tf->lbah = (block >> 16) & 0xff;
-		tf->lbam = (block >> 8) & 0xff;
-		tf->lbal = block & 0xff;
-
-		tf->device = 1 << 6;
-		if (tf->flags & ATA_TFLAG_FUA)
-			tf->device |= 1 << 7;
-	} else if (dev->flags & ATA_DFLAG_LBA) {
-		tf->flags |= ATA_TFLAG_LBA;
-
-		if (lba_28_ok(block, n_block)) {
-			/* use LBA28 */
-			tf->device |= (block >> 24) & 0xf;
-		} else if (lba_48_ok(block, n_block)) {
-			if (!(dev->flags & ATA_DFLAG_LBA48))
-				goto out_of_range;
-
-			/* use LBA48 */
-			tf->flags |= ATA_TFLAG_LBA48;
-
-			tf->hob_nsect = (n_block >> 8) & 0xff;
-
-			tf->hob_lbah = (block >> 40) & 0xff;
-			tf->hob_lbam = (block >> 32) & 0xff;
-			tf->hob_lbal = (block >> 24) & 0xff;
-		} else
-			/* request too large even for LBA48 */
-			goto out_of_range;
-
-		if (unlikely(ata_rwcmd_protocol(qc) < 0))
-			goto invalid_fld;
-
-		qc->nsect = n_block;
-		tf->nsect = n_block & 0xff;
-
-		tf->lbah = (block >> 16) & 0xff;
-		tf->lbam = (block >> 8) & 0xff;
-		tf->lbal = block & 0xff;
-
-		tf->device |= ATA_LBA;
-	} else {
-		/* CHS */
-		u32 sect, head, cyl, track;
-
-		/* The request -may- be too large for CHS addressing. */
-		if (!lba_28_ok(block, n_block))
-			goto out_of_range;
-
-		if (unlikely(ata_rwcmd_protocol(qc) < 0))
-			goto invalid_fld;
-
-		/* Convert LBA to CHS */
-		track = (u32)block / dev->sectors;
-		cyl   = track / dev->heads;
-		head  = track % dev->heads;
-		sect  = (u32)block % dev->sectors + 1;
-
-		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
-			(u32)block, track, cyl, head, sect);
-
-		/* Check whether the converted CHS can fit.
-		   Cylinder: 0-65535
-		   Head: 0-15
-		   Sector: 1-255*/
-		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
-			goto out_of_range;
-
-		qc->nsect = n_block;
-		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
-		tf->lbal = sect;
-		tf->lbam = cyl;
-		tf->lbah = cyl >> 8;
-		tf->device |= head;
-	}
+	qc->flags |= ATA_QCFLAG_IO;
+	qc->nbytes = n_block * ATA_SECT_SIZE;
 
-	return 0;
+	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
+			     qc->tag);
+	if (likely(rc == 0))
+		return 0;
 
+	if (rc == -ERANGE)
+		goto out_of_range;
+	/* treat all other errors as -EINVAL, fall through */
 invalid_fld:
-	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
 	return 1;
 
 out_of_range:
-	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
 	/* "Logical Block Address out of range" */
 	return 1;
 
 nothing_to_do:
-	qc->scsicmd->result = SAM_STAT_GOOD;
+	scmd->result = SAM_STAT_GOOD;
 	return 1;
 }
 
 static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 {
+	struct ata_port *ap = qc->ap;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	u8 *cdb = cmd->cmnd;
  	int need_sense = (qc->err_mask != 0);
@@ -1457,11 +1363,12 @@
 	 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
 	 * cache
 	 */
-	if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
+	if (ap->ops->error_handler &&
+	    !need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
 	    ((qc->tf.feature == SETFEATURES_WC_ON) ||
 	     (qc->tf.feature == SETFEATURES_WC_OFF))) {
-		qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
-		ata_port_schedule_eh(qc->ap);
+		ap->eh_info.action |= ATA_EH_REVALIDATE;
+		ata_port_schedule_eh(ap);
 	}
 
 	/* For ATA pass thru (SAT) commands, generate a sense block if
@@ -1473,7 +1380,7 @@
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
  	    ((cdb[2] & 0x20) || need_sense)) {
- 		ata_gen_ata_desc_sense(qc);
+		ata_gen_passthru_sense(qc);
 	} else {
 		if (!need_sense) {
 			cmd->result = SAM_STAT_GOOD;
@@ -1484,12 +1391,20 @@
 			 * good for smaller LBA (and maybe CHS?)
 			 * devices.
 			 */
-			ata_gen_fixed_sense(qc);
+			ata_gen_ata_sense(qc);
 		}
 	}
 
-	if (need_sense && !qc->ap->ops->error_handler)
-		ata_dump_status(qc->ap->id, &qc->result_tf);
+	/* XXX: track spindown state for spindown skipping and warning */
+	if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
+		     qc->tf.command == ATA_CMD_STANDBYNOW1))
+		qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
+	else if (likely(system_state != SYSTEM_HALT &&
+			system_state != SYSTEM_POWER_OFF))
+		qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;
+
+	if (need_sense && !ap->ops->error_handler)
+		ata_dump_status(ap->print_id, &qc->result_tf);
 
 	qc->scsidone(cmd);
 
@@ -1507,7 +1422,7 @@
  *	issued to @dev.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	1 if deferring is needed, 0 otherwise.
@@ -1515,11 +1430,9 @@
 static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
 {
 	struct ata_port *ap = dev->ap;
+	int is_ncq = is_io && ata_ncq_enabled(dev);
 
-	if (!(dev->flags & ATA_DFLAG_NCQ))
-		return 0;
-
-	if (is_io) {
+	if (is_ncq) {
 		if (!ata_tag_valid(ap->active_tag))
 			return 0;
 	} else {
@@ -1550,7 +1463,7 @@
  *	termination.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
@@ -1561,7 +1474,6 @@
 			      ata_xlat_func_t xlat_func)
 {
 	struct ata_queued_cmd *qc;
-	u8 *scsicmd = cmd->cmnd;
 	int is_io = xlat_func == ata_scsi_rw_xlat;
 
 	VPRINTK("ENTER\n");
@@ -1593,7 +1505,7 @@
 
 	qc->complete_fn = ata_scsi_qc_complete;
 
-	if (xlat_func(qc, scsicmd))
+	if (xlat_func(qc))
 		goto early_finish;
 
 	/* select device, send command to hardware */
@@ -1604,15 +1516,15 @@
 
 early_finish:
         ata_qc_free(qc);
-	done(cmd);
+	qc->scsidone(cmd);
 	DPRINTK("EXIT - early finish (good or error)\n");
 	return 0;
 
 err_did:
 	ata_qc_free(qc);
-err_mem:
 	cmd->result = (DID_ERROR << 16);
-	done(cmd);
+	qc->scsidone(cmd);
+err_mem:
 	DPRINTK("EXIT - internal\n");
 	return 0;
 
@@ -1629,7 +1541,7 @@
  *	Maps buffer contained within SCSI command @cmd.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Length of response buffer.
@@ -1644,7 +1556,7 @@
 		struct scatterlist *sg;
 
 		sg = (struct scatterlist *) cmd->request_buffer;
-		buf = kmap_atomic(sg->page, KM_USER0) + sg->offset;
+		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
 		buflen = sg->length;
 	} else {
 		buf = cmd->request_buffer;
@@ -1663,7 +1575,7 @@
  *	Unmaps response buffer contained within @cmd.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
@@ -1672,7 +1584,7 @@
 		struct scatterlist *sg;
 
 		sg = (struct scatterlist *) cmd->request_buffer;
-		kunmap_atomic(buf - sg->offset, KM_USER0);
+		kunmap_atomic(buf - sg->offset, KM_IRQ0);
 	}
 }
 
@@ -1689,7 +1601,7 @@
  *	and sense buffer are assumed to be set).
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
@@ -1711,6 +1623,22 @@
 }
 
 /**
+ *	ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
+ *	@idx: byte index into SCSI response buffer
+ *	@val: value to set
+ *
+ *	To be used by SCSI command simulator functions.  This macro
+ *	expects two local variables, u8 *rbuf and unsigned int buflen,
+ *	to be in scope.
+ *
+ *	LOCKING:
+ *	None.
+ */
+#define ATA_SCSI_RBUF_SET(idx, val) do { \
+		if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
+	} while (0)
+
+/**
  *	ata_scsiop_inq_std - Simulate INQUIRY command
  *	@args: device IDENTIFY data / SCSI command of interest.
  *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
@@ -1720,7 +1648,7 @@
  *	with non-VPD INQUIRY command output.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -1744,8 +1672,8 @@
 
 	if (buflen > 35) {
 		memcpy(&rbuf[8], "ATA     ", 8);
-		ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16);
-		ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
+		ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+		ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
 		if (rbuf[32] == 0 || rbuf[32] == ' ')
 			memcpy(&rbuf[32], "n/a ", 4);
 	}
@@ -1776,7 +1704,7 @@
  *	Returns list of inquiry VPD pages available.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
@@ -1804,7 +1732,7 @@
  *	Returns ATA device serial number.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
@@ -1814,13 +1742,13 @@
 		0,
 		0x80,			/* this page code */
 		0,
-		ATA_SERNO_LEN,		/* page len */
+		ATA_ID_SERNO_LEN,	/* page len */
 	};
 	memcpy(rbuf, hdr, sizeof(hdr));
 
-	if (buflen > (ATA_SERNO_LEN + 4 - 1))
+	if (buflen > (ATA_ID_SERNO_LEN + 4 - 1))
 		ata_id_string(args->id, (unsigned char *) &rbuf[4],
-			      ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
+			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
 
 	return 0;
 }
@@ -1837,7 +1765,7 @@
  *	   name ("ATA     "), model and serial numbers.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
@@ -1845,19 +1773,18 @@
 {
 	int num;
 	const int sat_model_serial_desc_len = 68;
-	const int ata_model_byte_len = 40;
 
 	rbuf[1] = 0x83;			/* this page code */
 	num = 4;
 
-	if (buflen > (ATA_SERNO_LEN + num + 3)) {
+	if (buflen > (ATA_ID_SERNO_LEN + num + 3)) {
 		/* piv=0, assoc=lu, code_set=ACSII, designator=vendor */
 		rbuf[num + 0] = 2;
-		rbuf[num + 3] = ATA_SERNO_LEN;
+		rbuf[num + 3] = ATA_ID_SERNO_LEN;
 		num += 4;
 		ata_id_string(args->id, (unsigned char *) rbuf + num,
-			      ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
-		num += ATA_SERNO_LEN;
+			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+		num += ATA_ID_SERNO_LEN;
 	}
 	if (buflen > (sat_model_serial_desc_len + num + 3)) {
 		/* SAT defined lu model and serial numbers descriptor */
@@ -1869,11 +1796,11 @@
 		memcpy(rbuf + num, "ATA     ", 8);
 		num += 8;
 		ata_id_string(args->id, (unsigned char *) rbuf + num,
-			      ATA_ID_PROD_OFS, ata_model_byte_len);
-		num += ata_model_byte_len;
+			      ATA_ID_PROD, ATA_ID_PROD_LEN);
+		num += ATA_ID_PROD_LEN;
 		ata_id_string(args->id, (unsigned char *) rbuf + num,
-			      ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
-		num += ATA_SERNO_LEN;
+			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+		num += ATA_ID_SERNO_LEN;
 	}
 	rbuf[3] = num - 4;    /* page len (assume less than 256 bytes) */
 	return 0;
@@ -1889,7 +1816,7 @@
  *	that the caller should successfully complete this SCSI command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
@@ -2001,15 +1928,15 @@
  */
 static int ata_dev_supports_fua(u16 *id)
 {
-	unsigned char model[41], fw[9];
+	unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
 
 	if (!libata_fua)
 		return 0;
 	if (!ata_id_has_fua(id))
 		return 0;
 
-	ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
-	ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw));
+	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+	ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
 
 	if (strcmp(model, "Maxtor"))
 		return 1;
@@ -2030,7 +1957,7 @@
  *	descriptor for other device types.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
@@ -2169,67 +2096,42 @@
  *	Simulate READ CAPACITY commands.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	None.
  */
-
 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
 			        unsigned int buflen)
 {
-	u64 n_sectors;
-	u32 tmp;
+	u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
 
 	VPRINTK("ENTER\n");
 
-	if (ata_id_has_lba(args->id)) {
-		if (ata_id_has_lba48(args->id))
-			n_sectors = ata_id_u64(args->id, 100);
-		else
-			n_sectors = ata_id_u32(args->id, 60);
-	} else {
-		/* CHS default translation */
-		n_sectors = args->id[1] * args->id[3] * args->id[6];
-
-		if (ata_id_current_chs_valid(args->id))
-			/* CHS current translation */
-			n_sectors = ata_id_u32(args->id, 57);
-	}
-
-	n_sectors--;		/* ATA TotalUserSectors - 1 */
-
 	if (args->cmd->cmnd[0] == READ_CAPACITY) {
-		if( n_sectors >= 0xffffffffULL )
-			tmp = 0xffffffff ;  /* Return max count on overflow */
-		else
-			tmp = n_sectors ;
+		if (last_lba >= 0xffffffffULL)
+			last_lba = 0xffffffff;
 
 		/* sector count, 32-bit */
-		rbuf[0] = tmp >> (8 * 3);
-		rbuf[1] = tmp >> (8 * 2);
-		rbuf[2] = tmp >> (8 * 1);
-		rbuf[3] = tmp;
+		ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3));
+		ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2));
+		ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1));
+		ATA_SCSI_RBUF_SET(3, last_lba);
 
 		/* sector size */
-		tmp = ATA_SECT_SIZE;
-		rbuf[6] = tmp >> 8;
-		rbuf[7] = tmp;
-
+		ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
+		ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE);
 	} else {
 		/* sector count, 64-bit */
-		tmp = n_sectors >> (8 * 4);
-		rbuf[2] = tmp >> (8 * 3);
-		rbuf[3] = tmp >> (8 * 2);
-		rbuf[4] = tmp >> (8 * 1);
-		rbuf[5] = tmp;
-		tmp = n_sectors;
-		rbuf[6] = tmp >> (8 * 3);
-		rbuf[7] = tmp >> (8 * 2);
-		rbuf[8] = tmp >> (8 * 1);
-		rbuf[9] = tmp;
+		ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
+		ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6));
+		ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5));
+		ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4));
+		ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3));
+		ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2));
+		ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1));
+		ATA_SCSI_RBUF_SET(7, last_lba);
 
 		/* sector size */
-		tmp = ATA_SECT_SIZE;
-		rbuf[12] = tmp >> 8;
-		rbuf[13] = tmp;
+		ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
+		ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE);
 	}
 
 	return 0;
@@ -2244,7 +2146,7 @@
  *	Simulate REPORT LUNS command.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
@@ -2296,7 +2198,7 @@
  *	and the specified additional sense codes.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
@@ -2315,7 +2217,7 @@
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
-		ata_gen_ata_desc_sense(qc);
+		ata_gen_passthru_sense(qc);
 	}
 
 	qc->scsidone(qc->scsicmd);
@@ -2390,7 +2292,7 @@
 			 * sense descriptors, since that's only
 			 * correct for ATA, not ATAPI
 			 */
-			ata_gen_ata_desc_sense(qc);
+			ata_gen_passthru_sense(qc);
 		}
 
 		/* SCSI EH automatically locks door if sdev->locked is
@@ -2423,7 +2325,7 @@
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
-		ata_gen_ata_desc_sense(qc);
+		ata_gen_passthru_sense(qc);
 	} else {
 		u8 *scsicmd = cmd->cmnd;
 
@@ -2458,33 +2360,32 @@
 /**
  *	atapi_xlat - Initialize PACKET taskfile
  *	@qc: command structure to be initialized
- *	@scsicmd: SCSI CDB associated with this PACKET command
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Zero on success, non-zero on failure.
  */
-
-static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
+static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 {
-	struct scsi_cmnd *cmd = qc->scsicmd;
+	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_device *dev = qc->dev;
 	int using_pio = (dev->flags & ATA_DFLAG_PIO);
-	int nodata = (cmd->sc_data_direction == DMA_NONE);
+	int nodata = (scmd->sc_data_direction == DMA_NONE);
 
 	if (!using_pio)
 		/* Check whether ATAPI DMA is safe */
 		if (ata_check_atapi_dma(qc))
 			using_pio = 1;
 
-	memcpy(&qc->cdb, scsicmd, dev->cdb_len);
+	memset(qc->cdb, 0, dev->cdb_len);
+	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
 
 	qc->complete_fn = atapi_qc_complete;
 
 	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
 		qc->tf.flags |= ATA_TFLAG_WRITE;
 		DPRINTK("direction: write\n");
 	}
@@ -2506,12 +2407,12 @@
 		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
 		qc->tf.feature |= ATAPI_PKT_DMA;
 
-		if (atapi_dmadir && (cmd->sc_data_direction != DMA_TO_DEVICE))
+		if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE))
 			/* some SATA bridges need us to indicate data xfer direction */
 			qc->tf.feature |= ATAPI_DMADIR;
 	}
 
-	qc->nbytes = cmd->request_bufflen;
+	qc->nbytes = scmd->request_bufflen;
 
 	return 0;
 }
@@ -2540,7 +2441,7 @@
  *	Determine if commands should be sent to the specified device.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	0 if commands are not allowed / 1 if commands are allowed
@@ -2574,7 +2475,7 @@
  *	SCSI command to be sent.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	Associated ATA device, or %NULL if not found.
@@ -2631,28 +2532,27 @@
 /**
  *	ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
  *	@qc: command structure to be initialized
- *	@scsicmd: SCSI command to convert
  *
  *	Handles either 12 or 16-byte versions of the CDB.
  *
  *	RETURNS:
  *	Zero on success, non-zero on failure.
  */
-static unsigned int
-ata_scsi_pass_thru(struct ata_queued_cmd *qc, const u8 *scsicmd)
+static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 {
 	struct ata_taskfile *tf = &(qc->tf);
-	struct scsi_cmnd *cmd = qc->scsicmd;
+	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_device *dev = qc->dev;
+	const u8 *cdb = scmd->cmnd;
 
-	if ((tf->protocol = ata_scsi_map_proto(scsicmd[1])) == ATA_PROT_UNKNOWN)
+	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
 		goto invalid_fld;
 
 	/* We may not issue DMA commands if no DMA mode is set */
 	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
 		goto invalid_fld;
 
-	if (scsicmd[1] & 0xe0)
+	if (cdb[1] & 0xe0)
 		/* PIO multi not supported yet */
 		goto invalid_fld;
 
@@ -2660,18 +2560,18 @@
 	 * 12 and 16 byte CDBs use different offsets to
 	 * provide the various register values.
 	 */
-	if (scsicmd[0] == ATA_16) {
+	if (cdb[0] == ATA_16) {
 		/*
 		 * 16-byte CDB - may contain extended commands.
 		 *
 		 * If that is the case, copy the upper byte register values.
 		 */
-		if (scsicmd[1] & 0x01) {
-			tf->hob_feature = scsicmd[3];
-			tf->hob_nsect = scsicmd[5];
-			tf->hob_lbal = scsicmd[7];
-			tf->hob_lbam = scsicmd[9];
-			tf->hob_lbah = scsicmd[11];
+		if (cdb[1] & 0x01) {
+			tf->hob_feature = cdb[3];
+			tf->hob_nsect = cdb[5];
+			tf->hob_lbal = cdb[7];
+			tf->hob_lbam = cdb[9];
+			tf->hob_lbah = cdb[11];
 			tf->flags |= ATA_TFLAG_LBA48;
 		} else
 			tf->flags &= ~ATA_TFLAG_LBA48;
@@ -2679,26 +2579,26 @@
 		/*
 		 * Always copy low byte, device and command registers.
 		 */
-		tf->feature = scsicmd[4];
-		tf->nsect = scsicmd[6];
-		tf->lbal = scsicmd[8];
-		tf->lbam = scsicmd[10];
-		tf->lbah = scsicmd[12];
-		tf->device = scsicmd[13];
-		tf->command = scsicmd[14];
+		tf->feature = cdb[4];
+		tf->nsect = cdb[6];
+		tf->lbal = cdb[8];
+		tf->lbam = cdb[10];
+		tf->lbah = cdb[12];
+		tf->device = cdb[13];
+		tf->command = cdb[14];
 	} else {
 		/*
 		 * 12-byte CDB - incapable of extended commands.
 		 */
 		tf->flags &= ~ATA_TFLAG_LBA48;
 
-		tf->feature = scsicmd[3];
-		tf->nsect = scsicmd[4];
-		tf->lbal = scsicmd[5];
-		tf->lbam = scsicmd[6];
-		tf->lbah = scsicmd[7];
-		tf->device = scsicmd[8];
-		tf->command = scsicmd[9];
+		tf->feature = cdb[3];
+		tf->nsect = cdb[4];
+		tf->lbal = cdb[5];
+		tf->lbam = cdb[6];
+		tf->lbah = cdb[7];
+		tf->device = cdb[8];
+		tf->command = cdb[9];
 	}
 	/*
 	 * If slave is possible, enforce correct master/slave bit
@@ -2707,6 +2607,18 @@
 		tf->device = qc->dev->devno ?
 			tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
 
+	/* READ/WRITE LONG use a non-standard sect_size */
+	qc->sect_size = ATA_SECT_SIZE;
+	switch (tf->command) {
+	case ATA_CMD_READ_LONG:
+	case ATA_CMD_READ_LONG_ONCE:
+	case ATA_CMD_WRITE_LONG:
+	case ATA_CMD_WRITE_LONG_ONCE:
+		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+			goto invalid_fld;
+		qc->sect_size = scmd->request_bufflen;
+	}
+
 	/*
 	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
 	 * SET_FEATURES - XFER MODE must be preceded/succeeded
@@ -2725,7 +2637,7 @@
 	 */
 	tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
 
-	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+	if (scmd->sc_data_direction == DMA_TO_DEVICE)
 		tf->flags |= ATA_TFLAG_WRITE;
 
 	/*
@@ -2734,7 +2646,7 @@
 	 * TODO: find out if we need to do more here to
 	 *       cover scatter/gather case.
 	 */
-	qc->nsect = cmd->request_bufflen / ATA_SECT_SIZE;
+	qc->nbytes = scmd->request_bufflen;
 
 	/* request result TF */
 	qc->flags |= ATA_QCFLAG_RESULT_TF;
@@ -2742,7 +2654,7 @@
 	return 0;
 
  invalid_fld:
-	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x00);
+	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
 	/* "Invalid field in cdb" */
 	return 1;
 }
@@ -2807,7 +2719,7 @@
 	u8 *scsicmd = cmd->cmnd;
 
 	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
-		ap->id,
+		ap->print_id,
 		scsidev->channel, scsidev->id, scsidev->lun,
 		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
 		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
@@ -2815,22 +2727,30 @@
 #endif
 }
 
-static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
+static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
 				      void (*done)(struct scsi_cmnd *),
 				      struct ata_device *dev)
 {
 	int rc = 0;
 
+	if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) {
+		DPRINTK("bad CDB len=%u, max=%u\n",
+			scmd->cmd_len, dev->cdb_len);
+		scmd->result = DID_ERROR << 16;
+		done(scmd);
+		return 0;
+	}
+
 	if (dev->class == ATA_DEV_ATA) {
 		ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
-							      cmd->cmnd[0]);
+							      scmd->cmnd[0]);
 
 		if (xlat_func)
-			rc = ata_scsi_translate(dev, cmd, done, xlat_func);
+			rc = ata_scsi_translate(dev, scmd, done, xlat_func);
 		else
-			ata_scsi_simulate(dev, cmd, done);
+			ata_scsi_simulate(dev, scmd, done);
 	} else
-		rc = ata_scsi_translate(dev, cmd, done, atapi_xlat);
+		rc = ata_scsi_translate(dev, scmd, done, atapi_xlat);
 
 	return rc;
 }
@@ -2848,7 +2768,7 @@
  *	ATA and ATAPI devices appearing as SCSI devices.
  *
  *	LOCKING:
- *	Releases scsi-layer-held lock, and obtains host_set lock.
+ *	Releases scsi-layer-held lock, and obtains host lock.
  *
  *	RETURNS:
  *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
@@ -2892,7 +2812,7 @@
  *	that can be handled internally.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
@@ -2970,6 +2890,48 @@
 	}
 }
 
+int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+{
+	int i, rc;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct Scsi_Host *shost;
+
+		rc = -ENOMEM;
+		shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
+		if (!shost)
+			goto err_alloc;
+
+		*(struct ata_port **)&shost->hostdata[0] = ap;
+		ap->scsi_host = shost;
+
+		shost->transportt = &ata_scsi_transport_template;
+		shost->unique_id = ap->print_id;
+		shost->max_id = 16;
+		shost->max_lun = 1;
+		shost->max_channel = 1;
+		shost->max_cmd_len = 16;
+
+		rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+		if (rc)
+			goto err_add;
+	}
+
+	return 0;
+
+ err_add:
+	scsi_host_put(host->ports[i]->scsi_host);
+ err_alloc:
+	while (--i >= 0) {
+		struct Scsi_Host *shost = host->ports[i]->scsi_host;
+
+		scsi_remove_host(shost);
+		scsi_host_put(shost);
+	}
+	return rc;
+}
+
 void ata_scsi_scan_host(struct ata_port *ap)
 {
 	unsigned int i;
@@ -2984,7 +2946,7 @@
 		if (!ata_dev_enabled(dev) || dev->sdev)
 			continue;
 
-		sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
+		sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
 		if (!IS_ERR(sdev)) {
 			dev->sdev = sdev;
 			scsi_device_put(sdev);
@@ -2998,11 +2960,11 @@
  *
  *	This function is called from ata_eh_hotplug() and responsible
  *	for taking the SCSI device attached to @dev offline.  This
- *	function is called with host_set lock which protects dev->sdev
+ *	function is called with host lock which protects dev->sdev
  *	against clearing.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  *
  *	RETURNS:
  *	1 if attached SCSI device exists, 0 otherwise.
@@ -3038,16 +3000,16 @@
 	 * be removed if there is __scsi_device_get() interface which
 	 * increments reference counts regardless of device state.
 	 */
-	mutex_lock(&ap->host->scan_mutex);
+	mutex_lock(&ap->scsi_host->scan_mutex);
 	spin_lock_irqsave(ap->lock, flags);
 
-	/* clearing dev->sdev is protected by host_set lock */
+	/* clearing dev->sdev is protected by host lock */
 	sdev = dev->sdev;
 	dev->sdev = NULL;
 
 	if (sdev) {
 		/* If user initiated unplug races with us, sdev can go
-		 * away underneath us after the host_set lock and
+		 * away underneath us after the host lock and
 		 * scan_mutex are released.  Hold onto it.
 		 */
 		if (scsi_device_get(sdev) == 0) {
@@ -3064,7 +3026,7 @@
 	}
 
 	spin_unlock_irqrestore(ap->lock, flags);
-	mutex_unlock(&ap->host->scan_mutex);
+	mutex_unlock(&ap->scsi_host->scan_mutex);
 
 	if (sdev) {
 		ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
@@ -3077,7 +3039,7 @@
 
 /**
  *	ata_scsi_hotplug - SCSI part of hotplug
- *	@data: Pointer to ATA port to perform SCSI hotplug on
+ *	@_data: Pointer to ATA port to perform SCSI hotplug on
  *
  *	Perform SCSI part of hotplug.  It's executed from a separate
  *	workqueue after EH completes.  This is necessary because SCSI
@@ -3087,9 +3049,9 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(void *_data)
 {
-	struct ata_port *ap = data;
+	struct ata_port *ap = _data;
 	int i;
 
 	if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3124,7 +3086,8 @@
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *dev = &ap->device[i];
 		if (ata_dev_enabled(dev) && !dev->sdev) {
-			queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ);
+			queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+				round_jiffies_relative(HZ));
 			break;
 		}
 	}
@@ -3178,17 +3141,19 @@
 			rc = -EINVAL;
 	}
 
-	if (rc == 0)
+	if (rc == 0) {
 		ata_port_schedule_eh(ap);
-
-	spin_unlock_irqrestore(ap->lock, flags);
+		spin_unlock_irqrestore(ap->lock, flags);
+		ata_port_wait_eh(ap);
+	} else
+		spin_unlock_irqrestore(ap->lock, flags);
 
 	return rc;
 }
 
 /**
  *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
- *	@data: Pointer to ATA port to perform scsi_rescan_device()
+ *	@_data: Pointer to ATA port to perform scsi_rescan_device()
  *
  *	After ATA pass thru (SAT) commands are executed successfully,
  *	libata need to propagate the changes to SCSI layer.  This
@@ -3198,50 +3163,37 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(void *_data)
 {
-	struct ata_port *ap = data;
-	struct ata_device *dev;
+	struct ata_port *ap = _data;
+	unsigned long flags;
 	unsigned int i;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
+	spin_lock_irqsave(ap->lock, flags);
 
-		if (ata_dev_enabled(dev) && dev->sdev)
-			scsi_rescan_device(&(dev->sdev->sdev_gendev));
-	}
-}
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		struct scsi_device *sdev = dev->sdev;
 
-static struct ata_probe_ent *
-ata_sas_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
-{
-	struct ata_probe_ent *probe_ent;
+		if (!ata_dev_enabled(dev) || !sdev)
+			continue;
+		if (scsi_device_get(sdev))
+			continue;
 
-	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (!probe_ent) {
-		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
-		       kobject_name(&(dev->kobj)));
-		return NULL;
+		spin_unlock_irqrestore(ap->lock, flags);
+		scsi_rescan_device(&(sdev->sdev_gendev));
+		scsi_device_put(sdev);
+		spin_lock_irqsave(ap->lock, flags);
 	}
 
-	INIT_LIST_HEAD(&probe_ent->node);
-	probe_ent->dev = dev;
-
-	probe_ent->sht = port->sht;
-	probe_ent->host_flags = port->host_flags;
-	probe_ent->pio_mask = port->pio_mask;
-	probe_ent->mwdma_mask = port->mwdma_mask;
-	probe_ent->udma_mask = port->udma_mask;
-	probe_ent->port_ops = port->port_ops;
-
-	return probe_ent;
+	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
  *	ata_sas_port_alloc - Allocate port for a SAS attached SATA device
- *	@pdev: PCI device that the scsi device is attached to
+ *	@host: ATA host container for all SAS ports
  *	@port_info: Information from low-level host driver
- *	@host: SCSI host that the scsi device is attached to
+ *	@shost: SCSI host that the scsi device is attached to
  *
  *	LOCKING:
  *	PCI/etc. bus probe sem.
@@ -3250,25 +3202,25 @@
  *	ata_port pointer on success / NULL on failure.
  */
 
-struct ata_port *ata_sas_port_alloc(struct ata_host_set *host_set,
+struct ata_port *ata_sas_port_alloc(struct ata_host *host,
 				    struct ata_port_info *port_info,
-				    struct Scsi_Host *host)
+				    struct Scsi_Host *shost)
 {
-	struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
-	struct ata_probe_ent *ent;
+	struct ata_port *ap;
 
+	ap = ata_port_alloc(host);
 	if (!ap)
 		return NULL;
 
-	ent = ata_sas_probe_ent_alloc(host_set->dev, port_info);
-	if (!ent) {
-		kfree(ap);
-		return NULL;
-	}
+	ap->port_no = 0;
+	ap->lock = shost->host_lock;
+	ap->pio_mask = port_info->pio_mask;
+	ap->mwdma_mask = port_info->mwdma_mask;
+	ap->udma_mask = port_info->udma_mask;
+	ap->flags |= port_info->flags;
+	ap->ops = port_info->port_ops;
+	ap->cbl = ATA_CBL_SATA;
 
-	ata_port_init(ap, host_set, ent, 0);
-	ap->lock = host->host_lock;
-	kfree(ent);
 	return ap;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
@@ -3324,8 +3276,10 @@
 {
 	int rc = ap->ops->port_start(ap);
 
-	if (!rc)
+	if (!rc) {
+		ap->print_id = ata_print_id++;
 		rc = ata_bus_probe(ap);
+	}
 
 	return rc;
 }
@@ -3339,7 +3293,8 @@
 
 void ata_sas_port_destroy(struct ata_port *ap)
 {
-	ap->ops->port_stop(ap);
+	if (ap->ops->port_stop)
+		ap->ops->port_stop(ap);
 	kfree(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
@@ -3368,20 +3323,23 @@
  *	@ap:	ATA port to which the command is being sent
  *
  *	RETURNS:
- *	Zero.
+ *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ *	0 otherwise.
  */
 
 int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
 		     struct ata_port *ap)
 {
+	int rc = 0;
+
 	ata_scsi_dump_cdb(ap, cmd);
 
 	if (likely(ata_scsi_dev_enabled(ap->device)))
-		__ata_scsi_queuecmd(cmd, done, ap->device);
+		rc = __ata_scsi_queuecmd(cmd, done, ap->device);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
 		done(cmd);
 	}
-	return 0;
+	return rc;
 }
 EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
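
For reviewers following the SAT pass-thru rework above, here is a minimal
stand-alone sketch (not part of the patch) of how a 16-byte ATA_16 CDB lands
in the taskfile, using the same cdb[] offsets the reworked
ata_scsi_pass_thru() reads.  The constants and the build_identify_cdb()
helper are defined locally purely for illustration; in-kernel the values
come from <linux/ata.h>.

/* Sketch: fill an ATA PASS-THROUGH(16) CDB for IDENTIFY DEVICE.
 * Each assignment is annotated with the taskfile register that
 * ata_scsi_pass_thru() copies it into (see the hunk above).
 */
#include <stdint.h>
#include <string.h>

#define ATA_16          0x85    /* SAT ATA PASS-THROUGH(16) opcode */
#define ATA_CMD_ID_ATA  0xec    /* IDENTIFY DEVICE */

static void build_identify_cdb(uint8_t cdb[16])
{
	memset(cdb, 0, 16);
	cdb[0]  = ATA_16;
	cdb[1]  = 4 << 1;         /* protocol 4 = PIO Data-In; extend bit
	                             clear, so ATA_TFLAG_LBA48 is not set */
	/* cdb[2] holds the SAT transfer attributes (direction/length);
	   it is not parsed by the hunk above, so it stays zero here */
	cdb[4]  = 0;              /* -> tf->feature */
	cdb[6]  = 1;              /* -> tf->nsect   */
	cdb[8]  = 0;              /* -> tf->lbal    */
	cdb[10] = 0;              /* -> tf->lbam    */
	cdb[12] = 0;              /* -> tf->lbah    */
	cdb[13] = 0;              /* -> tf->device (libata enforces the DEV
	                             bit from qc->dev->devno, see above)  */
	cdb[14] = ATA_CMD_ID_ATA; /* -> tf->command */
}

A real issuer would also fill in cdb[2] and submit the CDB, e.g. via the
SG_IO ioctl from user space; that part is outside the scope of this sketch.
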
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/libata-sff.c linux-2.6.18.x86_64.p4/drivers/ata/libata-sff.c
--- linux-2.6.18.x86_64.p3/drivers/ata/libata-sff.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/libata-sff.c	2007-06-06 10:08:00.000000000 -0400
@@ -39,91 +39,99 @@
 #include "libata.h"
 
 /**
- *	ata_tf_load_pio - send taskfile registers to host controller
- *	@ap: Port to which output is sent
- *	@tf: ATA taskfile register set
+ *	ata_irq_on - Enable interrupts on a port.
+ *	@ap: Port on which interrupts are enabled.
  *
- *	Outputs ATA taskfile to standard ATA host controller.
+ *	Enable interrupts on a legacy IDE device using MMIO or PIO,
+ *	wait for idle, clear any pending interrupts.
  *
  *	LOCKING:
  *	Inherited from caller.
  */
-
-static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+u8 ata_irq_on(struct ata_port *ap)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+	u8 tmp;
 
-	if (tf->ctl != ap->last_ctl) {
-		outb(tf->ctl, ioaddr->ctl_addr);
-		ap->last_ctl = tf->ctl;
-		ata_wait_idle(ap);
-	}
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
 
-	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		outb(tf->hob_feature, ioaddr->feature_addr);
-		outb(tf->hob_nsect, ioaddr->nsect_addr);
-		outb(tf->hob_lbal, ioaddr->lbal_addr);
-		outb(tf->hob_lbam, ioaddr->lbam_addr);
-		outb(tf->hob_lbah, ioaddr->lbah_addr);
-		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
-			tf->hob_feature,
-			tf->hob_nsect,
-			tf->hob_lbal,
-			tf->hob_lbam,
-			tf->hob_lbah);
-	}
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+	tmp = ata_wait_idle(ap);
 
-	if (is_addr) {
-		outb(tf->feature, ioaddr->feature_addr);
-		outb(tf->nsect, ioaddr->nsect_addr);
-		outb(tf->lbal, ioaddr->lbal_addr);
-		outb(tf->lbam, ioaddr->lbam_addr);
-		outb(tf->lbah, ioaddr->lbah_addr);
-		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
-			tf->feature,
-			tf->nsect,
-			tf->lbal,
-			tf->lbam,
-			tf->lbah);
-	}
+	ap->ops->irq_clear(ap);
 
-	if (tf->flags & ATA_TFLAG_DEVICE) {
-		outb(tf->device, ioaddr->device_addr);
-		VPRINTK("device 0x%X\n", tf->device);
-	}
+	return tmp;
+}
 
-	ata_wait_idle(ap);
+u8 ata_dummy_irq_on(struct ata_port *ap) { return 0; }
+
+/**
+ *	ata_irq_ack - Acknowledge a device interrupt.
+ *	@ap: Port on which interrupts are enabled.
+ *
+ *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
+ *	or BUSY+DRQ clear).  Obtain dma status and port status from
+ *	device.  Clear the interrupt.  Return port status.
+ *
+ *	LOCKING:
+ */
+
+u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
+{
+	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
+	u8 host_stat, post_stat, status;
+
+	status = ata_busy_wait(ap, bits, 1000);
+	if (status & bits)
+		if (ata_msg_err(ap))
+			printk(KERN_ERR "abnormal status 0x%X\n", status);
+
+	/* get controller status; clear intr, err bits */
+	host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+		 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+	post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+	if (ata_msg_intr(ap))
+		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
+			__FUNCTION__,
+			host_stat, post_stat, status);
+
+	return status;
 }
 
+u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; }
+
 /**
- *	ata_tf_load_mmio - send taskfile registers to host controller
+ *	ata_tf_load - send taskfile registers to host controller
  *	@ap: Port to which output is sent
  *	@tf: ATA taskfile register set
  *
- *	Outputs ATA taskfile to standard ATA host controller using MMIO.
+ *	Outputs ATA taskfile to standard ATA host controller.
  *
  *	LOCKING:
  *	Inherited from caller.
  */
 
-static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
 
 	if (tf->ctl != ap->last_ctl) {
-		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
 		ap->last_ctl = tf->ctl;
 		ata_wait_idle(ap);
 	}
 
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
-		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
-		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
-		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
-		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
+		iowrite8(tf->hob_feature, ioaddr->feature_addr);
+		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
+		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
+		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
+		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
 		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
 			tf->hob_feature,
 			tf->hob_nsect,
@@ -133,11 +141,11 @@
 	}
 
 	if (is_addr) {
-		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
-		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
-		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
-		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
-		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
+		iowrite8(tf->feature, ioaddr->feature_addr);
+		iowrite8(tf->nsect, ioaddr->nsect_addr);
+		iowrite8(tf->lbal, ioaddr->lbal_addr);
+		iowrite8(tf->lbam, ioaddr->lbam_addr);
+		iowrite8(tf->lbah, ioaddr->lbah_addr);
 		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
 			tf->feature,
 			tf->nsect,
@@ -147,232 +155,65 @@
 	}
 
 	if (tf->flags & ATA_TFLAG_DEVICE) {
-		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
+		iowrite8(tf->device, ioaddr->device_addr);
 		VPRINTK("device 0x%X\n", tf->device);
 	}
 
 	ata_wait_idle(ap);
 }
 
-
-/**
- *	ata_tf_load - send taskfile registers to host controller
- *	@ap: Port to which output is sent
- *	@tf: ATA taskfile register set
- *
- *	Outputs ATA taskfile to standard ATA host controller using MMIO
- *	or PIO as indicated by the ATA_FLAG_MMIO flag.
- *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
- *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
- *	hob_lbal, hob_lbam, and hob_lbah.
- *
- *	This function waits for idle (!BUSY and !DRQ) after writing
- *	registers.  If the control register has a new value, this
- *	function also waits for idle after writing control and before
- *	writing the remaining registers.
- *
- *	May be used as the tf_load() entry in ata_port_operations.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
-{
-	if (ap->flags & ATA_FLAG_MMIO)
-		ata_tf_load_mmio(ap, tf);
-	else
-		ata_tf_load_pio(ap, tf);
-}
-
-/**
- *	ata_exec_command_pio - issue ATA command to host controller
- *	@ap: port to which command is being issued
- *	@tf: ATA taskfile register set
- *
- *	Issues PIO write to ATA command register, with proper
- *	synchronization with interrupt handler / other threads.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
-{
-	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
-
-       	outb(tf->command, ap->ioaddr.command_addr);
-	ata_pause(ap);
-}
-
-
-/**
- *	ata_exec_command_mmio - issue ATA command to host controller
- *	@ap: port to which command is being issued
- *	@tf: ATA taskfile register set
- *
- *	Issues MMIO write to ATA command register, with proper
- *	synchronization with interrupt handler / other threads.
- *
- *	FIXME: missing write posting for 400nS delay enforcement
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
-{
-	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
-
-       	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
-	ata_pause(ap);
-}
-
-
 /**
  *	ata_exec_command - issue ATA command to host controller
  *	@ap: port to which command is being issued
  *	@tf: ATA taskfile register set
  *
- *	Issues PIO/MMIO write to ATA command register, with proper
- *	synchronization with interrupt handler / other threads.
+ *	Issues ATA command, with proper synchronization with interrupt
+ *	handler / other threads.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
 {
-	if (ap->flags & ATA_FLAG_MMIO)
-		ata_exec_command_mmio(ap, tf);
-	else
-		ata_exec_command_pio(ap, tf);
-}
-
-/**
- *	ata_tf_read_pio - input device's ATA taskfile shadow registers
- *	@ap: Port from which input is read
- *	@tf: ATA taskfile register set for storing input
- *
- *	Reads ATA taskfile registers for currently-selected device
- *	into @tf.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-
-	tf->command = ata_check_status(ap);
-	tf->feature = inb(ioaddr->error_addr);
-	tf->nsect = inb(ioaddr->nsect_addr);
-	tf->lbal = inb(ioaddr->lbal_addr);
-	tf->lbam = inb(ioaddr->lbam_addr);
-	tf->lbah = inb(ioaddr->lbah_addr);
-	tf->device = inb(ioaddr->device_addr);
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
 
-	if (tf->flags & ATA_TFLAG_LBA48) {
-		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
-		tf->hob_feature = inb(ioaddr->error_addr);
-		tf->hob_nsect = inb(ioaddr->nsect_addr);
-		tf->hob_lbal = inb(ioaddr->lbal_addr);
-		tf->hob_lbam = inb(ioaddr->lbam_addr);
-		tf->hob_lbah = inb(ioaddr->lbah_addr);
-	}
+	iowrite8(tf->command, ap->ioaddr.command_addr);
+	ata_pause(ap);
 }
 
 /**
- *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
+ *	ata_tf_read - input device's ATA taskfile shadow registers
  *	@ap: Port from which input is read
  *	@tf: ATA taskfile register set for storing input
  *
  *	Reads ATA taskfile registers for currently-selected device
- *	into @tf via MMIO.
+ *	into @tf.
  *
  *	LOCKING:
  *	Inherited from caller.
  */
-
-static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 
 	tf->command = ata_check_status(ap);
-	tf->feature = readb((void __iomem *)ioaddr->error_addr);
-	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
-	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
-	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
-	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
-	tf->device = readb((void __iomem *)ioaddr->device_addr);
+	tf->feature = ioread8(ioaddr->error_addr);
+	tf->nsect = ioread8(ioaddr->nsect_addr);
+	tf->lbal = ioread8(ioaddr->lbal_addr);
+	tf->lbam = ioread8(ioaddr->lbam_addr);
+	tf->lbah = ioread8(ioaddr->lbah_addr);
+	tf->device = ioread8(ioaddr->device_addr);
 
 	if (tf->flags & ATA_TFLAG_LBA48) {
-		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
-		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
-		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
-		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
-		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
-		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
+		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ioread8(ioaddr->error_addr);
+		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
 	}
 }
 
-
-/**
- *	ata_tf_read - input device's ATA taskfile shadow registers
- *	@ap: Port from which input is read
- *	@tf: ATA taskfile register set for storing input
- *
- *	Reads ATA taskfile registers for currently-selected device
- *	into @tf.
- *
- *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
- *	is set, also reads the hob registers.
- *
- *	May be used as the tf_read() entry in ata_port_operations.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
-	if (ap->flags & ATA_FLAG_MMIO)
-		ata_tf_read_mmio(ap, tf);
-	else
-		ata_tf_read_pio(ap, tf);
-}
-
-/**
- *	ata_check_status_pio - Read device status reg & clear interrupt
- *	@ap: port where the device is
- *
- *	Reads ATA taskfile status register for currently-selected device
- *	and return its value. This also clears pending interrupts
- *      from this device
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-static u8 ata_check_status_pio(struct ata_port *ap)
-{
-	return inb(ap->ioaddr.status_addr);
-}
-
-/**
- *	ata_check_status_mmio - Read device status reg & clear interrupt
- *	@ap: port where the device is
- *
- *	Reads ATA taskfile status register for currently-selected device
- *	via MMIO and return its value. This also clears pending interrupts
- *      from this device
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-static u8 ata_check_status_mmio(struct ata_port *ap)
-{
-       	return readb((void __iomem *) ap->ioaddr.status_addr);
-}
-
-
 /**
  *	ata_check_status - Read device status reg & clear interrupt
  *	@ap: port where the device is
@@ -381,19 +222,14 @@
  *	and return its value. This also clears pending interrupts
  *      from this device
  *
- *	May be used as the check_status() entry in ata_port_operations.
- *
  *	LOCKING:
  *	Inherited from caller.
  */
 u8 ata_check_status(struct ata_port *ap)
 {
-	if (ap->flags & ATA_FLAG_MMIO)
-		return ata_check_status_mmio(ap);
-	return ata_check_status_pio(ap);
+	return ioread8(ap->ioaddr.status_addr);
 }
 
-
 /**
  *	ata_altstatus - Read device alternate status reg
  *	@ap: port where the device is
@@ -412,58 +248,52 @@
 	if (ap->ops->check_altstatus)
 		return ap->ops->check_altstatus(ap);
 
-	if (ap->flags & ATA_FLAG_MMIO)
-		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
-	return inb(ap->ioaddr.altstatus_addr);
+	return ioread8(ap->ioaddr.altstatus_addr);
 }
 
 /**
- *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
+ *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
  *	@qc: Info associated with this ATA transaction.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-
-static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 	u8 dmactl;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 
 	/* load PRD table addr. */
 	mb();	/* make sure PRD table writes are visible to controller */
-	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
 
 	/* specify data direction, triple-check start bit is clear */
-	dmactl = readb(mmio + ATA_DMA_CMD);
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
 	if (!rw)
 		dmactl |= ATA_DMA_WR;
-	writeb(dmactl, mmio + ATA_DMA_CMD);
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 
 	/* issue r/w command */
 	ap->ops->exec_command(ap, &qc->tf);
 }
 
 /**
- *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
+ *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
  *	@qc: Info associated with this ATA transaction.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-
-static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
+void ata_bmdma_start (struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 	u8 dmactl;
 
 	/* start host DMA transaction */
-	dmactl = readb(mmio + ATA_DMA_CMD);
-	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 
 	/* Strictly, one may wish to issue a readb() here, to
 	 * flush the mmio write.  However, control also passes
@@ -479,96 +309,6 @@
 }
 
 /**
- *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
- *	@qc: Info associated with this ATA transaction.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
-	u8 dmactl;
-
-	/* load PRD table addr. */
-	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
-	/* specify data direction, triple-check start bit is clear */
-	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
-	if (!rw)
-		dmactl |= ATA_DMA_WR;
-	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	/* issue r/w command */
-	ap->ops->exec_command(ap, &qc->tf);
-}
-
-/**
- *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
- *	@qc: Info associated with this ATA transaction.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-
-static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	u8 dmactl;
-
-	/* start host DMA transaction */
-	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	outb(dmactl | ATA_DMA_START,
-	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-}
-
-
-/**
- *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	Writes the ATA_DMA_START flag to the DMA command register.
- *
- *	May be used as the bmdma_start() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-void ata_bmdma_start(struct ata_queued_cmd *qc)
-{
-	if (qc->ap->flags & ATA_FLAG_MMIO)
-		ata_bmdma_start_mmio(qc);
-	else
-		ata_bmdma_start_pio(qc);
-}
-
-
-/**
- *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
- *	@qc: Info associated with this ATA transaction.
- *
- *	Writes address of PRD table to device's PRD Table Address
- *	register, sets the DMA control register, and calls
- *	ops->exec_command() to start the transfer.
- *
- *	May be used as the bmdma_setup() entry in ata_port_operations.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
- */
-void ata_bmdma_setup(struct ata_queued_cmd *qc)
-{
-	if (qc->ap->flags & ATA_FLAG_MMIO)
-		ata_bmdma_setup_mmio(qc);
-	else
-		ata_bmdma_setup_pio(qc);
-}
-
-
-/**
  *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
  *	@ap: Port associated with this ATA transaction.
  *
@@ -577,25 +317,18 @@
  *	May be used as the irq_clear() entry in ata_port_operations.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-
 void ata_bmdma_irq_clear(struct ata_port *ap)
 {
-	if (!ap->ioaddr.bmdma_addr)
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
 		return;
 
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio =
-		      ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
-		writeb(readb(mmio), mmio);
-	} else {
-		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
-		outb(inb(addr), addr);
-	}
+	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
 }
 
-
 /**
  *	ata_bmdma_status - Read PCI IDE BMDMA status
  *	@ap: Port associated with this ATA transaction.
@@ -605,21 +338,13 @@
  *	May be used as the bmdma_status() entry in ata_port_operations.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-
 u8 ata_bmdma_status(struct ata_port *ap)
 {
-	u8 host_stat;
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
-		host_stat = readb(mmio + ATA_DMA_STATUS);
-	} else
-		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-	return host_stat;
+	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 }
 
-
 /**
  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
  *	@qc: Command we are ending DMA for
@@ -629,23 +354,16 @@
  *	May be used as the bmdma_stop() entry in ata_port_operations.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
-
 void ata_bmdma_stop(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
 
-		/* clear start/stop bit */
-		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
-			mmio + ATA_DMA_CMD);
-	} else {
-		/* clear start/stop bit */
-		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
-			ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-	}
+	/* clear start/stop bit */
+	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+		 mmio + ATA_DMA_CMD);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
 	ata_altstatus(ap);        /* dummy read */
@@ -667,10 +385,15 @@
 	ap->ctl |= ATA_NIEN;
 	ap->last_ctl = ap->ctl;
 
-	if (ap->flags & ATA_FLAG_MMIO)
-		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
-	else
-		outb(ap->ctl, ioaddr->ctl_addr);
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+
+	/* Under certain circumstances, some controllers raise IRQ on
+	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
+	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
+	 */
+	ata_chk_status(ap);
+
+	ap->ops->irq_clear(ap);
 }
 
 /**
@@ -687,8 +410,7 @@
 	/* clear & re-enable interrupts */
 	ata_chk_status(ap);
 	ap->ops->irq_clear(ap);
-	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
-		ata_irq_on(ap);
+	ap->ops->irq_on(ap);
 }
 
 /**
@@ -714,7 +436,6 @@
 			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
 			ata_postreset_fn_t postreset)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 	int thaw = 0;
@@ -732,16 +453,14 @@
 		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
 		u8 host_stat;
 
-		host_stat = ata_bmdma_status(ap);
-
-		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
+		host_stat = ap->ops->bmdma_status(ap);
 
 		/* BMDMA controllers indicate host bus error by
 		 * setting DMA_ERR bit and timing out.  As it wasn't
 		 * really a timeout event, adjust error mask and
 		 * cancel frozen state.
 		 */
-		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
+		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
 			qc->err_mask = AC_ERR_HOST_BUS;
 			thaw = 1;
 		}
@@ -793,154 +512,422 @@
  */
 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
 {
-	ata_bmdma_stop(qc);
+	if (qc->ap->ioaddr.bmdma_addr)
+		ata_bmdma_stop(qc);
 }
 
 #ifdef CONFIG_PCI
-static struct ata_probe_ent *
-ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+
+static int ata_resources_present(struct pci_dev *pdev, int port)
 {
-	struct ata_probe_ent *probe_ent;
+	int i;
 
-	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (!probe_ent) {
-		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
-		       kobject_name(&(dev->kobj)));
-		return NULL;
+	/* Check the PCI resources for this channel are enabled */
+	port = port * 2;
+	for (i = 0; i < 2; i ++) {
+		if (pci_resource_start(pdev, port + i) == 0 ||
+		    pci_resource_len(pdev, port + i) == 0)
+			return 0;
 	}
+	return 1;
+}
+
+/**
+ *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
+ *	@host: target ATA host
+ *
+ *	Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_pci_init_bmdma(struct ata_host *host)
+{
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	int i, rc;
 
-	INIT_LIST_HEAD(&probe_ent->node);
-	probe_ent->dev = dev;
+	/* TODO: If we get no DMA mask we should fall back to PIO */
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
 
-	probe_ent->sht = port->sht;
-	probe_ent->host_flags = port->host_flags;
-	probe_ent->pio_mask = port->pio_mask;
-	probe_ent->mwdma_mask = port->mwdma_mask;
-	probe_ent->udma_mask = port->udma_mask;
-	probe_ent->port_ops = port->port_ops;
+	/* request and iomap DMA region */
+	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+	if (rc) {
+		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
+		return -ENOMEM;
+	}
+	host->iomap = pcim_iomap_table(pdev);
 
-	return probe_ent;
-}
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ap->ioaddr.bmdma_addr = bmdma;
+		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+		    (ioread8(bmdma + 2) & 0x80))
+			host->flags |= ATA_HOST_SIMPLEX;
+	}
 
+	return 0;
+}
 
 /**
- *	ata_pci_init_native_mode - Initialize native-mode driver
- *	@pdev:  pci device to be initialized
- *	@port:  array[2] of pointers to port info structures.
- *	@ports: bitmap of ports present
+ *	ata_pci_init_native_host - acquire native ATA resources and init host
+ *	@host: target ATA host
+ *
+ *	Acquire native PCI ATA resources for @host and initialize the
+ *	first two ports of @host accordingly.  Ports marked dummy are
+ *	skipped and allocation failure makes the port dummy.
  *
- *	Utility function which allocates and initializes an
- *	ata_probe_ent structure for a standard dual-port
- *	PIO-based IDE controller.  The returned ata_probe_ent
- *	structure can be passed to ata_device_add().  The returned
- *	ata_probe_ent structure should then be freed with kfree().
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
  *
- *	The caller need only pass the address of the primary port, the
- *	secondary will be deduced automatically. If the device has non
- *	standard secondary port mappings this function can be called twice,
- *	once for each interface.
+ *	RETURNS:
+ *	0 if at least one port is initialized, -ENODEV if no port is
+ *	available.
  */
-
-struct ata_probe_ent *
-ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
+int ata_pci_init_native_host(struct ata_host *host)
 {
-	struct ata_probe_ent *probe_ent =
-		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
-	int p = 0;
-	unsigned long bmdma;
+	struct device *gdev = host->dev;
+	struct pci_dev *pdev = to_pci_dev(gdev);
+	unsigned int mask = 0;
+	int i, rc;
+
+	/* request, iomap BARs and init port addresses accordingly */
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+		int base = i * 2;
+		void __iomem * const *iomap;
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		/* Discard disabled ports.  Some controllers show
+		 * their unused channels this way.  Disabled ports are
+		 * made dummy.
+		 */
+		if (!ata_resources_present(pdev, i)) {
+			ap->ops = &ata_dummy_port_ops;
+			continue;
+		}
+
+		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
+		if (rc) {
+			dev_printk(KERN_WARNING, gdev,
+				   "failed to request/iomap BARs for port %d "
+				   "(errno=%d)\n", i, rc);
+			if (rc == -EBUSY)
+				pcim_pin_device(pdev);
+			ap->ops = &ata_dummy_port_ops;
+			continue;
+		}
+		host->iomap = iomap = pcim_iomap_table(pdev);
 
-	if (!probe_ent)
-		return NULL;
+		ap->ioaddr.cmd_addr = iomap[base];
+		ap->ioaddr.altstatus_addr =
+		ap->ioaddr.ctl_addr = (void __iomem *)
+			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
+		ata_std_ports(&ap->ioaddr);
 
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->private_data = port[0]->private_data;
+		mask |= 1 << i;
+	}
 
-	if (ports & ATA_PORT_PRIMARY) {
-		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
-		probe_ent->port[p].altstatus_addr =
-		probe_ent->port[p].ctl_addr =
-			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
-		bmdma = pci_resource_start(pdev, 4);
-		if (bmdma) {
-			if (inb(bmdma + 2) & 0x80)
-				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
-			probe_ent->port[p].bmdma_addr = bmdma;
-		}
-		ata_std_ports(&probe_ent->port[p]);
-		p++;
+	if (!mask) {
+		dev_printk(KERN_ERR, gdev, "no available native port\n");
+		return -ENODEV;
 	}
 
-	if (ports & ATA_PORT_SECONDARY) {
-		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
-		probe_ent->port[p].altstatus_addr =
-		probe_ent->port[p].ctl_addr =
-			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
-		bmdma = pci_resource_start(pdev, 4);
-		if (bmdma) {
-			bmdma += 8;
-			if(inb(bmdma + 2) & 0x80)
-			probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
-			probe_ent->port[p].bmdma_addr = bmdma;
-		}
-		ata_std_ports(&probe_ent->port[p]);
-		p++;
+	return 0;
+}
+
+/**
+ *	ata_pci_prepare_native_host - helper to prepare native PCI ATA host
+ *	@pdev: target PCI device
+ *	@ppi: array of port_info, must be enough for two ports
+ *	@r_host: out argument for the initialized ATA host
+ *
+ *	Helper to allocate ATA host for @pdev, acquire all native PCI
+ *	resources and initialize it accordingly in one go.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_pci_prepare_native_host(struct pci_dev *pdev,
+				const struct ata_port_info * const * ppi,
+				struct ata_host **r_host)
+{
+	struct ata_host *host;
+	int rc;
+
+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "failed to allocate ATA host\n");
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
-	probe_ent->n_ports = p;
-	return probe_ent;
+	rc = ata_pci_init_native_host(host);
+	if (rc)
+		goto err_out;
+
+	/* init DMA related stuff */
+	rc = ata_pci_init_bmdma(host);
+	if (rc)
+		goto err_bmdma;
+
+	devres_remove_group(&pdev->dev, NULL);
+	*r_host = host;
+	return 0;
+
+ err_bmdma:
+	/* This is necessary because PCI and iomap resources are
+	 * merged and releasing the top group won't release the
+	 * acquired resources if some of those have been acquired
+	 * before entering this function.
+	 */
+	pcim_iounmap_regions(pdev, 0xf);
+ err_out:
+	devres_release_group(&pdev->dev, NULL);
+	return rc;
 }
 
+struct ata_legacy_devres {
+	unsigned int	mask;
+	unsigned long	cmd_port[2];
+	void __iomem *	cmd_addr[2];
+	void __iomem *	ctl_addr[2];
+	unsigned int	irq[2];
+	void *		irq_dev_id[2];
+};
 
-static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
-				struct ata_port_info *port, int port_num)
+static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
 {
-	struct ata_probe_ent *probe_ent;
-	unsigned long bmdma;
+	int i;
 
-	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
-	if (!probe_ent)
-		return NULL;
+	for (i = 0; i < 2; i++) {
+		if (!legacy_dr->irq[i])
+			continue;
 
-	probe_ent->legacy_mode = 1;
-	probe_ent->n_ports = 1;
-	probe_ent->hard_port_no = port_num;
-	probe_ent->private_data = port->private_data;
+		free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
+		legacy_dr->irq[i] = 0;
+		legacy_dr->irq_dev_id[i] = NULL;
+	}
+}
 
-	switch(port_num)
-	{
-		case 0:
-			probe_ent->irq = 14;
-			probe_ent->port[0].cmd_addr = 0x1f0;
-			probe_ent->port[0].altstatus_addr =
-			probe_ent->port[0].ctl_addr = 0x3f6;
-			break;
-		case 1:
-			probe_ent->irq = 15;
-			probe_ent->port[0].cmd_addr = 0x170;
-			probe_ent->port[0].altstatus_addr =
-			probe_ent->port[0].ctl_addr = 0x376;
-			break;
+static void ata_legacy_release(struct device *gdev, void *res)
+{
+	struct ata_legacy_devres *this = res;
+	int i;
+
+	ata_legacy_free_irqs(this);
+
+	for (i = 0; i < 2; i++) {
+		if (this->cmd_addr[i])
+			ioport_unmap(this->cmd_addr[i]);
+		if (this->ctl_addr[i])
+			ioport_unmap(this->ctl_addr[i]);
+		if (this->cmd_port[i])
+			release_region(this->cmd_port[i], 8);
+	}
+}
+
+static int ata_init_legacy_port(struct ata_port *ap,
+				struct ata_legacy_devres *legacy_dr)
+{
+	struct ata_host *host = ap->host;
+	int port_no = ap->port_no;
+	unsigned long cmd_port, ctl_port;
+
+	if (port_no == 0) {
+		cmd_port = ATA_PRIMARY_CMD;
+		ctl_port = ATA_PRIMARY_CTL;
+	} else {
+		cmd_port = ATA_SECONDARY_CMD;
+		ctl_port = ATA_SECONDARY_CTL;
+	}
+
+	/* request cmd_port */
+	if (request_region(cmd_port, 8, "libata"))
+		legacy_dr->cmd_port[port_no] = cmd_port;
+	else {
+		dev_printk(KERN_WARNING, host->dev,
+			   "0x%0lX IDE port busy\n", cmd_port);
+		return -EBUSY;
+	}
+
+	/* iomap cmd and ctl ports */
+	legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
+	legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
+	if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no]) {
+		dev_printk(KERN_WARNING, host->dev,
+			   "failed to map cmd/ctl ports\n");
+		return -ENOMEM;
+	}
+
+	/* init IO addresses */
+	ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
+	ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
+	ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
+	ata_std_ports(&ap->ioaddr);
+
+	return 0;
+}
+
+/**
+ *	ata_init_legacy_host - acquire legacy ATA resources and init ATA host
+ *	@host: target ATA host
+ *	@was_busy: out parameter, indicates whether any port was busy
+ *
+ *	Acquire legacy ATA resources for the first two ports of @host
+ *	and initialize it accordingly.  Ports marked dummy are skipped
+ *	and resource acquisition failure makes the port dummy.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 if at least one port is initialized, -ENODEV if no port is
+ *	available.
+ */
+static int ata_init_legacy_host(struct ata_host *host, int *was_busy)
+{
+	struct device *gdev = host->dev;
+	struct ata_legacy_devres *legacy_dr;
+	int i, rc;
+
+	if (!devres_open_group(gdev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
+	rc = -ENOMEM;
+	legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
+				 GFP_KERNEL);
+	if (!legacy_dr)
+		goto err_out;
+	devres_add(gdev, legacy_dr);
+
+	for (i = 0; i < 2; i++) {
+		if (ata_port_is_dummy(host->ports[i]))
+			continue;
+
+		rc = ata_init_legacy_port(host->ports[i], legacy_dr);
+		if (rc == 0)
+			legacy_dr->mask |= 1 << i;
+		else {
+			if (rc == -EBUSY)
+				(*was_busy)++;
+			host->ports[i]->ops = &ata_dummy_port_ops;
+		}
 	}
 
-	bmdma = pci_resource_start(pdev, 4);
-	if (bmdma != 0) {
-		bmdma += 8 * port_num;
-		probe_ent->port[0].bmdma_addr = bmdma;
-		if (inb(bmdma + 2) & 0x80)
-			probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
+	if (!legacy_dr->mask) {
+		dev_printk(KERN_ERR, gdev, "no available legacy port\n");
+		return -ENODEV;
 	}
-	ata_std_ports(&probe_ent->port[0]);
 
-	return probe_ent;
+	devres_remove_group(gdev, NULL);
+	return 0;
+
+ err_out:
+	devres_release_group(gdev, NULL);
+	return rc;
 }
 
+/**
+ *	ata_request_legacy_irqs - request legacy ATA IRQs
+ *	@host: target ATA host
+ *	@handler: array of IRQ handlers
+ *	@irq_flags: array of IRQ flags
+ *	@dev_id: array of IRQ dev_ids
+ *
+ *	Request legacy IRQs for non-dummy legacy ports in @host.  All
+ *	IRQ parameters are passed as array to allow ports to have
+ *	separate IRQ handlers.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int ata_request_legacy_irqs(struct ata_host *host,
+				   irq_handler_t const *handler,
+				   const unsigned int *irq_flags,
+				   void * const *dev_id)
+{
+	struct device *gdev = host->dev;
+	struct ata_legacy_devres *legacy_dr;
+	int i, rc;
+
+	legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
+	BUG_ON(!legacy_dr);
+
+	for (i = 0; i < 2; i++) {
+		unsigned int irq;
+
+		/* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
+		if (i == 0)
+			irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
+		else
+			irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
+
+		if (!(legacy_dr->mask & (1 << i)))
+			continue;
+
+		if (!handler[i]) {
+			dev_printk(KERN_ERR, gdev,
+				   "NULL handler specified for port %d\n", i);
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
+				 dev_id[i]);
+		if (rc) {
+			dev_printk(KERN_ERR, gdev,
+				"irq %u request failed (errno=%d)\n", irq, rc);
+			goto err_out;
+		}
+
+		/* record irq allocation in legacy_dr */
+		legacy_dr->irq[i] = irq;
+		legacy_dr->irq_dev_id[i] = dev_id[i];
+
+		/* only used to print info */
+		if (i == 0)
+			host->irq = irq;
+		else
+			host->irq2 = irq;
+	}
+
+	return 0;
+
+ err_out:
+	ata_legacy_free_irqs(legacy_dr);
+	return rc;
+}
 
 /**
  *	ata_pci_init_one - Initialize/register PCI IDE host controller
  *	@pdev: Controller to be initialized
- *	@port_info: Information from low-level host driver
- *	@n_ports: Number of ports attached to host controller
+ *	@ppi: array of port_info, must be enough for two ports
  *
  *	This is a helper function which can be called from a driver's
  *	xxx_init_one() probe function if the hardware uses traditional
@@ -950,162 +937,137 @@
  *	regions, sets the dma mask, enables bus master mode, and calls
  *	ata_device_add()
  *
+ *	ASSUMPTION:
+ *	Nobody makes a single channel controller that appears solely as
+ *	the secondary legacy port on PCI.
+ *
  *	LOCKING:
  *	Inherited from PCI layer (may sleep).
  *
  *	RETURNS:
  *	Zero on success, negative on errno-based value on error.
  */
-
-int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
-		      unsigned int n_ports)
+int ata_pci_init_one(struct pci_dev *pdev,
+		     const struct ata_port_info * const * ppi)
 {
-	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
-	struct ata_port_info *port[2];
-	u8 tmp8, mask;
-	unsigned int legacy_mode = 0;
-	int disable_dev_on_err = 1;
-	int rc;
+	struct device *dev = &pdev->dev;
+	const struct ata_port_info *pi = NULL;
+	struct ata_host *host = NULL;
+	u8 mask;
+	int legacy_mode = 0;
+	int i, rc;
 
 	DPRINTK("ENTER\n");
 
-	port[0] = port_info[0];
-	if (n_ports > 1)
-		port[1] = port_info[1];
-	else
-		port[1] = port[0];
-
-	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
-	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
-		/* TODO: What if one channel is in native mode ... */
-		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
-		mask = (1 << 2) | (1 << 0);
-		if ((tmp8 & mask) != mask)
-			legacy_mode = (1 << 3);
+	/* look up the first valid port_info */
+	for (i = 0; i < 2 && ppi[i]; i++) {
+		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
+			pi = ppi[i];
+			break;
+		}
 	}
 
-	/* FIXME... */
-	if ((!legacy_mode) && (n_ports > 2)) {
-		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
-		n_ports = 2;
-		/* For now */
+	if (!pi) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "no valid port_info specified\n");
+		return -EINVAL;
 	}
 
+	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
 	/* FIXME: Really for ATA it isn't safe because the device may be
 	   multi-purpose and we want to leave it alone if it was already
 	   enabled. Secondly for shared use as Arjan says we want refcounting
 
 	   Checking dev->is_enabled is insufficient as this is not set at
 	   boot for the primary video which is BIOS enabled
-         */
+	  */
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
-		return rc;
-
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		disable_dev_on_err = 0;
 		goto err_out;
-	}
 
-	/* FIXME: Should use platform specific mappers for legacy port ranges */
-	if (legacy_mode) {
-		if (!request_region(0x1f0, 8, "libata")) {
-			struct resource *conflict, res;
-			res.start = 0x1f0;
-			res.end = 0x1f0 + 8 - 1;
-			conflict = ____request_resource(&ioport_resource, &res);
-			if (!strcmp(conflict->name, "libata"))
-				legacy_mode |= (1 << 0);
-			else {
-				disable_dev_on_err = 0;
-				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
-			}
-		} else
-			legacy_mode |= (1 << 0);
-
-		if (!request_region(0x170, 8, "libata")) {
-			struct resource *conflict, res;
-			res.start = 0x170;
-			res.end = 0x170 + 8 - 1;
-			conflict = ____request_resource(&ioport_resource, &res);
-			if (!strcmp(conflict->name, "libata"))
-				legacy_mode |= (1 << 1);
-			else {
-				disable_dev_on_err = 0;
-				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
-			}
-		} else
-			legacy_mode |= (1 << 1);
-	}
-
-	/* we have legacy mode, but all ports are unavailable */
-	if (legacy_mode == (1 << 3)) {
-		rc = -EBUSY;
-		goto err_out_regions;
+	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		u8 tmp8;
+
+		/* TODO: What if one channel is in native mode ... */
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+		mask = (1 << 2) | (1 << 0);
+		if ((tmp8 & mask) != mask)
+			legacy_mode = 1;
+#if defined(CONFIG_NO_ATA_LEGACY)
+		/* Some platforms with PCI limits cannot address compat
+		   port space. In that case we punt if their firmware has
+		   left a device in compatibility mode */
+		if (legacy_mode) {
+			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
+			rc = -EOPNOTSUPP;
+			goto err_out;
+		}
+#endif
 	}
 
-	/* FIXME: If we get no DMA mask we should fall back to PIO */
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
+	/* alloc and init host */
+	host = ata_host_alloc_pinfo(dev, ppi, 2);
+	if (!host) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "failed to allocate ATA host\n");
+		rc = -ENOMEM;
+		goto err_out;
+	}
 
-	if (legacy_mode) {
-		if (legacy_mode & (1 << 0))
-			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
-		if (legacy_mode & (1 << 1))
-			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
+	if (!legacy_mode) {
+		rc = ata_pci_init_native_host(host);
+		if (rc)
+			goto err_out;
 	} else {
-		if (n_ports == 2)
-			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-		else
-			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
-	}
-	if (!probe_ent && !probe_ent2) {
-		rc = -ENOMEM;
-		goto err_out_regions;
+		int was_busy = 0;
+
+		rc = ata_init_legacy_host(host, &was_busy);
+		if (was_busy)
+			pcim_pin_device(pdev);
+		if (rc)
+			goto err_out;
+
+		/* request respective PCI regions, may fail */
+		rc = pci_request_region(pdev, 1, DRV_NAME);
+		rc = pci_request_region(pdev, 3, DRV_NAME);
 	}
 
+	/* init BMDMA, may fail */
+	ata_pci_init_bmdma(host);
 	pci_set_master(pdev);
 
-	/* FIXME: check ata_device_add return */
-	if (legacy_mode) {
-		struct device *dev = &pdev->dev;
-		struct ata_host_set *host_set = NULL;
-
-		if (legacy_mode & (1 << 0)) {
-			ata_device_add(probe_ent);
-			host_set = dev_get_drvdata(dev);
-		}
+	/* start host and request IRQ */
+	rc = ata_host_start(host);
+	if (rc)
+		goto err_out;
 
-		if (legacy_mode & (1 << 1)) {
-			ata_device_add(probe_ent2);
-			if (host_set) {
-				host_set->next = dev_get_drvdata(dev);
-				dev_set_drvdata(dev, host_set);
-			}
-		}
-	} else
-		ata_device_add(probe_ent);
+	if (!legacy_mode)
+		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
+				      IRQF_SHARED, DRV_NAME, host);
+	else {
+		irq_handler_t handler[2] = { host->ops->irq_handler,
+					     host->ops->irq_handler };
+		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
+		void *dev_id[2] = { host, host };
 
-	kfree(probe_ent);
-	kfree(probe_ent2);
+		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
+	}
+	if (rc)
+		goto err_out;
+
+	/* register */
+	rc = ata_host_register(host, pi->sht);
+	if (rc)
+		goto err_out;
 
+	devres_remove_group(dev, NULL);
 	return 0;
 
-err_out_regions:
-	if (legacy_mode & (1 << 0))
-		release_region(0x1f0, 8);
-	if (legacy_mode & (1 << 1))
-		release_region(0x170, 8);
-	pci_release_regions(pdev);
 err_out:
-	if (disable_dev_on_err)
-		pci_disable_device(pdev);
+	devres_release_group(dev, NULL);
 	return rc;
 }
 
@@ -1135,12 +1097,12 @@
 	return 0;
 }
 
-unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
+unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
 {
 	/* Filter out DMA modes if the device has been configured by
 	   the BIOS as PIO only */
 
-	if (ap->ioaddr.bmdma_addr == 0)
+	if (adev->ap->ioaddr.bmdma_addr == 0)
 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	return xfer_mask;
 }
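
A note on the legacy/native decision in the reworked ata_pci_init_one() above:
it keys off two bits of the PCI programming-interface byte (PCI_CLASS_PROG),
bit 0 for the primary channel and bit 2 for the secondary; if either bit is
clear the controller is treated as legacy and the compatibility ports/IRQs
(14/15 on x86) are used instead of the PCI BARs and pdev->irq. The fragment
below is a small stand-alone sketch of just that decode, with a hard-coded
example value instead of a real config-space read; it is illustrative only and
not part of the patch.

#include <stdio.h>

int main(void)
{
	/* Example prog-if byte; on real hardware this comes from PCI
	 * config offset 0x09 (PCI_CLASS_PROG).  0x8a here has bits 0
	 * and 2 clear, i.e. firmware left both channels in
	 * compatibility mode. */
	unsigned char tmp8 = 0x8a;
	unsigned char mask = (1 << 2) | (1 << 0);

	if ((tmp8 & mask) != mask)
		printf("legacy mode: use compat ports and IRQs 14/15\n");
	else
		printf("native mode: use the PCI BARs and pdev->irq\n");
	return 0;
}
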
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/Makefile linux-2.6.18.x86_64.p4/drivers/ata/Makefile
--- linux-2.6.18.x86_64.p3/drivers/ata/Makefile	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/Makefile	2007-06-06 10:08:00.000000000 -0400
@@ -1,19 +1,72 @@
 
-obj-$(CONFIG_SCSI_SATA_AHCI)	+= libata.o ahci.o
-obj-$(CONFIG_SCSI_SATA_SVW)	+= libata.o sata_svw.o
-obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
-obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
-obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
-obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
-obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
-obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
-obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
-obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o
-obj-$(CONFIG_SCSI_SATA_SX4)	+= libata.o sata_sx4.o
-obj-$(CONFIG_SCSI_SATA_NV)	+= libata.o sata_nv.o
-obj-$(CONFIG_SCSI_SATA_ULI)	+= libata.o sata_uli.o
-obj-$(CONFIG_SCSI_SATA_MV)	+= libata.o sata_mv.o
-obj-$(CONFIG_SCSI_PDC_ADMA)	+= libata.o pdc_adma.o
+obj-$(CONFIG_ATA)		+= libata.o
 
-libata-objs	:= libata-core.o libata-scsi.o libata-sff.o libata-eh.o
+obj-$(CONFIG_SATA_AHCI)		+= ahci.o
+obj-$(CONFIG_SATA_SVW)		+= sata_svw.o
+obj-$(CONFIG_ATA_PIIX)		+= ata_piix.o
+obj-$(CONFIG_SATA_PROMISE)	+= sata_promise.o
+obj-$(CONFIG_SATA_QSTOR)	+= sata_qstor.o
+obj-$(CONFIG_SATA_SIL)		+= sata_sil.o
+obj-$(CONFIG_SATA_SIL24)	+= sata_sil24.o
+obj-$(CONFIG_SATA_VIA)		+= sata_via.o
+obj-$(CONFIG_SATA_VITESSE)	+= sata_vsc.o
+obj-$(CONFIG_SATA_SIS)		+= sata_sis.o
+obj-$(CONFIG_SATA_SX4)		+= sata_sx4.o
+obj-$(CONFIG_SATA_NV)		+= sata_nv.o
+obj-$(CONFIG_SATA_ULI)		+= sata_uli.o
+obj-$(CONFIG_SATA_MV)		+= sata_mv.o
+obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
+obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
+
+obj-$(CONFIG_PATA_ALI)		+= pata_ali.o
+obj-$(CONFIG_PATA_AMD)		+= pata_amd.o
+obj-$(CONFIG_PATA_ARTOP)	+= pata_artop.o
+obj-$(CONFIG_PATA_ATIIXP)	+= pata_atiixp.o
+obj-$(CONFIG_PATA_CMD640_PCI)	+= pata_cmd640.o
+obj-$(CONFIG_PATA_CMD64X)	+= pata_cmd64x.o
+obj-$(CONFIG_PATA_CS5520)	+= pata_cs5520.o
+obj-$(CONFIG_PATA_CS5530)	+= pata_cs5530.o
+obj-$(CONFIG_PATA_CS5535)	+= pata_cs5535.o
+obj-$(CONFIG_PATA_CYPRESS)	+= pata_cypress.o
+obj-$(CONFIG_PATA_EFAR)		+= pata_efar.o
+obj-$(CONFIG_PATA_HPT366)	+= pata_hpt366.o
+obj-$(CONFIG_PATA_HPT37X)	+= pata_hpt37x.o
+obj-$(CONFIG_PATA_HPT3X2N)	+= pata_hpt3x2n.o
+obj-$(CONFIG_PATA_HPT3X3)	+= pata_hpt3x3.o
+obj-$(CONFIG_PATA_ISAPNP)	+= pata_isapnp.o
+obj-$(CONFIG_PATA_IT821X)	+= pata_it821x.o
+obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
+obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
+obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
+obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
+obj-$(CONFIG_PATA_OPTIDMA)	+= pata_optidma.o
+obj-$(CONFIG_PATA_MPC52xx)	+= pata_mpc52xx.o
+obj-$(CONFIG_PATA_MARVELL)	+= pata_marvell.o
+obj-$(CONFIG_PATA_MPIIX)	+= pata_mpiix.o
+obj-$(CONFIG_PATA_OLDPIIX)	+= pata_oldpiix.o
+obj-$(CONFIG_PATA_PCMCIA)	+= pata_pcmcia.o
+obj-$(CONFIG_PATA_PDC2027X)	+= pata_pdc2027x.o
+obj-$(CONFIG_PATA_PDC_OLD)	+= pata_pdc202xx_old.o
+obj-$(CONFIG_PATA_QDI)		+= pata_qdi.o
+obj-$(CONFIG_PATA_RADISYS)	+= pata_radisys.o
+obj-$(CONFIG_PATA_RZ1000)	+= pata_rz1000.o
+obj-$(CONFIG_PATA_SC1200)	+= pata_sc1200.o
+obj-$(CONFIG_PATA_SERVERWORKS)	+= pata_serverworks.o
+obj-$(CONFIG_PATA_SIL680)	+= pata_sil680.o
+obj-$(CONFIG_PATA_VIA)		+= pata_via.o
+obj-$(CONFIG_PATA_WINBOND)	+= pata_sl82c105.o
+obj-$(CONFIG_PATA_WINBOND_VLB)	+= pata_winbond.o
+obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
+obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
+obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
+obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
+# Should be last but one libata driver
+obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
+# Should be last libata driver
+obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o
 
+libata-objs	:= libata-core.o libata-scsi.o libata-sff.o libata-eh.o
+libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_ali.c linux-2.6.18.x86_64.p4/drivers/ata/pata_ali.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_ali.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_ali.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,663 @@
+/*
+ * pata_ali.c 	- ALI 15x3 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based in part upon
+ * linux/drivers/ide/pci/alim15x3.c		Version 0.17	2003/01/02
+ *
+ *  Copyright (C) 1998-2000 Michel Aubry, Maintainer
+ *  Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
+ *  Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
+ *
+ *  Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
+ *  May be copied or modified under the terms of the GNU General Public License
+ *  Copyright (C) 2002 Alan Cox <alan@redhat.com>
+ *  ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
+ *
+ *  Documentation
+ *	Chipset documentation available under NDA only
+ *
+ *  TODO/CHECK
+ *	Cannot have ATAPI on both master & slave for rev < c2 (???) but
+ *	otherwise should do atapi DMA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_ali"
+#define DRV_VERSION "0.7.4"
+
+/*
+ *	Cable special cases
+ */
+
+static struct dmi_system_id cable_dmi_table[] = {
+	{
+		.ident = "HP Pavilion N5430",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
+		},
+	},
+	{ }
+};
+
+static int ali_cable_override(struct pci_dev *pdev)
+{
+	/* Fujitsu P2000 */
+	if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
+	   	return 1;
+	/* Systems by DMI */
+	if (dmi_check_system(cable_dmi_table))
+		return 1;
+	return 0;
+}
+
+/**
+ *	ali_c2_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection for C2 and later revisions
+ */
+
+static int ali_c2_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 ata66;
+
+	/* Certain laptops use short but suitable cables and don't
+	   implement the detect logic */
+
+	if (ali_cable_override(pdev))
+		return ATA_CBL_PATA40_SHORT;
+
+	/* Host view cable detect 0x4A bit 0 primary bit 1 secondary
+	   Bit set for 40 pin */
+	pci_read_config_byte(pdev, 0x4A, &ata66);
+	if (ata66 & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
+}
+
+/**
+ *	ali_20_filter		-	filter for earlier ALI DMA
+ *	@adev: attached ALi device
+ *	@mask: proposed transfer mode mask
+ *
+ *	Ensure that we do not do DMA on CD devices. We may be able to
+ *	fix that later on. Also ensure we do not do UDMA on WDC drives
+ */
+
+static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
+{
+	char model_num[ATA_ID_PROD_LEN + 1];
+	/* No DMA on anything but a disk for now */
+	if (adev->class != ATA_DEV_ATA)
+		mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+	if (strstr(model_num, "WDC"))
+		return mask &= ~ATA_MASK_UDMA;
+	return ata_pci_default_filter(adev, mask);
+}
+
+/**
+ *	ali_fifo_control	-	FIFO manager
+ *	@ap: ALi channel to control
+ *	@adev: device for FIFO control
+ *	@on: FIFO control nibble, 0x00 to disable or 0x05/0x08 to enable
+ *
+ *	Enable or disable the FIFO on a given device. Because of the way the
+ *	ALi FIFO works it provides a boost on ATA disk but can be confused by
+ *	ATAPI and we must therefore manage it.
+ */
+
+static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int pio_fifo = 0x54 + ap->port_no;
+	u8 fifo;
+	int shift = 4 * adev->devno;
+
+	/* ATA - FIFO on set nibble to 0x05, ATAPI - FIFO off, set nibble to
+	   0x00. Not all the docs agree but the behaviour we now use is the
+	   one stated in the BIOS Programming Guide */
+
+	pci_read_config_byte(pdev, pio_fifo, &fifo);
+	fifo &= ~(0x0F << shift);
+	if (on)
+		fifo |= (on << shift);
+	pci_write_config_byte(pdev, pio_fifo, fifo);
+}
+
+/**
+ *	ali_program_modes	-	load mode registers
+ *	@ap: ALi channel to load
+ *	@adev: Device the timing is for
+ *	@cmd: Command timing
+ *	@data: Data timing
+ *	@ultra: UDMA timing or zero for off
+ *
+ *	Loads the timing registers for cmd/data and disable UDMA if
+ *	ultra is zero. If ultra is set then load and enable the UDMA
+ *	timing but do not touch the command/data timing.
+ */
+
+static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int cas = 0x58 + 4 * ap->port_no;	/* Command timing */
+	int cbt = 0x59 + 4 * ap->port_no;	/* Command timing */
+	int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
+	int udmat = 0x56 + ap->port_no;	/* UDMA timing */
+	int shift = 4 * adev->devno;
+	u8 udma;
+
+	if (t != NULL) {
+		t->setup = FIT(t->setup, 1, 8) & 7;
+		t->act8b = FIT(t->act8b, 1, 8) & 7;
+		t->rec8b = FIT(t->rec8b, 1, 16) & 15;
+		t->active = FIT(t->active, 1, 8) & 7;
+		t->recover = FIT(t->recover, 1, 16) & 15;
+
+		pci_write_config_byte(pdev, cas, t->setup);
+		pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
+		pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
+	}
+
+	/* Set up the UDMA enable */
+	pci_read_config_byte(pdev, udmat, &udma);
+	udma &= ~(0x0F << shift);
+	udma |= ultra << shift;
+	pci_write_config_byte(pdev, udmat, udma);
+}
+
+/**
+ *	ali_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the ALi registers for PIO mode. FIXME: add timings for
+ *	PIO5.
+ */
+
+static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_device *pair = ata_dev_pair(adev);
+	struct ata_timing t;
+	unsigned long T =  1000000000 / 33333;	/* PCI clock based */
+
+	ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
+	if (pair) {
+		struct ata_timing p;
+		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+		if (pair->dma_mode) {
+			ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
+			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+		}
+	}
+
+	/* PIO FIFO is only permitted on ATA disk */
+	if (adev->class != ATA_DEV_ATA)
+		ali_fifo_control(ap, adev, 0x00);
+	ali_program_modes(ap, adev, &t, 0);
+	if (adev->class == ATA_DEV_ATA)
+		ali_fifo_control(ap, adev, 0x05);
+
+}
+
+/**
+ *	ali_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	FIXME: MWDMA timings
+ */
+
+static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
+	struct ata_device *pair = ata_dev_pair(adev);
+	struct ata_timing t;
+	unsigned long T =  1000000000 / 33333;	/* PCI clock based */
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+
+	if (adev->class == ATA_DEV_ATA)
+		ali_fifo_control(ap, adev, 0x08);
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
+		if (adev->dma_mode >= XFER_UDMA_3) {
+			u8 reg4b;
+			pci_read_config_byte(pdev, 0x4B, &reg4b);
+			reg4b |= 1;
+			pci_write_config_byte(pdev, 0x4B, reg4b);
+		}
+	} else {
+		ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
+		if (pair) {
+			struct ata_timing p;
+			ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+			if (pair->dma_mode) {
+				ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
+				ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+			}
+		}
+		ali_program_modes(ap, adev, &t, 0);
+	}
+}
+
+/**
+ *	ali_lock_sectors	-	Keep older devices to 255 sector mode
+ *	@adev: Device
+ *
+ *	Called during the bus probe for each device that is found. We use
+ *	this call to lock the sector count of the device to 255 or less on
+ *	older ALi controllers. If we didn't do this then large I/O's would
+ *	older ALi controllers. If we didn't do this then large I/Os would
+ *	require LBA48 commands, which the older ALi controllers require to
+ *	be issued via slower PIO methods.
+
+static void ali_lock_sectors(struct ata_device *adev)
+{
+	adev->max_sectors = 255;
+}
+
+static struct scsi_host_template ali_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+/*
+ *	Port operations for PIO only ALi
+ */
+
+static struct ata_port_operations ali_early_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= ali_set_piomode,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Port operations for DMA capable ALi without cable
+ *	detect
+ */
+static struct ata_port_operations ali_20_port_ops = {
+	.port_disable	= ata_port_disable,
+
+	.set_piomode	= ali_set_piomode,
+	.set_dmamode	= ali_set_dmamode,
+	.mode_filter	= ali_20_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+	.dev_config	= ali_lock_sectors,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect
+ */
+static struct ata_port_operations ali_c2_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= ali_set_piomode,
+	.set_dmamode	= ali_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+	.dev_config	= ali_lock_sectors,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ali_c2_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect and LBA48
+ */
+static struct ata_port_operations ali_c5_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= ali_set_piomode,
+	.set_dmamode	= ali_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ali_c2_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+
+/**
+ *	ali_init_chipset	-	chip setup function
+ *	@pdev: PCI device of ATA controller
+ *
+ *	Perform the setup on the device that must be done both at boot
+ *	and at resume time.
+ */
+
+static void ali_init_chipset(struct pci_dev *pdev)
+{
+	u8 rev, tmp;
+	struct pci_dev *north, *isa_bridge;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+
+	/*
+	 * The chipset revision selects the driver operations and
+	 * mode data.
+	 */
+
+	if (rev >= 0x20 && rev < 0xC2) {
+		/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+		pci_read_config_byte(pdev, 0x4B, &tmp);
+		/* Clear CD-ROM DMA write bit */
+		tmp &= 0x7F;
+		pci_write_config_byte(pdev, 0x4B, tmp);
+	} else if (rev >= 0xC2) {
+		/* Enable cable detection logic */
+		pci_read_config_byte(pdev, 0x4B, &tmp);
+		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
+	}
+	north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+	if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
+		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
+		   Set the south bridge enable bit */
+		pci_read_config_byte(isa_bridge, 0x79, &tmp);
+		if (rev == 0xC2)
+			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
+		else if (rev > 0xC2 && rev < 0xC5)
+			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
+	}
+	if (rev >= 0x20) {
+		/*
+		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
+		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
+		 * via 0x54/55.
+		 */
+		pci_read_config_byte(pdev, 0x53, &tmp);
+		if (rev <= 0x20)
+			tmp &= ~0x02;
+		if (rev >= 0xc7)
+			tmp |= 0x03;
+		else
+			tmp |= 0x01;	/* CD_ROM enable for DMA */
+		pci_write_config_byte(pdev, 0x53, tmp);
+	}
+	pci_dev_put(isa_bridge);
+	pci_dev_put(north);
+	ata_pci_clear_simplex(pdev);
+}
+/**
+ *	ali_init_one		-	discovery callback
+ *	@pdev: PCI device ID
+ *	@id: PCI table info
+ *
+ *	An ALi IDE interface has been discovered. Figure out what revision
+ *	and perform configuration work before handing it to the ATA layer
+ */
+
+static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_early = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &ali_early_port_ops
+	};
+	/* Revision 0x20 added DMA */
+	static const struct ata_port_info info_20 = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &ali_20_port_ops
+	};
+	/* Revision 0x20 with support logic added UDMA */
+	static const struct ata_port_info info_20_udma = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x07,	/* UDMA33 */
+		.port_ops = &ali_20_port_ops
+	};
+	/* Revision 0xC2 adds UDMA66 */
+	static const struct ata_port_info info_c2 = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x1f,
+		.port_ops = &ali_c2_port_ops
+	};
+	/* Revision 0xC3 is UDMA100 */
+	static const struct ata_port_info info_c3 = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &ali_c2_port_ops
+	};
+	/* Revision 0xC4 is UDMA133 */
+	static const struct ata_port_info info_c4 = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &ali_c2_port_ops
+	};
+	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
+	static const struct ata_port_info info_c5 = {
+		.sht = &ali_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &ali_c5_port_ops
+	};
+
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	u8 rev, tmp;
+	struct pci_dev *isa_bridge;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+
+	/*
+	 * The chipset revision selects the driver operations and
+	 * mode data.
+	 */
+
+	if (rev < 0x20) {
+		ppi[0] = &info_early;
+	} else if (rev < 0xC2) {
+		ppi[0] = &info_20;
+	} else if (rev == 0xC2) {
+		ppi[0] = &info_c2;
+	} else if (rev == 0xC3) {
+		ppi[0] = &info_c3;
+	} else if (rev == 0xC4) {
+		ppi[0] = &info_c4;
+	} else
+		ppi[0] = &info_c5;
+
+	ali_init_chipset(pdev);
+
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+	if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
+		/* Are we paired with a UDMA capable chip */
+		pci_read_config_byte(isa_bridge, 0x5E, &tmp);
+		if ((tmp & 0x1E) == 0x12)
+			ppi[0] = &info_20_udma;
+		pci_dev_put(isa_bridge);
+	}
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int ali_reinit_one(struct pci_dev *pdev)
+{
+	ali_init_chipset(pdev);
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id ali[] = {
+	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
+	{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
+
+	{ },
+};
+
+static struct pci_driver ali_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ali,
+	.probe 		= ali_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ali_reinit_one,
+#endif
+};
+
+static int __init ali_init(void)
+{
+	return pci_register_driver(&ali_pci_driver);
+}
+
+
+static void __exit ali_exit(void)
+{
+	pci_unregister_driver(&ali_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ALi PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ali);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ali_init);
+module_exit(ali_exit);
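
A quick illustration of the ALi timing register layout used by
ali_program_modes() above (not part of the patch): the UDMA register at
0x56 + port_no holds one 4-bit code per device, so programming a device is a
clear-nibble/insert-nibble read-modify-write. The stand-alone sketch below
reproduces just that packing, with the udma_timing table copied from
ali_set_dmamode(); ali_pack_udma() is an illustrative helper, not a function
from the driver.

#include <stdio.h>

/* UDMA nibble codes from ali_set_dmamode(), indexed by UDMA mode 0-6 */
static const unsigned char udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };

/* Pack one device's UDMA code into the shared register image
 * (4 bits per device, devno selects the nibble). */
static unsigned char ali_pack_udma(unsigned char reg, int devno, int udma_mode)
{
	int shift = 4 * devno;

	reg &= ~(0x0F << shift);			/* clear this device's nibble */
	reg |= udma_timing[udma_mode] << shift;		/* insert the new code */
	return reg;
}

int main(void)
{
	unsigned char reg = 0;

	reg = ali_pack_udma(reg, 0, 5);	/* master at UDMA5 -> code 0xF */
	reg = ali_pack_udma(reg, 1, 2);	/* slave at UDMA2  -> code 0xA */
	printf("register image for 0x56+port: 0x%02X\n", reg);	/* prints 0xAF */
	return 0;
}
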
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_amd.c linux-2.6.18.x86_64.p4/drivers/ata/pata_amd.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_amd.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_amd.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,729 @@
+/*
+ * pata_amd.c 	- AMD PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ *  Based on pata-sil680. Errata information is taken from data sheets
+ *  and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
+ *  claimed by sata-nv.c.
+ *
+ *  TODO:
+ *	Variable system clock when/if it makes sense
+ *	Power management on ports
+ *
+ *
+ *  Documentation publicly available.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_amd"
+#define DRV_VERSION "0.3.8"
+
+/**
+ *	timing_setup		-	shared timing computation and load
+ *	@ap: ATA port being set up
+ *	@adev: drive being configured
+ *	@offset: port offset
+ *	@speed: target speed
+ *	@clock: clock multiplier (number of times 33MHz for this part)
+ *
+ *	Perform the actual timing set up for Nvidia or AMD PATA devices.
+ *	The actual devices vary so they all call into this helper function
+ *	providing the clock multiplier and offset (because AMD and Nvidia put
+ *	the ports at different locations).
+ */
+
+static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
+{
+	static const unsigned char amd_cyc2udma[] = {
+		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *peer = ata_dev_pair(adev);
+	int dn = ap->port_no * 2 + adev->devno;
+	struct ata_timing at, apeer;
+	int T, UT;
+	const int amd_clock = 33333;	/* KHz. */
+	u8 t;
+
+	T = 1000000000 / amd_clock;
+	UT = T / min_t(int, max_t(int, clock, 1), 2);
+
+	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
+		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
+		return;
+	}
+
+	if (peer) {
+		/* This may be over conservative */
+		if (peer->dma_mode) {
+			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
+			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
+		}
+		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
+		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
+	}
+
+	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
+	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
+
+	/*
+	 *	Now do the setup work
+	 */
+
+	/* Configure the address set up timing */
+	pci_read_config_byte(pdev, offset + 0x0C, &t);
+	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
+	pci_write_config_byte(pdev, offset + 0x0C , t);
+
+	/* Configure the 8bit I/O timing */
+	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
+		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));
+
+	/* Drive timing */
+	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
+		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
+
+	switch (clock) {
+		case 1:
+		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
+		break;
+
+		case 2:
+		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
+		break;
+
+		case 3:
+		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
+		break;
+
+		case 4:
+		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
+		break;
+
+		default:
+			return;
+	}
+
+	/* UDMA timing */
+	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
+}
+
+/**
+ *	amd_pre_reset		-	perform reset handling
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset sequence checking enable bits to see which ports are
+ *	active.
+ */
+
+static int amd_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits amd_enable_bits[] = {
+		{ 0x40, 1, 0x02, 0x02 },
+		{ 0x40, 1, 0x01, 0x01 }
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+static void amd_error_handler(struct ata_port *ap)
+{
+	return ata_bmdma_drive_eh(ap, amd_pre_reset,
+				      ata_std_softreset, NULL,
+				      ata_std_postreset);
+}
+
+static int amd_cable_detect(struct ata_port *ap)
+{
+	static const u32 bitmask[2] = {0x03, 0x0C};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 ata66;
+
+	pci_read_config_byte(pdev, 0x42, &ata66);
+	if (ata66 & bitmask[ap->port_no])
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	amd33_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the AMD registers for PIO mode.
+ */
+
+static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
+}
+
+static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
+}
+
+static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
+}
+
+static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
+}
+
+/**
+ *	amd33_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the AMD and Nvidia
+ *	chipset.
+ */
+
+static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
+}
+
+static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
+}
+
+static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
+}
+
+static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
+}
+
+
+/**
+ *	nv_pre_reset	-	reset handling
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset sequence checking enable bits to see which ports are active.
+ */
+
+static int nv_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits nv_enable_bits[] = {
+		{ 0x50, 1, 0x02, 0x02 },
+		{ 0x50, 1, 0x01, 0x01 }
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+static void nv_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, nv_pre_reset,
+			       ata_std_softreset, NULL,
+			       ata_std_postreset);
+}
+
+static int nv_cable_detect(struct ata_port *ap)
+{
+	static const u8 bitmask[2] = {0x03, 0x0C};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 ata66;
+	u16 udma;
+	int cbl;
+
+	pci_read_config_byte(pdev, 0x52, &ata66);
+	if (ata66 & bitmask[ap->port_no])
+		cbl = ATA_CBL_PATA80;
+	else
+		cbl = ATA_CBL_PATA40;
+
+ 	/* We now have to double check because the Nvidia boxes BIOS
+ 	   doesn't always set the cable bits but does set mode bits */
+ 	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
+ 	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
+		cbl = ATA_CBL_PATA80;
+	return cbl;
+}
+
+/**
+ *	nv100_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the AMD registers for PIO mode.
+ */
+
+static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
+}
+
+static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
+}
+
+/**
+ *	nv100_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the AMD and Nvidia
+ *	chipset.
+ */
+
+static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
+}
+
+static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
+}
+
+static struct scsi_host_template amd_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations amd33_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= amd33_set_piomode,
+	.set_dmamode	= amd33_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= amd_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations amd66_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= amd66_set_piomode,
+	.set_dmamode	= amd66_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= amd_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_unknown,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations amd100_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= amd100_set_piomode,
+	.set_dmamode	= amd100_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= amd_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_unknown,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations amd133_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= amd133_set_piomode,
+	.set_dmamode	= amd133_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= amd_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= amd_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations nv100_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= nv100_set_piomode,
+	.set_dmamode	= nv100_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= nv_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= nv_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations nv133_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= nv133_set_piomode,
+	.set_dmamode	= nv133_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= nv_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= nv_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info[10] = {
+		{	/* 0: AMD 7401 */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,	/* No SWDMA */
+			.udma_mask = 0x07,	/* UDMA 33 */
+			.port_ops = &amd33_port_ops
+		},
+		{	/* 1: Early AMD7409 - no swdma */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x1f,	/* UDMA 66 */
+			.port_ops = &amd66_port_ops
+		},
+		{	/* 2: AMD 7409, no swdma errata */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x1f,	/* UDMA 66 */
+			.port_ops = &amd66_port_ops
+		},
+		{	/* 3: AMD 7411 */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x3f,	/* UDMA 100 */
+			.port_ops = &amd100_port_ops
+		},
+		{	/* 4: AMD 7441 */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x3f,	/* UDMA 100 */
+			.port_ops = &amd100_port_ops
+		},
+		{	/* 5: AMD 8111*/
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
+			.port_ops = &amd133_port_ops
+		},
+		{	/* 6: AMD 8111 UDMA 100 (Serenade) */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x3f,	/* UDMA 100, no swdma */
+			.port_ops = &amd133_port_ops
+		},
+		{	/* 7: Nvidia Nforce */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x3f,	/* UDMA 100 */
+			.port_ops = &nv100_port_ops
+		},
+		{	/* 8: Nvidia Nforce2 and later */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
+			.port_ops = &nv133_port_ops
+		},
+		{	/* 9: AMD CS5536 (Geode companion) */
+			.sht = &amd_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x3f,	/* UDMA 100 */
+			.port_ops = &amd100_port_ops
+		}
+	};
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	static int printed_version;
+	int type = id->driver_data;
+	u8 rev;
+	u8 fifo;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+	pci_read_config_byte(pdev, 0x41, &fifo);
+
+	/* Check for AMD7409 without swdma errata and if found adjust type */
+	if (type == 1 && rev > 0x7)
+		type = 2;
+
+	/* Check for AMD7411 */
+	if (type == 3)
+		/* FIFO is broken */
+		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
+	else
+		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
+
+	/* Serenade ? */
+	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
+			 pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
+		type = 6;	/* UDMA 100 only */
+
+	if (type < 3)
+		ata_pci_clear_simplex(pdev);
+
+	/* And fire it up */
+	ppi[0] = &info[type];
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int amd_reinit_one(struct pci_dev *pdev)
+{
+	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
+		u8 fifo;
+		pci_read_config_byte(pdev, 0x41, &fifo);
+		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
+			/* FIFO is broken */
+			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
+		else
+			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
+		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
+		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
+		    	ata_pci_clear_simplex(pdev);
+	}
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id amd[] = {
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_COBRA_7401),		0 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7409),		1 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_VIPER_7411),		3 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_OPUS_7441),		4 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_8111_IDE),		5 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_IDE),	7 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
+
+	{ },
+};
+
+static struct pci_driver amd_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= amd,
+	.probe 		= amd_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= amd_reinit_one,
+#endif
+};
+
+static int __init amd_init(void)
+{
+	return pci_register_driver(&amd_pci_driver);
+}
+
+static void __exit amd_exit(void)
+{
+	pci_unregister_driver(&amd_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, amd);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(amd_init);
+module_exit(amd_exit);
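
For readers following timing_setup() above (not part of the patch): the
per-drive UDMA byte written at offset + 0x10 + (3 - dn) is 0x03 when UDMA is
off, otherwise 0xc0 (the enable bits) OR'd with a code looked up from the
cycle count. The stand-alone sketch below reproduces only the "clock == 2"
(UDMA66-class) branch; amd66_udma_byte() and fit() are illustrative stand-ins
for the driver's inline expression and the kernel's FIT() macro.

#include <stdio.h>

/* Cycle-count-to-code table copied from timing_setup() */
static const unsigned char amd_cyc2udma[] = {
	6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
};

/* Clamp helper, equivalent to the kernel's FIT(v, lo, hi) */
static int fit(int v, int lo, int hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

/* UDMA timing byte for the clock == 2 (UDMA66) case:
 * 0x03 disables UDMA, otherwise 0xc0 enables it and the low bits
 * carry the encoded cycle count. */
static unsigned char amd66_udma_byte(int udma_cycles)
{
	if (!udma_cycles)
		return 0x03;
	return 0xc0 | amd_cyc2udma[fit(udma_cycles, 2, 10)];
}

int main(void)
{
	printf("udma cycles 2 -> 0x%02X\n", amd66_udma_byte(2));	/* 0xC5 */
	printf("udma off     -> 0x%02X\n", amd66_udma_byte(0));	/* 0x03 */
	return 0;
}
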
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_artop.c linux-2.6.18.x86_64.p4/drivers/ata/pata_artop.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_artop.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_artop.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,525 @@
+/*
+ *    pata_artop.c - ARTOP ATA controller driver
+ *
+ *	(C) 2006 Red Hat <alan@redhat.com>
+ *
+ *    Based in part on drivers/ide/pci/aec62xx.c
+ *	Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
+ *	865/865R fixes for Macintosh card version from a patch to the old
+ *		driver by Thibaut VARENE <varenet@parisc-linux.org>
+ *	When setting the PCI latency we must set 0x80 or higher for burst
+ *		performance Alessandro Zummo <alessandro.zummo@towertech.it>
+ *
+ *	TODO
+ *	850 serialization once the core supports it
+ *	Investigate no_dsc on 850R
+ *	Clock detect
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_artop"
+#define DRV_VERSION	"0.4.3"
+
+/*
+ *	The ARTOP has 33 MHz and "over clocked" timing tables. Until we
+ *	get PCI bus speed functionality we leave this as 0. It's a variable
+ *	for when we get the functionality and also for folks wanting to
+ *	test stuff.
+ */
+
+static int clock = 0;
+
+static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	const struct pci_bits artop_enable_bits[] = {
+		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
+		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
+	};
+
+	if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	artop6210_error_handler - Probe specified port on PATA host controller
+ *	@ap: Port to probe
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, artop6210_pre_reset,
+				    ata_std_softreset, NULL,
+				    ata_std_postreset);
+}
+
+/**
+ *	artop6260_pre_reset	-	check for 40/80 pin
+ *	@ap: Port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	The ARTOP hardware reports the cable detect bits in register 0x49.
+ *	Nothing complicated needed here.
+ */
+
+static int artop6260_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits artop_enable_bits[] = {
+		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
+		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	/* Odd numbered device ids are the units with enable bits (the -R cards) */
+	if (pdev->device % 2 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	artop6260_cable_detect	-	identify cable type
+ *	@ap: Port
+ *
+ *	Identify the cable type for the ARTOP interface in question
+ */
+
+static int artop6260_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+	pci_read_config_byte(pdev, 0x49, &tmp);
+	if (tmp & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	artop6260_error_handler - Probe specified port on PATA host controller
+ *	@ap: Port to probe
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, artop6260_pre_reset,
+				    ata_std_softreset, NULL,
+				    ata_std_postreset);
+}
+
+/**
+ *	artop6210_load_piomode - Load a set of PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device
+ *	@pio: PIO mode
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	is used both to set PIO timings in PIO mode and also to set the
+ *	matching PIO clocking for UDMA, as well as the MWDMA timings.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	const u16 timing[2][5] = {
+		{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
+		{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
+
+	};
+	/* Load the PIO timing active/recovery bits */
+	pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
+}
+
+/**
+ *	artop6210_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space. For
+ *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
+ *	the event UDMA is used the later call to set_dmamode will set the
+ *	bits as required.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	u8 ultra;
+
+	artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+
+	/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
+	pci_read_config_byte(pdev, 0x54, &ultra);
+	ultra &= ~(3 << (2 * dn));
+	pci_write_config_byte(pdev, 0x54, ultra);
+}
+
+/**
+ *	artop6260_load_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *	@pio: PIO mode
+ *
+ *	Set PIO mode for device, in host controller PCI config space. The
+ *	ARTOP6260 and relatives store the timing data differently.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	const u8 timing[2][5] = {
+		{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
+		{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
+
+	};
+	/* Load the PIO timing active/recovery bits */
+	pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
+}
+
+/**
+ *	artop6260_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space. For
+ *	ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
+ *	the event UDMA is used the later call to set_dmamode will set the
+ *	bits as required.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	u8 ultra;
+
+	artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+
+	/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
+	pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
+	ultra &= ~(7 << (4  * adev->devno));	/* One nibble per drive */
+	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
+}
+
+/**
+ *	artop6210_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set DMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio;
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int dn = adev->devno + 2 * ap->port_no;
+	u8 ultra;
+
+	if (adev->dma_mode == XFER_MW_DMA_0)
+		pio = 1;
+	else
+		pio = 4;
+
+	/* Load the PIO timing active/recovery bits */
+	artop6210_load_piomode(ap, adev, pio);
+
+	pci_read_config_byte(pdev, 0x54, &ultra);
+	ultra &= ~(3 << (2 * dn));
+
+	/* Add ultra DMA bits if in UDMA mode */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
+		if (mode == 0)
+			mode = 1;
+		ultra |= (mode << (2 * dn));
+	}
+	pci_write_config_byte(pdev, 0x54, ultra);
+}
+
+/**
+ *	artop6260_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring
+ *
+ *	Set DMA mode for device, in host controller PCI config space. The
+ *	ARTOP6260 and relatives store the timing data differently.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	u8 ultra;
+
+	if (adev->dma_mode == XFER_MW_DMA_0)
+		pio = 1;
+	else
+		pio = 4;
+
+	/* Load the PIO timing active/recovery bits */
+	artop6260_load_piomode(ap, adev, pio);
+
+	/* Add ultra DMA bits if in UDMA mode */
+	pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
+	ultra &= ~(7 << (4  * adev->devno));	/* One nibble per drive */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
+		if (mode == 0)
+			mode = 1;
+		ultra |= (mode << (4 * adev->devno));
+	}
+	pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
+}
+
+static struct scsi_host_template artop_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations artop6210_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= artop6210_set_piomode,
+	.set_dmamode		= artop6210_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= artop6210_error_handler,
+	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations artop6260_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= artop6260_set_piomode,
+	.set_dmamode		= artop6260_set_dmamode,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= artop6260_error_handler,
+	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= artop6260_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	artop_init_one - Register ARTOP ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in artop_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static int printed_version;
+	static const struct ata_port_info info_6210 = {
+		.sht		= &artop_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= ATA_UDMA2,
+		.port_ops	= &artop6210_ops,
+	};
+	static const struct ata_port_info info_626x = {
+		.sht		= &artop_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= ATA_UDMA4,
+		.port_ops	= &artop6260_ops,
+	};
+	static const struct ata_port_info info_626x_fast = {
+		.sht		= &artop_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= ATA_UDMA5,
+		.port_ops	= &artop6260_ops,
+	};
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
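+	/* id->driver_data selects the variant: 0 = 6210, 1 = 6260,
+	   2 = 626x, using the UDMA100 table when the fast variant is
+	   detected below */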
+	if (id->driver_data == 0) {	/* 6210 variant */
+		ppi[0] = &info_6210;
+		ppi[1] = &ata_dummy_port_info;
+		/* BIOS may have left us in UDMA, clear it before libata probe */
+		pci_write_config_byte(pdev, 0x54, 0);
+		/* For the moment (also lacks dsc) */
+		printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n");
+		printk(KERN_WARNING "Secondary ATA ports will not be activated.\n");
+	}
+	else if (id->driver_data == 1)	/* 6260 */
+		ppi[0] = &info_626x;
+	else if (id->driver_data == 2)	{ /* 6260 or 6260 + fast */
+		unsigned long io = pci_resource_start(pdev, 4);
+		u8 reg;
+
+		ppi[0] = &info_626x;
+		if (inb(io) & 0x10)
+			ppi[0] = &info_626x_fast;
+		/* Mac systems come up with some registers not set the way
+		   we need them */
+
+		/* Clear reset & test bits */
+		pci_read_config_byte(pdev, 0x49, &reg);
+		pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
+
+		/* PCI latency must be > 0x80 for burst mode, tweak it
+		 * if required.
+		 */
+		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
+		if (reg <= 0x80)
+			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
+
+		/* Enable IRQ output and burst mode */
+		pci_read_config_byte(pdev, 0x4a, &reg);
+		pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+
+	}
+
+	BUG_ON(ppi[0] == NULL);
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id artop_pci_tbl[] = {
+	{ PCI_VDEVICE(ARTOP, 0x0005), 0 },
+	{ PCI_VDEVICE(ARTOP, 0x0006), 1 },
+	{ PCI_VDEVICE(ARTOP, 0x0007), 1 },
+	{ PCI_VDEVICE(ARTOP, 0x0008), 2 },
+	{ PCI_VDEVICE(ARTOP, 0x0009), 2 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver artop_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= artop_pci_tbl,
+	.probe			= artop_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+static int __init artop_init(void)
+{
+	return pci_register_driver(&artop_pci_driver);
+}
+
+static void __exit artop_exit(void)
+{
+	pci_unregister_driver(&artop_pci_driver);
+}
+
+module_init(artop_init);
+module_exit(artop_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_atiixp.c linux-2.6.18.x86_64.p4/drivers/ata/pata_atiixp.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_atiixp.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_atiixp.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,321 @@
+/*
+ * pata_atiixp.c 	- ATI PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * Based on
+ *
+ *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
+ *
+ *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
+ *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_atiixp"
+#define DRV_VERSION "0.4.5"
+
+enum {
+	ATIIXP_IDE_PIO_TIMING	= 0x40,
+	ATIIXP_IDE_MWDMA_TIMING	= 0x44,
+	ATIIXP_IDE_PIO_CONTROL	= 0x48,
+	ATIIXP_IDE_PIO_MODE	= 0x4a,
+	ATIIXP_IDE_UDMA_CONTROL	= 0x54,
+	ATIIXP_IDE_UDMA_MODE 	= 0x56
+};
+
+static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits atiixp_enable_bits[] = {
+		{ 0x48, 1, 0x01, 0x00 },
+		{ 0x48, 1, 0x08, 0x00 }
+	};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+static void atiixp_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL,   ata_std_postreset);
+}
+
+static int atiixp_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 udma;
+
+	/* Hack from drivers/ide/pci. Really we want to do raw cable
+	   detection rather than follow the BIOS's mode guess */
+	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
+	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
+		return  ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	atiixp_set_pio_timing	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@pio: PIO mode to program
+ *
+ *	Called by both the pio and dma setup functions to set the controller
+ *	timings for PIO transfers. We must load both the mode number and
+ *	timing values into the controller.
+ */
+
+static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dn = 2 * ap->port_no + adev->devno;
+
+	/* Check this is correct - the order is odd in both drivers */
+	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+	u16 pio_mode_data, pio_timing_data;
+
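+	/* The PIO mode register (0x4a) holds one nibble per drive; the PIO
+	   timing register (0x40) holds one byte per drive, with the master
+	   and slave bytes swapped (hence the devno ^ 1 above) */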
+	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
+	pio_mode_data &= ~(0x7 << (4 * dn));
+	pio_mode_data |= pio << (4 * dn);
+	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
+
+	pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
+	pio_timing_data &= ~(0xFF << timing_shift);
+	pio_timing_data |= (pio_timings[pio] << timing_shift);
+	pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
+}
+
+/**
+ *	atiixp_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. We use a shared helper for this
+ *	as the DMA setup must also adjust the PIO timing information.
+ */
+
+static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ *	atiixp_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the DMA mode setup. We use timing tables for most
+ *	modes but must tune an appropriate PIO mode to match.
+ */
+
+static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dma = adev->dma_mode;
+	int dn = 2 * ap->port_no + adev->devno;
+	int wanted_pio;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		u16 udma_mode_data;
+
+		dma -= XFER_UDMA_0;
+
+		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
+		udma_mode_data &= ~(0x7 << (4 * dn));
+		udma_mode_data |= dma << (4 * dn);
+		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
+	} else {
+		u16 mwdma_timing_data;
+		/* Check this is correct - the order is odd in both drivers */
+		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+
+		dma -= XFER_MW_DMA_0;
+
+		pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
+		mwdma_timing_data &= ~(0xFF << timing_shift);
+		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
+		pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
+	}
+	/*
+	 *	We must now look at the PIO mode situation. We may need to
+	 *	adjust the PIO mode to keep the timings acceptable
+	 */
+	 if (adev->dma_mode >= XFER_MW_DMA_2)
+	 	wanted_pio = 4;
+	else if (adev->dma_mode == XFER_MW_DMA_1)
+		wanted_pio = 3;
+	else if (adev->dma_mode == XFER_MW_DMA_0)
+		wanted_pio = 0;
+	else BUG();
+
+	if (adev->pio_mode != wanted_pio)
+		atiixp_set_pio_timing(ap, adev, wanted_pio);
+}
+
+/**
+ *	atiixp_bmdma_start	-	DMA start callback
+ *	@qc: Command in progress
+ *
+ *	When DMA begins we need to ensure that the UDMA control
+ *	register for the channel is correctly set.
+ */
+
+static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dn = (2 * ap->port_no) + adev->devno;
+	u16 tmp16;
+
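+	/* ATIIXP_IDE_UDMA_CONTROL carries one UDMA enable bit per drive */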
+	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+	if (adev->dma_mode >= XFER_UDMA_0)
+		tmp16 |= (1 << dn);
+	else
+		tmp16 &= ~(1 << dn);
+	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	atiixp_bmdma_stop	-	DMA stop callback
+ *	@qc: Command in progress
+ *
+ *	DMA has completed. Clear the UDMA flag as the next operations will
+ *	be PIO ones not UDMA data transfer.
+ */
+
+static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int dn = (2 * ap->port_no) + qc->dev->devno;
+	u16 tmp16;
+
+	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+	tmp16 &= ~(1 << dn);
+	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+	ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template atiixp_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations atiixp_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= atiixp_set_piomode,
+	.set_dmamode	= atiixp_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= atiixp_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= atiixp_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= atiixp_bmdma_start,
+	.bmdma_stop	= atiixp_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &atiixp_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x06,	/* No MWDMA0 support */
+		.udma_mask = 0x3F,
+		.port_ops = &atiixp_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id atiixp[] = {
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
+
+	{ },
+};
+
+static struct pci_driver atiixp_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= atiixp,
+	.probe 		= atiixp_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.resume		= ata_pci_device_resume,
+	.suspend	= ata_pci_device_suspend,
+#endif
+};
+
+static int __init atiixp_init(void)
+{
+	return pci_register_driver(&atiixp_pci_driver);
+}
+
+
+static void __exit atiixp_exit(void)
+{
+	pci_unregister_driver(&atiixp_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, atiixp);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(atiixp_init);
+module_exit(atiixp_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_cmd640.c linux-2.6.18.x86_64.p4/drivers/ata/pata_cmd640.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_cmd640.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_cmd640.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,307 @@
+/*
+ * pata_cmd640.c 	- CMD640 PCI PATA for new ATA layer
+ *			  (C) 2007 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * Based upon
+ *  linux/drivers/ide/pci/cmd640.c		Version 1.02  Sep 01, 1996
+ *
+ *  Copyright (C) 1995-1996  Linus Torvalds & authors (see driver)
+ *
+ *	This drives only the PCI version of the controller. If you have a
+ *	VLB one then we have enough docs to support it but you can write
+ *	your own code.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cmd640"
+#define DRV_VERSION "0.0.5"
+
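+/* Per-port private data: cached DRWTIM23 values for the shared-timing
+   second channel and the drive they were last loaded for */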
+struct cmd640_reg {
+	int last;
+	u8 reg58[ATA_MAX_DEVICES];
+};
+
+enum {
+	CFR = 0x50,
+	CNTRL = 0x51,
+	CMDTIM = 0x52,
+	ARTIM0 = 0x53,
+	DRWTIM0 = 0x54,
+	ARTIM23 = 0x57,
+	DRWTIM23 = 0x58,
+	BRST = 0x59
+};
+
+/**
+ *	cmd640_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup.
+ */
+
+static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct cmd640_reg *timing = ap->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_timing t;
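+	/* 33MHz PCI clock period, in the units ata_timing_compute() expects */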
+	const unsigned long T = 1000000 / 33;
+	const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
+	u8 reg;
+	int arttim = ARTIM0 + 2 * adev->devno;
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
+		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+		return;
+	}
+
+	/* The second channel has shared timings and the setup timing is
+	   messy to switch, so merge it for the worst case */
+	if (ap->port_no && pair) {
+		struct ata_timing p;
+		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
+	}
+
+	/* Make the timings fit */
+	if (t.recover > 16) {
+		t.active += t.recover - 16;
+		t.recover = 16;
+	}
+	if (t.active > 16)
+		t.active = 16;
+
+	/* Now convert the clocks into values we can actually stuff into
+	   the chip */
+
+	if (t.recover > 1)
+		t.recover--;	/* 640B only */
+	else
+		t.recover = 15;
+
+	if (t.setup > 4)
+		t.setup = 0xC0;
+	else
+		t.setup = setup_data[t.setup];
+
+	if (ap->port_no == 0) {
+		t.active &= 0x0F;	/* 0 = 16 */
+
+		/* Load setup timing */
+		pci_read_config_byte(pdev, arttim, &reg);
+		reg &= 0x3F;
+		reg |= t.setup;
+		pci_write_config_byte(pdev, arttim, reg);
+
+		/* Load active/recovery */
+		pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
+	} else {
+		/* Save the shared timings for channel, they will be loaded
+		   by qc_issue_prot. Reloading the setup time is expensive
+		   so we keep a merged one loaded */
+		pci_read_config_byte(pdev, ARTIM23, &reg);
+		reg &= 0x3F;
+		reg |= t.setup;
+		pci_write_config_byte(pdev, ARTIM23, reg);
+		timing->reg58[adev->devno] = (t.active << 4) | t.recover;
+	}
+}
+
+
+/**
+ *	cmd640_qc_issue_prot	-	command issue hook
+ *	@qc: Command to be issued
+ *
+ *	Channel 1 has shared timings. We must reprogram the timing
+ *	register each time we switch between drive 2 and drive 3.
+ */
+
+static unsigned int cmd640_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct cmd640_reg *timing = ap->private_data;
+
+	if (ap->port_no != 0 && adev->devno != timing->last) {
+		pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
+		timing->last = adev->devno;
+	}
+	return ata_qc_issue_prot(qc);
+}
+
+/**
+ *	cmd640_port_start	-	port setup
+ *	@ap: ATA port being set up
+ *
+ *	The CMD640 needs to maintain private data structures so we
+ *	allocate space here.
+ */
+
+static int cmd640_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct cmd640_reg *timing;
+
+	int ret = ata_port_start(ap);
+	if (ret < 0)
+		return ret;
+
+	timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
+	if (timing == NULL)
+		return -ENOMEM;
+	timing->last = -1;	/* Force a load */
+	ap->private_data = timing;
+	return ret;
+}
+
+static struct scsi_host_template cmd640_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cmd640_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cmd640_set_piomode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= cmd640_qc_issue_prot,
+
+	/* In theory this is not needed once we kill the prefetcher */
+	.data_xfer	= ata_data_xfer_noirq,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= cmd640_port_start,
+};
+
+static void cmd640_hardware_init(struct pci_dev *pdev)
+{
+	u8 r;
+	u8 ctrl;
+
+	/* CMD640 detected, commiserations */
+	pci_write_config_byte(pdev, 0x5B, 0x00);
+	/* Get version info */
+	pci_read_config_byte(pdev, CFR, &r);
+	/* PIO0 command cycles */
+	pci_write_config_byte(pdev, CMDTIM, 0);
+	/* 512 byte bursts (sector) */
+	pci_write_config_byte(pdev, BRST, 0x40);
+	/*
+	 * A reporter a long time ago
+	 * Had problems with the data fifo
+	 * So don't run the risk
+	 * Of putting crap on the disk
+	 * For its better just to go slow
+	 */
+	/* Do channel 0 */
+	pci_read_config_byte(pdev, CNTRL, &ctrl);
+	pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0);
+	/* Ditto for channel 1 */
+	pci_read_config_byte(pdev, ARTIM23, &ctrl);
+	ctrl |= 0x0C;
+	pci_write_config_byte(pdev, ARTIM23, ctrl);
+}
+
+static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &cmd640_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &cmd640_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	cmd640_hardware_init(pdev);
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static int cmd640_reinit_one(struct pci_dev *pdev)
+{
+	cmd640_hardware_init(pdev);
+#ifdef CONFIG_PM
+	return ata_pci_device_resume(pdev);
+#else
+	return 0;
+#endif
+}
+
+static const struct pci_device_id cmd640[] = {
+	{ PCI_VDEVICE(CMD, 0x640), 0 },
+	{ },
+};
+
+static struct pci_driver cmd640_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cmd640,
+	.probe 		= cmd640_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+#endif
+	.resume		= cmd640_reinit_one,
+};
+
+static int __init cmd640_init(void)
+{
+	return pci_register_driver(&cmd640_pci_driver);
+}
+
+static void __exit cmd640_exit(void)
+{
+	pci_unregister_driver(&cmd640_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cmd640);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cmd640_init);
+module_exit(cmd640_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_cmd64x.c linux-2.6.18.x86_64.p4/drivers/ata/pata_cmd64x.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_cmd64x.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_cmd64x.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,514 @@
+/*
+ * pata_cmd64x.c 	- CMD64x PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * Based upon
+ * linux/drivers/ide/pci/cmd64x.c		Version 1.30	Sept 10, 2002
+ *
+ * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
+ *           Note, this driver is not used at all on other systems because
+ *           there the "BIOS" has done all of the following already.
+ *           Due to massive hardware bugs, UltraDMA is only supported
+ *           on the 646U2 and not on the 646U.
+ *
+ * Copyright (C) 1998		Eddie C. Dost  (ecd@skynet.be)
+ * Copyright (C) 1998		David S. Miller (davem@redhat.com)
+ *
+ * Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
+ *
+ * TODO
+ *	Testing work
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cmd64x"
+#define DRV_VERSION "0.2.3"
+
+/*
+ * CMD64x specific registers definition.
+ */
+
+enum {
+	CFR 		= 0x50,
+		CFR_INTR_CH0  = 0x02,
+	CNTRL 		= 0x51,
+		CNTRL_DIS_RA0 = 0x40,
+		CNTRL_DIS_RA1 = 0x80,
+		CNTRL_ENA_2ND = 0x08,
+	CMDTIM 		= 0x52,
+	ARTTIM0 	= 0x53,
+	DRWTIM0 	= 0x54,
+	ARTTIM1 	= 0x55,
+	DRWTIM1 	= 0x56,
+	ARTTIM23 	= 0x57,
+		ARTTIM23_DIS_RA2  = 0x04,
+		ARTTIM23_DIS_RA3  = 0x08,
+		ARTTIM23_INTR_CH1 = 0x10,
+	ARTTIM2 	= 0x57,
+	ARTTIM3 	= 0x57,
+	DRWTIM23	= 0x58,
+	DRWTIM2 	= 0x58,
+	BRST 		= 0x59,
+	DRWTIM3 	= 0x5b,
+	BMIDECR0	= 0x70,
+	MRDMODE		= 0x71,
+		MRDMODE_INTR_CH0 = 0x04,
+		MRDMODE_INTR_CH1 = 0x08,
+		MRDMODE_BLK_CH0  = 0x10,
+		MRDMODE_BLK_CH1	 = 0x20,
+	BMIDESR0	= 0x72,
+	UDIDETCR0	= 0x73,
+	DTPR0		= 0x74,
+	BMIDECR1	= 0x78,
+	BMIDECSR	= 0x79,
+	BMIDESR1	= 0x7A,
+	UDIDETCR1	= 0x7B,
+	DTPR1		= 0x7C
+};
+
+static int cmd648_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 r;
+
+	/* Check cable detect bits */
+	pci_read_config_byte(pdev, BMIDECSR, &r);
+	if (r & (1 << ap->port_no))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	cmd64x_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup.
+ */
+
+static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_timing t;
+	const unsigned long T = 1000000 / 33;
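+	/* Address setup encodings for ARTTIM bits 7:6; more than 4 clocks
+	   uses 0xC0 (see below) */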
+	const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
+
+	u8 reg;
+
+	/* Port layout is not logical so use a table */
+	const u8 arttim_port[2][2] = {
+		{ ARTTIM0, ARTTIM1 },
+		{ ARTTIM23, ARTTIM23 }
+	};
+	const u8 drwtim_port[2][2] = {
+		{ DRWTIM0, DRWTIM1 },
+		{ DRWTIM2, DRWTIM3 }
+	};
+
+	int arttim = arttim_port[ap->port_no][adev->devno];
+	int drwtim = drwtim_port[ap->port_no][adev->devno];
+
+
+	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
+		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+		return;
+	}
+	if (ap->port_no) {
+		/* Slave has shared address setup */
+		struct ata_device *pair = ata_dev_pair(adev);
+
+		if (pair) {
+			struct ata_timing tp;
+			ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
+			ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+		}
+	}
+
+	printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
+		t.active, t.recover, t.setup);
+	if (t.recover > 16) {
+		t.active += t.recover - 16;
+		t.recover = 16;
+	}
+	if (t.active > 16)
+		t.active = 16;
+
+	/* Now convert the clocks into values we can actually stuff into
+	   the chip */
+
+	if (t.recover > 1)
+		t.recover--;
+	else
+		t.recover = 15;
+
+	if (t.setup > 4)
+		t.setup = 0xC0;
+	else
+		t.setup = setup_data[t.setup];
+
+	t.active &= 0x0F;	/* 0 = 16 */
+
+	/* Load setup timing */
+	pci_read_config_byte(pdev, arttim, &reg);
+	reg &= 0x3F;
+	reg |= t.setup;
+	pci_write_config_byte(pdev, arttim, reg);
+
+	/* Load active/recovery */
+	pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
+}
+
+/**
+ *	cmd64x_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the DMA mode setup.
+ */
+
+static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
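+	/* Clocking values indexed by (dma_mode - XFER_UDMA_0) and
+	   (dma_mode - XFER_MW_DMA_0) respectively */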
+	static const u8 udma_data[] = {
+		0x30, 0x20, 0x10, 0x20, 0x10, 0x00
+	};
+	static const u8 mwdma_data[] = {
+		0x30, 0x20, 0x10
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 regU, regD;
+
+	int pciU = UDIDETCR0 + 8 * ap->port_no;
+	int pciD = BMIDESR0 + 8 * ap->port_no;
+	int shift = 2 * adev->devno;
+
+	pci_read_config_byte(pdev, pciD, &regD);
+	pci_read_config_byte(pdev, pciU, &regU);
+
+	/* DMA bits off */
+	regD &= ~(0x20 << adev->devno);
+	/* DMA control bits */
+	regU &= ~(0x30 << shift);
+	/* DMA timing bits */
+	regU &= ~(0x05 << adev->devno);
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		/* Merge the timing value */
+		regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
+		/* Merge the control bits */
+		regU |= 1 << adev->devno; /* UDMA on */
+		if (adev->dma_mode > XFER_UDMA_2)	/* 15nS timing */
+			regU |= 4 << adev->devno;
+	} else
+		regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift;
+
+	regD |= 0x20 << adev->devno;
+
+	pci_write_config_byte(pdev, pciU, regU);
+	pci_write_config_byte(pdev, pciD, regD);
+}
+
+/**
+ *	cmd648_bmdma_stop	-	DMA stop callback
+ *	@qc: Command in progress
+ *
+ *	DMA has completed.
+ */
+
+static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 dma_intr;
+	int dma_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
+	int dma_reg = ap->port_no ? ARTTIM2 : CFR;
+
+	ata_bmdma_stop(qc);
+
+	pci_read_config_byte(pdev, dma_reg, &dma_intr);
+	pci_write_config_byte(pdev, dma_reg, dma_intr | dma_mask);
+}
+
+/**
+ *	cmd646r1_bmdma_stop	-	DMA stop callback
+ *	@qc: Command in progress
+ *
+ *	Stub for now while investigating the r1 quirk in the old driver.
+ */
+
+static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template cmd64x_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cmd64x_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cmd64x_set_piomode,
+	.set_dmamode	= cmd64x_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations cmd646r1_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cmd64x_set_piomode,
+	.set_dmamode	= cmd64x_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= cmd646r1_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations cmd648_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cmd64x_set_piomode,
+	.set_dmamode	= cmd64x_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= cmd648_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= cmd648_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	u32 class_rev;
+
+	static const struct ata_port_info cmd_info[6] = {
+		{	/* CMD 643 - no UDMA */
+			.sht = &cmd64x_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.port_ops = &cmd64x_port_ops
+		},
+		{	/* CMD 646 with broken UDMA */
+			.sht = &cmd64x_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.port_ops = &cmd64x_port_ops
+		},
+		{	/* CMD 646 with working UDMA */
+			.sht = &cmd64x_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = ATA_UDMA1,
+			.port_ops = &cmd64x_port_ops
+		},
+		{	/* CMD 646 rev 1  */
+			.sht = &cmd64x_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.port_ops = &cmd646r1_port_ops
+		},
+		{	/* CMD 648 */
+			.sht = &cmd64x_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = ATA_UDMA2,
+			.port_ops = &cmd648_port_ops
+		},
+		{	/* CMD 649 */
+			.sht = &cmd64x_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = ATA_UDMA3,
+			.port_ops = &cmd648_port_ops
+		}
+	};
+	const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
+	u8 mrdmode;
+
+	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+	class_rev &= 0xFF;
+
+	if (id->driver_data == 0)	/* 643 */
+		ata_pci_clear_simplex(pdev);
+
+	if (pdev->device == PCI_DEVICE_ID_CMD_646) {
+		/* Does UDMA work ? */
+		if (class_rev > 4)
+			ppi[0] = &cmd_info[2];
+		/* Early rev with other problems ? */
+		else if (class_rev == 1)
+			ppi[0] = &cmd_info[3];
+	}
+
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+	pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+	mrdmode &= ~ 0x30;	/* IRQ set up */
+	mrdmode |= 0x02;	/* Memory read line enable */
+	pci_write_config_byte(pdev, MRDMODE, mrdmode);
+
+	/* Force PIO 0 here.. */
+
+	/* PPC specific fixup copied from old driver */
+#ifdef CONFIG_PPC
+	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int cmd64x_reinit_one(struct pci_dev *pdev)
+{
+	u8 mrdmode;
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+	pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+	mrdmode &= ~ 0x30;	/* IRQ set up */
+	mrdmode |= 0x02;	/* Memory read line enable */
+	pci_write_config_byte(pdev, MRDMODE, mrdmode);
+#ifdef CONFIG_PPC
+	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id cmd64x[] = {
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 4 },
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 5 },
+
+	{ },
+};
+
+static struct pci_driver cmd64x_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cmd64x,
+	.probe 		= cmd64x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cmd64x_reinit_one,
+#endif
+};
+
+static int __init cmd64x_init(void)
+{
+	return pci_register_driver(&cmd64x_pci_driver);
+}
+
+static void __exit cmd64x_exit(void)
+{
+	pci_unregister_driver(&cmd64x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cmd64x);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cmd64x_init);
+module_exit(cmd64x_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_cs5520.c linux-2.6.18.x86_64.p4/drivers/ata/pata_cs5520.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_cs5520.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_cs5520.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,391 @@
+/*
+ *	IDE tuning and bus mastering support for the CS5510/CS5520
+ *	chipsets
+ *
+ *	The CS5510/CS5520 are slightly unusual devices. Unlike the
+ *	typical IDE controllers they do bus mastering with the drive in
+ *	PIO mode and smarter silicon.
+ *
+ *	The practical upshot of this is that we must always tune the
+ *	drive for the right PIO mode. We must also ignore all the blacklists
+ *	and the drive bus mastering DMA information. Also to confuse matters
+ *	further we can do DMA on PIO only drives.
+ *
+ *	DMA on the 5510 also requires we disable_hlt() during DMA on early
+ *	revisions.
+ *
+ *	*** This driver is strictly experimental ***
+ *
+ *	(c) Copyright Red Hat Inc 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Documentation:
+ *	Not publicly available.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_cs5520"
+#define DRV_VERSION	"0.6.5"
+
+struct pio_clocks
+{
+	int address;
+	int assert;
+	int recovery;
+};
+
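+/* Address setup, command assert and recovery clocks for PIO modes 0-4 */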
+static const struct pio_clocks cs5520_pio_clocks[]={
+	{3, 6, 11},
+	{2, 5, 6},
+	{1, 4, 3},
+	{1, 3, 2},
+	{1, 2, 1}
+};
+
+/**
+ *	cs5520_set_timings	-	program PIO timings
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *	@pio: PIO mode (an XFER_PIO_x value)
+ *
+ *	Program the PIO mode timings for the controller according to the pio
+ *	clocking table.
+ */
+
+static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int slave = adev->devno;
+
+	pio -= XFER_PIO_0;
+
+	/* Channel command timing */
+	pci_write_config_byte(pdev, 0x62 + ap->port_no,
+				(cs5520_pio_clocks[pio].recovery << 4) |
+				(cs5520_pio_clocks[pio].assert));
+	/* FIXME: should these use address ? */
+	/* Read command timing */
+	pci_write_config_byte(pdev, 0x64 +  4*ap->port_no + slave,
+				(cs5520_pio_clocks[pio].recovery << 4) |
+				(cs5520_pio_clocks[pio].assert));
+	/* Write command timing */
+	pci_write_config_byte(pdev, 0x66 +  4*ap->port_no + slave,
+				(cs5520_pio_clocks[pio].recovery << 4) |
+				(cs5520_pio_clocks[pio].assert));
+}
+
+/**
+ *	cs5520_enable_dma	-	turn on DMA bits
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Turn on the DMA bits for this disk. Needed because the BIOS probably
+ *	has not done the work for us. Belongs in the core SATA code.
+ */
+
+static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
+{
+	/* Set the DMA enable/disable flag */
+	u8 reg = ioread8(ap->ioaddr.bmdma_addr + 0x02);
+	reg |= 1<<(adev->devno + 5);
+	iowrite8(reg, ap->ioaddr.bmdma_addr + 0x02);
+}
+
+/**
+ *	cs5520_set_dmamode	-	program DMA timings
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Program the DMA mode timings for the controller according to the pio
+ *	clocking table. Note that this device sets the DMA timings to PIO
+ *	mode values. This may seem bizarre but the 5520 architecture talks
+ *	PIO mode to the disk and DMA mode to the controller so the underlying
+ *	transfers are PIO timed.
+ */
+
+static void cs5520_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 };
+	cs5520_set_timings(ap, adev, dma_xlate[adev->dma_mode]);
+	cs5520_enable_dma(ap, adev);
+}
+
+/**
+ *	cs5520_set_piomode	-	program PIO timings
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *
+ *	Program the PIO mode timings for the controller according to the pio
+ *	clocking table. We know pio_mode will equal dma_mode because of the
+ *	CS5520 architecture. At least once we turned DMA on and wrote a
+ *	mode setter.
+ */
+
+static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	cs5520_set_timings(ap, adev, adev->pio_mode);
+}
+
+static struct scsi_host_template cs5520_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cs5520_port_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= cs5520_set_piomode,
+	.set_dmamode		= cs5520_set_dmamode,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ata_port_info pi = {
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f,
+		.port_ops	= &cs5520_port_ops,
+	};
+	const struct ata_port_info *ppi[2];
+	u8 pcicfg;
+	void *iomap[5];
+	struct ata_host *host;
+	struct ata_ioports *ioaddr;
+	int i, rc;
+
+	/* IDE port enable bits */
+	pci_read_config_byte(pdev, 0x60, &pcicfg);
+
+	/* Check if the ATA ports are enabled */
+	if ((pcicfg & 3) == 0)
+		return -ENODEV;
+
+	ppi[0] = ppi[1] = &ata_dummy_port_info;
+	if (pcicfg & 1)
+		ppi[0] = &pi;
+	if (pcicfg & 2)
+		ppi[1] = &pi;
+
+	if ((pcicfg & 0x40) == 0) {
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "DMA mode disabled. Enabling.\n");
+		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+	}
+
+	pi.mwdma_mask = id->driver_data;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+
+	/* Perform set up for DMA */
+	if (pci_enable_device_bars(pdev, 1<<2)) {
+		printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
+		return -ENODEV;
+	}
+
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
+		return -ENODEV;
+	}
+	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
+		return -ENODEV;
+	}
+
+	/* Map IO ports and initialize host accordingly */
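+	/* The command/control blocks live at the legacy ISA addresses;
+	   only BAR2 (the BMDMA registers) is mapped as a PCI BAR */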
+	iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8);
+	iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1);
+	iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8);
+	iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1);
+	iomap[4] = pcim_iomap(pdev, 2, 0);
+
+	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
+		return -ENOMEM;
+
+	ioaddr = &host->ports[0]->ioaddr;
+	ioaddr->cmd_addr = iomap[0];
+	ioaddr->ctl_addr = iomap[1];
+	ioaddr->altstatus_addr = iomap[1];
+	ioaddr->bmdma_addr = iomap[4];
+	ata_std_ports(ioaddr);
+
+	ioaddr = &host->ports[1]->ioaddr;
+	ioaddr->cmd_addr = iomap[2];
+	ioaddr->ctl_addr = iomap[3];
+	ioaddr->altstatus_addr = iomap[3];
+	ioaddr->bmdma_addr = iomap[4] + 8;
+	ata_std_ports(ioaddr);
+
+	/* activate the host */
+	pci_set_master(pdev);
+	rc = ata_host_start(host);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < 2; i++) {
+		static const int irq[] = { 14, 15 };
+		struct ata_port *ap = host->ports[i];
+
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
+				      ata_interrupt, 0, DRV_NAME, host);
+		if (rc)
+			return rc;
+	}
+
+	return ata_host_register(host, &cs5520_sht);
+}
+
+/**
+ *	cs5520_remove_one	-	device unload
+ *	@pdev: PCI device being removed
+ *
+ *	Handle an unplug/unload event for a PCI device. Unload the
+ *	PCI driver but do not use the default handler as we manage
+ *	resources ourself and *MUST NOT* disable the device as it has
+ *	other functions.
+ */
+
+static void __devexit cs5520_remove_one(struct pci_dev *pdev)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+	struct ata_host *host = dev_get_drvdata(dev);
+
+	ata_host_detach(host);
+}
+
+#ifdef CONFIG_PM
+/**
+ *	cs5520_reinit_one	-	device resume
+ *	@pdev: PCI device
+ *
+ *	Do any reconfiguration work needed by a resume from RAM. We need
+ *	to restore DMA mode support on BIOSen which disabled it
+ */
+
+static int cs5520_reinit_one(struct pci_dev *pdev)
+{
+	u8 pcicfg;
+	pci_read_config_byte(pdev, 0x60, &pcicfg);
+	if ((pcicfg & 0x40) == 0)
+		pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+	return ata_pci_device_resume(pdev);
+}
+
+/**
+ *	cs5520_pci_device_suspend	-	device suspend
+ *	@pdev: PCI device
+ *
+ *	We have to cut and waste bits from the standard method because
+ *	the 5520 is a bit odd and not just a pure ATA device. As a result
+ *	we must not disable it. The needed code is short and this avoids
+ *	chip specific mess in the core code.
+ */
+
+static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	pci_save_state(pdev);
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+/* For now keep DMA off. We can set it for all but A rev CS5510 once the
+   core ATA code can handle it */
+
+static const struct pci_device_id pata_cs5520[] = {
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+
+	{ },
+};
+
+static struct pci_driver cs5520_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= pata_cs5520,
+	.probe 		= cs5520_init_one,
+	.remove		= cs5520_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= cs5520_pci_device_suspend,
+	.resume		= cs5520_reinit_one,
+#endif
+};
+
+static int __init cs5520_init(void)
+{
+	return pci_register_driver(&cs5520_pci_driver);
+}
+
+static void __exit cs5520_exit(void)
+{
+	pci_unregister_driver(&cs5520_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pata_cs5520);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cs5520_init);
+module_exit(cs5520_exit);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_cs5530.c linux-2.6.18.x86_64.p4/drivers/ata/pata_cs5530.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_cs5530.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_cs5530.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,410 @@
+/*
+ * pata-cs5530.c 	- CS5530 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based upon cs5530.c by Mark Lord.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ *	Available from AMD web site.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME	"pata_cs5530"
+#define DRV_VERSION	"0.7.3"
+
+static void __iomem *cs5530_port_base(struct ata_port *ap)
+{
+	unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;
+
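+	/* Channel timing registers live at BMDMA base + 0x20 / + 0x30 */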
+	return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
+}
+
+/**
+ *	cs5530_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. This is fairly simple on the CS5530
+ *	chips.
+ */
+
+static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const unsigned int cs5530_pio_timings[2][5] = {
+		{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
+		{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
+	};
+	void __iomem *base = cs5530_port_base(ap);
+	u32 tuning;
+	int format;
+
+	/* Find out which table to use */
+	tuning = ioread32(base + 0x04);
+	format = (tuning & 0x80000000UL) ? 1 : 0;
+
+	/* Now load the right timing register */
+	if (adev->devno)
+		base += 0x08;
+
+	iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
+}
+
+/**
+ *	cs5530_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	We cannot mix MWDMA and UDMA without reloading the timings each
+ *	time we switch between master and slave. We track the last DMA
+ *	setup in order to minimise reloads.
+ */
+
+static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	void __iomem *base = cs5530_port_base(ap);
+	u32 tuning, timing = 0;
+	u8 reg;
+
+	/* Find out which table to use */
+	tuning = ioread32(base + 0x04);
+
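+	/* Hardware timing words for each supported UDMA/MWDMA mode */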
+	switch(adev->dma_mode) {
+		case XFER_UDMA_0:
+			timing  = 0x00921250;break;
+		case XFER_UDMA_1:
+			timing  = 0x00911140;break;
+		case XFER_UDMA_2:
+			timing  = 0x00911030;break;
+		case XFER_MW_DMA_0:
+			timing  = 0x00077771;break;
+		case XFER_MW_DMA_1:
+			timing  = 0x00012121;break;
+		case XFER_MW_DMA_2:
+			timing  = 0x00002020;break;
+		default:
+			BUG();
+	}
+	/* Merge in the PIO format bit */
+	timing |= (tuning & 0x80000000UL);
+	if (adev->devno == 0) /* Master */
+		iowrite32(timing, base + 0x04);
+	else {
+		if (timing & 0x00100000)
+			tuning |= 0x00100000;	/* UDMA for both */
+		else
+			tuning &= ~0x00100000;	/* MWDMA for both */
+		iowrite32(tuning, base + 0x04);
+		iowrite32(timing, base + 0x0C);
+	}
+
+	/* Set the DMA capable bit in the BMDMA area */
+	reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	reg |= (1 << (5 + adev->devno));
+	iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+	/* Remember the last DMA setup we did */
+
+	ap->private_data = adev;
+}
+
+/**
+ *	cs5530_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.  Specifically we have a problem that there is only
+ *	one MWDMA/UDMA bit.
+ */
+
+static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_device *prev = ap->private_data;
+
+	/* See if the DMA settings could be wrong */
+	if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+		/* Maybe, but do the channels match MWDMA/UDMA ? */
+		if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
+		    (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+		    	/* Switch the mode bits */
+		    	cs5530_set_dmamode(ap, adev);
+	}
+
+	return ata_qc_issue_prot(qc);
+}
+
+static struct scsi_host_template cs5530_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cs5530_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cs5530_set_piomode,
+	.set_dmamode	= cs5530_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= cs5530_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct dmi_system_id palmax_dmi_table[] = {
+	{
+		.ident = "Palmax PD1100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
+		},
+	},
+	{ }
+};
+
+static int cs5530_is_palmax(void)
+{
+	if (dmi_check_system(palmax_dmi_table)) {
+		printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
+		return 1;
+	}
+	return 0;
+}
+
+
+/**
+ *	cs5530_init_chip	-	Chipset init
+ *
+ *	Perform the chip initialisation work that is shared between both
+ *	setup and resume paths
+ */
+
+static int cs5530_init_chip(void)
+{
+	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
+
+	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
+		switch (dev->device) {
+			case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
+				master_0 = pci_dev_get(dev);
+				break;
+			case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+				cs5530_0 = pci_dev_get(dev);
+				break;
+		}
+	}
+	if (!master_0) {
+		printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
+		goto fail_put;
+	}
+	if (!cs5530_0) {
+		printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
+		goto fail_put;
+	}
+
+	pci_set_master(cs5530_0);
+	pci_set_mwi(cs5530_0);
+
+	/*
+	 * Set PCI CacheLineSize to 16-bytes:
+	 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
+	 *
+	 * Note: This value is constant because the 5530 is only a Geode companion
+	 */
+
+	pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
+
+	/*
+	 * Disable trapping of UDMA register accesses (Win98 hack):
+	 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
+	 */
+
+	pci_write_config_word(cs5530_0, 0xd0, 0x5006);
+
+	/*
+	 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
+	 * The other settings are what is necessary to get the register
+	 * into a sane state for IDE DMA operation.
+	 */
+
+	pci_write_config_byte(master_0, 0x40, 0x1e);
+
+	/*
+	 * Set max PCI burst size (16-bytes seems to work best):
+	 *	   16bytes: set bit-1 at 0x41 (reg value of 0x16)
+	 *	all others: clear bit-1 at 0x41, and do:
+	 *	  128bytes: OR 0x00 at 0x41
+	 *	  256bytes: OR 0x04 at 0x41
+	 *	  512bytes: OR 0x08 at 0x41
+	 *	 1024bytes: OR 0x0c at 0x41
+	 */
+
+	pci_write_config_byte(master_0, 0x41, 0x14);
+
+	/*
+	 * These settings are necessary to get the chip
+	 * into a sane state for IDE DMA operation.
+	 */
+
+	pci_write_config_byte(master_0, 0x42, 0x00);
+	pci_write_config_byte(master_0, 0x43, 0xc1);
+
+	pci_dev_put(master_0);
+	pci_dev_put(cs5530_0);
+	return 0;
+fail_put:
+	if (master_0)
+		pci_dev_put(master_0);
+	if (cs5530_0)
+		pci_dev_put(cs5530_0);
+	return -ENODEV;
+}
+
+/**
+ *	cs5530_init_one		-	Initialise a CS5530
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Install a driver for the newly found CS5530 companion chip. Most of
+ *	this is just housekeeping. We have to set the chip up correctly and
+ *	turn off various bits of emulation magic.
+ */
+
+static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &cs5530_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x07,
+		.port_ops = &cs5530_port_ops
+	};
+	/* The docking connector doesn't do UDMA, and apparently not MWDMA either */
+	static const struct ata_port_info info_palmax_secondary = {
+		.sht = &cs5530_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &cs5530_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	/* Chip initialisation */
+	if (cs5530_init_chip())
+		return -ENODEV;
+
+	if (cs5530_is_palmax())
+		ppi[1] = &info_palmax_secondary;
+
+	/* Now kick off ATA set up */
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int cs5530_reinit_one(struct pci_dev *pdev)
+{
+	/* If we fail on resume we are doomed */
+	if (cs5530_init_chip())
+		BUG();
+	return ata_pci_device_resume(pdev);
+}
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id cs5530[] = {
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
+
+	{ },
+};
+
+static struct pci_driver cs5530_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cs5530,
+	.probe 		= cs5530_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= cs5530_reinit_one,
+#endif
+};
+
+static int __init cs5530_init(void)
+{
+	return pci_register_driver(&cs5530_pci_driver);
+}
+
+static void __exit cs5530_exit(void)
+{
+	pci_unregister_driver(&cs5530_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5530);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cs5530_init);
+module_exit(cs5530_exit);
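A standalone sketch of the UDMA/MWDMA class check that cs5530_qc_issue_prot()
performs before deciding whether the shared mode bit must be reloaded; the
helper name and the test harness below are made up for illustration, and the
XFER_* values mirror the ATA transfer-mode encoding used by libata:

#include <stdio.h>

#define XFER_MW_DMA_0	0x20	/* ATA transfer-mode encoding (assumed here) */
#define XFER_UDMA_0	0x40

/* Return 1 when the issuing device's DMA class (UDMA vs MWDMA) differs
 * from the class of the device the timings were last loaded for, which
 * is exactly the case where the shared mode bit must be rewritten. */
static int cs5530_needs_timing_reload(unsigned char new_mode,
				      unsigned char prev_mode)
{
	return (new_mode >= XFER_UDMA_0) != (prev_mode >= XFER_UDMA_0);
}

int main(void)
{
	/* MWDMA2 command issued after a UDMA2 device: reload required */
	printf("%d\n", cs5530_needs_timing_reload(XFER_MW_DMA_0 + 2,
						  XFER_UDMA_0 + 2));
	return 0;
}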
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_cs5535.c linux-2.6.18.x86_64.p4/drivers/ata/pata_cs5535.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_cs5535.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_cs5535.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,283 @@
+/*
+ * pata-cs5535.c 	- CS5535 PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
+ * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
+ * and Alexander Kiausch <alex.kiausch@t-online.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ *	Available from AMD web site.
+ * TODO
+ *	Review errata to see if serializing is necessary
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/msr.h>
+
+#define DRV_NAME	"cs5535"
+#define DRV_VERSION	"0.2.12"
+
+/*
+ *	The Geode (Aka Athlon GX now) uses an internal MSR based
+ *	bus system for control. Demented but there you go.
+ */
+
+#define MSR_ATAC_BASE    	0x51300000
+#define ATAC_GLD_MSR_CAP 	(MSR_ATAC_BASE+0)
+#define ATAC_GLD_MSR_CONFIG    (MSR_ATAC_BASE+0x01)
+#define ATAC_GLD_MSR_SMI       (MSR_ATAC_BASE+0x02)
+#define ATAC_GLD_MSR_ERROR     (MSR_ATAC_BASE+0x03)
+#define ATAC_GLD_MSR_PM        (MSR_ATAC_BASE+0x04)
+#define ATAC_GLD_MSR_DIAG      (MSR_ATAC_BASE+0x05)
+#define ATAC_IO_BAR            (MSR_ATAC_BASE+0x08)
+#define ATAC_RESET             (MSR_ATAC_BASE+0x10)
+#define ATAC_CH0D0_PIO         (MSR_ATAC_BASE+0x20)
+#define ATAC_CH0D0_DMA         (MSR_ATAC_BASE+0x21)
+#define ATAC_CH0D1_PIO         (MSR_ATAC_BASE+0x22)
+#define ATAC_CH0D1_DMA         (MSR_ATAC_BASE+0x23)
+#define ATAC_PCI_ABRTERR       (MSR_ATAC_BASE+0x24)
+
+#define ATAC_BM0_CMD_PRIM      0x00
+#define ATAC_BM0_STS_PRIM      0x02
+#define ATAC_BM0_PRD           0x04
+
+#define CS5535_CABLE_DETECT    0x48
+
+#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL)==0x00009172 )
+
+/**
+ *	cs5535_cable_detect	-	detect cable type
+ *	@ap: Port to detect on
+ *
+ *	Perform cable detection for ATA66 capable cable. Return a libata
+ *	cable type.
+ */
+
+static int cs5535_cable_detect(struct ata_port *ap)
+{
+	u8 cable;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
+	if (cable & 1)
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	cs5535_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. The CS5535 is pretty clean about all this
+ */
+
+static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 pio_timings[5] = {
+		0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
+	};
+	static const u16 pio_cmd_timings[5] = {
+		0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
+	};
+	u32 reg, dummy;
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	int mode = adev->pio_mode - XFER_PIO_0;
+	int cmdmode = mode;
+
+	/* Command timing has to be for the lowest of the pair of devices */
+	if (pair) {
+		int pairmode = pair->pio_mode - XFER_PIO_0;
+		cmdmode = min(mode, pairmode);
+		/* Write the other drive timing register if it changed */
+		if (cmdmode < pairmode)
+			wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
+				pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
+	}
+	/* Write the drive timing register */
+	wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
+		pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
+
+	/* Set the PIO "format 1" bit in the DMA timing register */
+	rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
+	wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
+}
+
+/**
+ *	cs5535_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ */
+
+static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u32 udma_timings[5] = {
+		0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
+	};
+	static const u32 mwdma_timings[3] = {
+		0x7F0FFFF3, 0x7F035352, 0x7F024241
+	};
+	u32 reg, dummy;
+	int mode = adev->dma_mode;
+
+	rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
+	reg &= 0x80000000UL;
+	if (mode >= XFER_UDMA_0)
+		reg |= udma_timings[mode - XFER_UDMA_0];
+	else
+		reg |= mwdma_timings[mode - XFER_MW_DMA_0];
+	wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
+}
+
+static struct scsi_host_template cs5535_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cs5535_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cs5535_set_piomode,
+	.set_dmamode	= cs5535_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= cs5535_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	cs5535_init_one		-	Initialise a CS5535
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Install a driver for the newly found CS5535 companion chip. Most of
+ *	this is just housekeeping. We check that the BIOS left the PIO timing
+ *	clocks in a sane state and program PIO0 timings if it did not.
+ */
+
+static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &cs5535_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x1f,
+		.port_ops = &cs5535_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	u32 timings, dummy;
+
+	/* Check the BIOS set the initial timing clock. If not set the
+	   timings for PIO0 */
+	rdmsr(ATAC_CH0D0_PIO, timings, dummy);
+	if (CS5535_BAD_PIO(timings))
+		wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
+	rdmsr(ATAC_CH0D1_PIO, timings, dummy);
+	if (CS5535_BAD_PIO(timings))
+		wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id cs5535[] = {
+	{ PCI_VDEVICE(NS, 0x002D), },
+
+	{ },
+};
+
+static struct pci_driver cs5535_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= cs5535,
+	.probe 		= cs5535_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init cs5535_init(void)
+{
+	return pci_register_driver(&cs5535_pci_driver);
+}
+
+static void __exit cs5535_exit(void)
+{
+	pci_unregister_driver(&cs5535_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgang Zuleger, Alexander Kiausch");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5535);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cs5535_init);
+module_exit(cs5535_exit);
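For reference, a user-space sketch of the MSR addressing scheme used above:
the per-device timing registers sit at ATAC_CH0D0_PIO/_DMA plus 2 * devno,
and cs5535_set_dmamode() preserves the PIO "format 1" flag in bit 31 when it
rewrites the DMA timing. The constants are copied from the driver; the helper
names and the harness are invented for the example:

#include <stdio.h>

#define MSR_ATAC_BASE	0x51300000u
#define ATAC_CH0D0_PIO	(MSR_ATAC_BASE + 0x20)
#define ATAC_CH0D0_DMA	(MSR_ATAC_BASE + 0x21)

static unsigned int cs5535_pio_msr(int devno)
{
	return ATAC_CH0D0_PIO + 2 * devno;	/* devno 1 -> ATAC_CH0D1_PIO */
}

static unsigned int cs5535_dma_msr(int devno)
{
	return ATAC_CH0D0_DMA + 2 * devno;	/* devno 1 -> ATAC_CH0D1_DMA */
}

/* Merge a new timing value with the existing format-1 flag, as
 * cs5535_set_dmamode() does with reg &= 0x80000000UL. */
static unsigned int merge_dma_timing(unsigned int old_reg, unsigned int timing)
{
	return (old_reg & 0x80000000u) | timing;
}

int main(void)
{
	printf("slave PIO MSR %#x, DMA MSR %#x\n",
	       cs5535_pio_msr(1), cs5535_dma_msr(1));
	printf("merged timing %#x\n",
	       merge_dma_timing(0x80000000u, 0x7F024241u));	/* MWDMA2 */
	return 0;
}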
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_cypress.c linux-2.6.18.x86_64.p4/drivers/ata/pata_cypress.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_cypress.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_cypress.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,222 @@
+/*
+ * pata_cypress.c 	- Cypress PATA for new ATA layer
+ *			  (C) 2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * Based heavily on
+ * linux/drivers/ide/pci/cy82c693.c		Version 0.40	Sep. 10, 2002
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cypress"
+#define DRV_VERSION "0.1.5"
+
+/* here are the offset definitions for the registers */
+
+enum {
+	CY82_IDE_CMDREG		= 0x04,
+	CY82_IDE_ADDRSETUP	= 0x48,
+	CY82_IDE_MASTER_IOR	= 0x4C,
+	CY82_IDE_MASTER_IOW	= 0x4D,
+	CY82_IDE_SLAVE_IOR	= 0x4E,
+	CY82_IDE_SLAVE_IOW	= 0x4F,
+	CY82_IDE_MASTER_8BIT	= 0x50,
+	CY82_IDE_SLAVE_8BIT	= 0x51,
+
+	CY82_INDEX_PORT		= 0x22,
+	CY82_DATA_PORT		= 0x23,
+
+	CY82_INDEX_CTRLREG1	= 0x01,
+	CY82_INDEX_CHANNEL0	= 0x30,
+	CY82_INDEX_CHANNEL1	= 0x31,
+	CY82_INDEX_TIMEOUT	= 0x32
+};
+
+/**
+ *	cy82c693_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup.
+ */
+
+static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_timing t;
+	const unsigned long T = 1000000 / 33;
+	short time_16, time_8;
+	u32 addr;
+
+	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
+		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+		return;
+	}
+
+	time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4);
+	time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4);
+
+	if (adev->devno == 0) {
+		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
+
+		addr &= ~0x0F;	/* Mask bits */
+		addr |= FIT(t.setup, 0, 15);
+
+		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
+		pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
+	} else {
+		pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
+
+		addr &= ~0xF0;	/* Mask bits */
+		addr |= (FIT(t.setup, 0, 15) << 4);
+
+		pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
+		pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
+		pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
+	}
+}
+
+/**
+ *	cy82c693_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the DMA mode setup.
+ */
+
+static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
+
+	/* Be afraid, be very afraid. Magic registers  in low I/O space */
+	outb(reg, 0x22);
+	outb(adev->dma_mode - XFER_MW_DMA_0, 0x23);
+
+	/* 0x50 gives the best behaviour on the Alpha's using this chip */
+	outb(CY82_INDEX_TIMEOUT, 0x22);
+	outb(0x50, 0x23);
+}
+
+static struct scsi_host_template cy82c693_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cy82c693_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= cy82c693_set_piomode,
+	.set_dmamode	= cy82c693_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &cy82c693_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &cy82c693_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	/* Devfn 1 is the ATA primary. The secondary is magic and on devfn2.
+	   For the moment we don't handle the secondary. FIXME */
+
+	if (PCI_FUNC(pdev->devfn) != 1)
+		return -ENODEV;
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id cy82c693[] = {
+	{ PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), },
+
+	{ },
+};
+
+static struct pci_driver cy82c693_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= cy82c693,
+	.probe 		= cy82c693_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init cy82c693_init(void)
+{
+	return pci_register_driver(&cy82c693_pci_driver);
+}
+
+
+static void __exit cy82c693_exit(void)
+{
+	pci_unregister_driver(&cy82c693_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cy82c693);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cy82c693_init);
+module_exit(cy82c693_exit);
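A small sketch of the address-setup packing done by cy82c693_set_piomode()
above: the clamped setup count goes into the low nibble of CY82_IDE_ADDRSETUP
for the master and the high nibble for the slave. Here fit() stands in for
the kernel's FIT() clamp macro, nothing touches real hardware, and the
function names are illustrative only:

#include <stdio.h>

static unsigned int fit(int v, int min, int max)
{
	if (v < min)
		return min;
	if (v > max)
		return max;
	return v;
}

static unsigned int pack_addr_setup(unsigned int addr, int setup, int devno)
{
	if (devno == 0) {		/* master: low nibble */
		addr &= ~0x0Fu;
		addr |= fit(setup, 0, 15);
	} else {			/* slave: high nibble */
		addr &= ~0xF0u;
		addr |= fit(setup, 0, 15) << 4;
	}
	return addr;
}

int main(void)
{
	/* setup of 3 clocks for the slave, previous register value 0xA5 */
	printf("%#x\n", pack_addr_setup(0xA5, 3, 1));	/* -> 0x35 */
	return 0;
}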
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_efar.c linux-2.6.18.x86_64.p4/drivers/ata/pata_efar.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_efar.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_efar.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,356 @@
+/*
+ *    pata_efar.c - EFAR PIIX clone controller driver
+ *
+ *	(C) 2005 Red Hat <alan@redhat.com>
+ *
+ *    Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ *    The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
+ *    Intel ICH controllers the EFAR widened the UDMA mode register bits
+ *    and doesn't require the funky clock selection.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_efar"
+#define DRV_VERSION	"0.4.4"
+
+/**
+ *	efar_pre_reset	-	Enable bits
+ *	@ap: Port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Check that the EFAR ATA port is enabled before handing off to the
+ *	standard prereset handling. The enable bits differ from the PIIX layout.
+ */
+
+static int efar_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits efar_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+	};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	efar_error_handler - Probe specified port on PATA host controller
+ *	@ap: Port to probe
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void efar_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	efar_cable_detect	-	check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection for the EFAR ATA interface. This is
+ *	different to the PIIX arrangement
+ */
+
+static int efar_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	pci_read_config_byte(pdev, 0x47, &tmp);
+	if (tmp & (2 >> ap->port_no))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	efar_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
+	u16 idetm_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. The EFAR is a clone so very similar
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio > 2)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
+		control |= 2;	/* IE enable */
+	/* Intel specifies that the PPE functionality is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
+
+	pci_read_config_word(dev, idetm_port, &idetm_data);
+
+	/* Enable PPE, IE and TIME as appropriate */
+
+	if (adev->devno == 0) {
+		idetm_data &= 0xCCF0;
+		idetm_data |= control;
+		idetm_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	} else {
+		int shift = 4 * ap->port_no;
+		u8 slave_data;
+
+		idetm_data &= 0xCC0F;
+		idetm_data |= (control << 4);
+
+		/* Slave timing in separate register */
+		pci_read_config_byte(dev, 0x44, &slave_data);
+		slave_data &= 0x0F << shift;
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
+		pci_write_config_byte(dev, 0x44, slave_data);
+	}
+
+	idetm_data |= 0x4000;	/* Ensure SITRE is enabled */
+	pci_write_config_word(dev, idetm_port, idetm_data);
+}
+
+/**
+ *	efar_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u8 master_port		= ap->port_no ? 0x42 : 0x40;
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno + 2 * ap->port_no;
+	u8 udma_enable;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	pci_read_config_word(dev, master_port, &master_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma	= adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+
+		udma_enable |= (1 << devid);
+
+		/* Load the UDMA mode number */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(7 << (4 * devid));
+		udma_timing |= udma << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+	} else {
+		/*
+		 * MWDMA is driven by the PIO timings. We must also enable
+		 * IORDY unconditionally along with TIME1. PPE has already
+		 * been set when the PIO timing was set.
+		 */
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (adev->devno) {	/* Slave */
+			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= (0x0F + 0xE1 * ap->port_no);
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else { 	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, master_port, master_data);
+	}
+	pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+static struct scsi_host_template efar_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations efar_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= efar_set_piomode,
+	.set_dmamode		= efar_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= efar_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= efar_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	efar_init_one - Register EFAR ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in efar_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht		= &efar_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= 0x0f, /* UDMA 66 */
+		.port_ops	= &efar_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id efar_pci_tbl[] = {
+	{ PCI_VDEVICE(EFAR, 0x9130), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver efar_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= efar_pci_tbl,
+	.probe			= efar_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init efar_init(void)
+{
+	return pci_register_driver(&efar_pci_driver);
+}
+
+static void __exit efar_exit(void)
+{
+	pci_unregister_driver(&efar_pci_driver);
+}
+
+module_init(efar_init);
+module_exit(efar_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
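A worked example of the PIIX-style IDETIM assembly that efar_set_piomode()
performs for the master device, assuming an ATA disk at PIO4 that needs IORDY
and starting from a zeroed register; the ISP/RTC table is copied from the
driver and the helper itself is purely illustrative:

#include <stdio.h>

static unsigned short efar_master_idetim(unsigned short old, int pio,
					 int is_disk, int needs_iordy)
{
	/* ISP/RTC pairs for PIO0..PIO4, as in the driver's timings[][] */
	static const unsigned char timings[5][2] = {
		{ 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }
	};
	unsigned short idetm = old & 0xCCF0;	/* keep non-master fields */
	int control = 0;

	if (pio > 2)
		control |= 1;			/* TIME1 */
	if (needs_iordy)
		control |= 2;			/* IE */
	if (is_disk)
		control |= 4;			/* PPE */

	idetm |= control;
	idetm |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
	idetm |= 0x4000;			/* SITRE */
	return idetm;
}

int main(void)
{
	printf("%#x\n", (unsigned int)efar_master_idetim(0, 4, 1, 1));	/* -> 0x6307 */
	return 0;
}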
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt366.c linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt366.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt366.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt366.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,478 @@
+/*
+ * Libata driver for the highpoint 366 and 368 UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
+ *
+ * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003		Red Hat Inc
+ *
+ *
+ * TODO
+ *	Maybe PLL mode
+ *	Look into engine reset on timeout errors. Should not be
+ *		required.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt366"
+#define DRV_VERSION	"0.6.1"
+
+struct hpt_clock {
+	u8	xfer_speed;
+	u32	timing;
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3    data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
+ *        DMA. cycles = value + 1
+ * 4:8    data_low_time. active time of DIOW_/DIOR_ for PIO and MW
+ *        DMA. cycles = value + 1
+ * 9:12   cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 13:17  cmd_low_time. active time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 18:21  udma_cycle_time. clock freq and clock cycles for UDMA xfer.
+ *        during task file register access.
+ * 22:24  pre_high_time. time to initialize 1st cycle for PIO and MW DMA
+ *        xfer.
+ * 25:27  cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ *        register access.
+ * 28     UDMA enable
+ * 29     DMA enable
+ * 30     PIO_MST enable. if set, the chip is in bus master mode during
+ *        PIO.
+ * 31     FIFO enable.
+ */
+
+static const struct hpt_clock hpt366_40[] = {
+	{	XFER_UDMA_4,	0x900fd943	},
+	{	XFER_UDMA_3,	0x900ad943	},
+	{	XFER_UDMA_2,	0x900bd943	},
+	{	XFER_UDMA_1,	0x9008d943	},
+	{	XFER_UDMA_0,	0x9008d943	},
+
+	{	XFER_MW_DMA_2,	0xa008d943	},
+	{	XFER_MW_DMA_1,	0xa010d955	},
+	{	XFER_MW_DMA_0,	0xa010d9fc	},
+
+	{	XFER_PIO_4,	0xc008d963	},
+	{	XFER_PIO_3,	0xc010d974	},
+	{	XFER_PIO_2,	0xc010d997	},
+	{	XFER_PIO_1,	0xc010d9c7	},
+	{	XFER_PIO_0,	0xc018d9d9	},
+	{	0,		0x0120d9d9	}
+};
+
+static const struct hpt_clock hpt366_33[] = {
+	{	XFER_UDMA_4,	0x90c9a731	},
+	{	XFER_UDMA_3,	0x90cfa731	},
+	{	XFER_UDMA_2,	0x90caa731	},
+	{	XFER_UDMA_1,	0x90cba731	},
+	{	XFER_UDMA_0,	0x90c8a731	},
+
+	{	XFER_MW_DMA_2,	0xa0c8a731	},
+	{	XFER_MW_DMA_1,	0xa0c8a732	},	/* 0xa0c8a733 */
+	{	XFER_MW_DMA_0,	0xa0c8a797	},
+
+	{	XFER_PIO_4,	0xc0c8a731	},
+	{	XFER_PIO_3,	0xc0c8a742	},
+	{	XFER_PIO_2,	0xc0d0a753	},
+	{	XFER_PIO_1,	0xc0d0a7a3	},	/* 0xc0d0a793 */
+	{	XFER_PIO_0,	0xc0d0a7aa	},	/* 0xc0d0a7a7 */
+	{	0,		0x0120a7a7	}
+};
+
+static const struct hpt_clock hpt366_25[] = {
+	{	XFER_UDMA_4,	0x90c98521	},
+	{	XFER_UDMA_3,	0x90cf8521	},
+	{	XFER_UDMA_2,	0x90cf8521	},
+	{	XFER_UDMA_1,	0x90cb8521	},
+	{	XFER_UDMA_0,	0x90cb8521	},
+
+	{	XFER_MW_DMA_2,	0xa0ca8521	},
+	{	XFER_MW_DMA_1,	0xa0ca8532	},
+	{	XFER_MW_DMA_0,	0xa0ca8575	},
+
+	{	XFER_PIO_4,	0xc0ca8521	},
+	{	XFER_PIO_3,	0xc0ca8532	},
+	{	XFER_PIO_2,	0xc0ca8542	},
+	{	XFER_PIO_1,	0xc0d08572	},
+	{	XFER_PIO_0,	0xc0d08585	},
+	{	0,		0x01208585	}
+};
+
+static const char *bad_ata33[] = {
+	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
+	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+	"Maxtor 90510D4",
+	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
+	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+	NULL
+};
+
+static const char *bad_ata66_4[] = {
+	"IBM-DTLA-307075",
+	"IBM-DTLA-307060",
+	"IBM-DTLA-307045",
+	"IBM-DTLA-307030",
+	"IBM-DTLA-307020",
+	"IBM-DTLA-307015",
+	"IBM-DTLA-305040",
+	"IBM-DTLA-305030",
+	"IBM-DTLA-305020",
+	"IC35L010AVER07-0",
+	"IC35L020AVER07-0",
+	"IC35L030AVER07-0",
+	"IC35L040AVER07-0",
+	"IC35L060AVER07-0",
+	"WDC AC310200R",
+	NULL
+};
+
+static const char *bad_ata66_3[] = {
+	"WDC AC310200R",
+	NULL
+};
+
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	int i = 0;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	while (list[i] != NULL) {
+		if (!strcmp(list[i], model_num)) {
+			printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
+				modestr, list[i]);
+			return 1;
+		}
+		i++;
+	}
+	return 0;
+}
+
+/**
+ *	hpt366_filter	-	mode selection filter
+ *	@adev: ATA device
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA) {
+		if (hpt_dma_blacklisted(adev, "UDMA",  bad_ata33))
+			mask &= ~ATA_MASK_UDMA;
+		if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
+			mask &= ~(0x07 << ATA_SHIFT_UDMA);
+		if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
+			mask &= ~(0x0F << ATA_SHIFT_UDMA);
+	}
+	return ata_pci_default_filter(adev, mask);
+}
+
+/**
+ *	hpt36x_find_mode	-	find the timing data for a mode
+ *	@ap: ATA port
+ *	@speed: transfer mode
+ *
+ *	Return the 32bit register programming information for this channel
+ *	that matches the speed provided.
+ */
+
+static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
+{
+	struct hpt_clock *clocks = ap->host->private_data;
+
+	while(clocks->xfer_speed) {
+		if (clocks->xfer_speed == speed)
+			return clocks->timing;
+		clocks++;
+	}
+	BUG();
+	return 0xffffffffU;	/* silence compiler warning */
+}
+
+static int hpt36x_cable_detect(struct ata_port *ap)
+{
+	u8 ata66;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	if (ata66 & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	hpt366_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	if (fast & 0x80) {
+		fast &= ~0x80;
+		pci_write_config_byte(pdev, addr2, fast);
+	}
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt36x_find_mode(ap, adev->pio_mode);
+	mode &= ~0x8000000;	/* No FIFO in PIO */
+	mode &= ~0x30070000;	/* Leave config bits alone */
+	reg &= 0x30070000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt366_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	if (fast & 0x80) {
+		fast &= ~0x80;
+		pci_write_config_byte(pdev, addr2, fast);
+	}
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt36x_find_mode(ap, adev->dma_mode);
+	mode |= 0x8000000;	/* FIFO in MWDMA or UDMA */
+	mode &= ~0xC0000000;	/* Leave config bits alone */
+	reg &= 0xC0000000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+static struct scsi_host_template hpt36x_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+/*
+ *	Configuration for HPT366/68
+ */
+
+static struct ata_port_operations hpt366_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt366_set_piomode,
+	.set_dmamode	= hpt366_set_dmamode,
+	.mode_filter	= hpt366_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= hpt36x_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	hpt36x_init_chipset	-	common chip setup
+ *	@dev: PCI device
+ *
+ *	Perform the chip setup work that must be done at both init and
+ *	resume time
+ */
+
+static void hpt36x_init_chipset(struct pci_dev *dev)
+{
+	u8 drive_fast;
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x51, &drive_fast);
+	if (drive_fast & 0x80)
+		pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+}
+
+/**
+ *	hpt36x_init_one		-	Initialise an HPT366/368
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Initialise an HPT36x device. There are some interesting complications
+ *	here. Firstly the chip may report 366 and be one of several variants.
+ *	Secondly all the timings depend on the clock for the chip, which we must
+ *	detect and look up.
+ *
+ *	These are the known chip mappings. The list may be missing a couple of later
+ *	releases.
+ *
+ *	Chip version		PCI		Rev	Notes
+ *	HPT366			4 (HPT366)	0	UDMA66
+ *	HPT366			4 (HPT366)	1	UDMA66
+ *	HPT368			4 (HPT366)	2	UDMA66
+ *	HPT37x/30x		4 (HPT366)	3+	Other driver
+ *
+ */
+
+static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_hpt366 = {
+		.sht = &hpt36x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x1f,
+		.port_ops = &hpt366_port_ops
+	};
+	struct ata_port_info info = info_hpt366;
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	u32 class_rev;
+	u32 reg1;
+
+	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+	class_rev &= 0xFF;
+
+	/* May be a later chip in disguise. Check */
+	/* Newer chips are not in the HPT36x driver. Ignore them */
+	if (class_rev > 2)
+		return -ENODEV;
+
+	hpt36x_init_chipset(dev);
+
+	pci_read_config_dword(dev, 0x40,  &reg1);
+
+	/* PCI clocking determines the ATA timing values to use */
+	/* We scribble on a local copy of info_hpt366, so this is re-entry safe */
+	switch((reg1 & 0x700) >> 8) {
+		case 5:
+			info.private_data = &hpt366_40;
+			break;
+		case 9:
+			info.private_data = &hpt366_25;
+			break;
+		default:
+			info.private_data = &hpt366_33;
+			break;
+	}
+	/* Now kick off ATA set up */
+	return ata_pci_init_one(dev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int hpt36x_reinit_one(struct pci_dev *dev)
+{
+	hpt36x_init_chipset(dev);
+	return ata_pci_device_resume(dev);
+}
+#endif
+
+static const struct pci_device_id hpt36x[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+	{ },
+};
+
+static struct pci_driver hpt36x_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= hpt36x,
+	.probe 		= hpt36x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= hpt36x_reinit_one,
+#endif
+};
+
+static int __init hpt36x_init(void)
+{
+	return pci_register_driver(&hpt36x_pci_driver);
+}
+
+static void __exit hpt36x_exit(void)
+{
+	pci_unregister_driver(&hpt36x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt36x);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt36x_init);
+module_exit(hpt36x_exit);
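A decoder for the bus-clock timing words used by the hpt366_xx tables,
following the bit key in the comment near the top of the driver above; the
example value 0xc008d963 is the XFER_PIO_4 entry from hpt366_40, and the
function is only for illustration:

#include <stdio.h>

static void hpt_decode_timing(unsigned int v)
{
	printf("data_high_time   %u\n", v & 0xF);		/* bits 0:3   */
	printf("data_low_time    %u\n", (v >> 4) & 0x1F);	/* bits 4:8   */
	printf("cmd_high_time    %u\n", (v >> 9) & 0xF);	/* bits 9:12  */
	printf("cmd_low_time     %u\n", (v >> 13) & 0x1F);	/* bits 13:17 */
	printf("udma_cycle_time  %u\n", (v >> 18) & 0xF);	/* bits 18:21 */
	printf("pre_high_time    %u\n", (v >> 22) & 0x7);	/* bits 22:24 */
	printf("cmd_pre_high     %u\n", (v >> 25) & 0x7);	/* bits 25:27 */
	printf("UDMA enable      %u\n", (v >> 28) & 1);
	printf("DMA enable       %u\n", (v >> 29) & 1);
	printf("PIO_MST enable   %u\n", (v >> 30) & 1);
	printf("FIFO enable      %u\n", (v >> 31) & 1);
}

int main(void)
{
	hpt_decode_timing(0xc008d963u);
	return 0;
}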
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt37x.c linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt37x.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt37x.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt37x.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,1184 @@
+/*
+ * Libata driver for the highpoint 37x and 30x UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
+ *
+ * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003		Red Hat Inc
+ * Portions Copyright (C) 2005-2006	MontaVista Software, Inc.
+ *
+ * TODO
+ *	PLL mode
+ *	Look into engine reset on timeout errors. Should not be
+ *		required.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt37x"
+#define DRV_VERSION	"0.6.6"
+
+struct hpt_clock {
+	u8	xfer_speed;
+	u32	timing;
+};
+
+struct hpt_chip {
+	const char *name;
+	unsigned int base;
+	struct hpt_clock const *clocks[4];
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3    data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
+ *        DMA. cycles = value + 1
+ * 4:8    data_low_time. active time of DIOW_/DIOR_ for PIO and MW
+ *        DMA. cycles = value + 1
+ * 9:12   cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 13:17  cmd_low_time. active time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 18:21  udma_cycle_time. clock freq and clock cycles for UDMA xfer.
+ *        during task file register access.
+ * 22:24  pre_high_time. time to initialize 1st cycle for PIO and MW DMA
+ *        xfer.
+ * 25:27  cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ *        register access.
+ * 28     UDMA enable
+ * 29     DMA enable
+ * 30     PIO_MST enable. if set, the chip is in bus master mode during
+ *        PIO.
+ * 31     FIFO enable.
+ */
+
+static struct hpt_clock hpt37x_timings_33[] = {
+	{ XFER_UDMA_6,		0x12446231 },	/* 0x12646231 ?? */
+	{ XFER_UDMA_5,		0x12446231 },
+	{ XFER_UDMA_4,		0x12446231 },
+	{ XFER_UDMA_3,		0x126c6231 },
+	{ XFER_UDMA_2,		0x12486231 },
+	{ XFER_UDMA_1,		0x124c6233 },
+	{ XFER_UDMA_0,		0x12506297 },
+
+	{ XFER_MW_DMA_2,	0x22406c31 },
+	{ XFER_MW_DMA_1,	0x22406c33 },
+	{ XFER_MW_DMA_0,	0x22406c97 },
+
+	{ XFER_PIO_4,		0x06414e31 },
+	{ XFER_PIO_3,		0x06414e42 },
+	{ XFER_PIO_2,		0x06414e53 },
+	{ XFER_PIO_1,		0x06814e93 },
+	{ XFER_PIO_0,		0x06814ea7 }
+};
+
+static struct hpt_clock hpt37x_timings_50[] = {
+	{ XFER_UDMA_6,		0x12848242 },
+	{ XFER_UDMA_5,		0x12848242 },
+	{ XFER_UDMA_4,		0x12ac8242 },
+	{ XFER_UDMA_3,		0x128c8242 },
+	{ XFER_UDMA_2,		0x120c8242 },
+	{ XFER_UDMA_1,		0x12148254 },
+	{ XFER_UDMA_0,		0x121882ea },
+
+	{ XFER_MW_DMA_2,	0x22808242 },
+	{ XFER_MW_DMA_1,	0x22808254 },
+	{ XFER_MW_DMA_0,	0x228082ea },
+
+	{ XFER_PIO_4,		0x0a81f442 },
+	{ XFER_PIO_3,		0x0a81f443 },
+	{ XFER_PIO_2,		0x0a81f454 },
+	{ XFER_PIO_1,		0x0ac1f465 },
+	{ XFER_PIO_0,		0x0ac1f48a }
+};
+
+static struct hpt_clock hpt37x_timings_66[] = {
+	{ XFER_UDMA_6,		0x1c869c62 },
+	{ XFER_UDMA_5,		0x1cae9c62 },	/* 0x1c8a9c62 */
+	{ XFER_UDMA_4,		0x1c8a9c62 },
+	{ XFER_UDMA_3,		0x1c8e9c62 },
+	{ XFER_UDMA_2,		0x1c929c62 },
+	{ XFER_UDMA_1,		0x1c9a9c62 },
+	{ XFER_UDMA_0,		0x1c829c62 },
+
+	{ XFER_MW_DMA_2,	0x2c829c62 },
+	{ XFER_MW_DMA_1,	0x2c829c66 },
+	{ XFER_MW_DMA_0,	0x2c829d2e },
+
+	{ XFER_PIO_4,		0x0c829c62 },
+	{ XFER_PIO_3,		0x0c829c84 },
+	{ XFER_PIO_2,		0x0c829ca6 },
+	{ XFER_PIO_1,		0x0d029d26 },
+	{ XFER_PIO_0,		0x0d029d5e }
+};
+
+
+static const struct hpt_chip hpt370 = {
+	"HPT370",
+	48,
+	{
+		hpt37x_timings_33,
+		NULL,
+		NULL,
+		NULL
+	}
+};
+
+static const struct hpt_chip hpt370a = {
+	"HPT370A",
+	48,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		NULL
+	}
+};
+
+static const struct hpt_chip hpt372 = {
+	"HPT372",
+	55,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt302 = {
+	"HPT302",
+	66,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt371 = {
+	"HPT371",
+	66,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt372a = {
+	"HPT372A",
+	66,
+	{
+		hpt37x_timings_33,
+		NULL,
+		hpt37x_timings_50,
+		hpt37x_timings_66
+	}
+};
+
+static const struct hpt_chip hpt374 = {
+	"HPT374",
+	48,
+	{
+		hpt37x_timings_33,
+		NULL,
+		NULL,
+		NULL
+	}
+};
+
+/**
+ *	hpt37x_find_mode	-	find the timing data for a mode
+ *	@ap: ATA port
+ *	@speed: transfer mode
+ *
+ *	Return the 32bit register programming information for this channel
+ *	that matches the speed provided.
+ */
+
+static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
+{
+	struct hpt_clock *clocks = ap->host->private_data;
+
+	while(clocks->xfer_speed) {
+		if (clocks->xfer_speed == speed)
+			return clocks->timing;
+		clocks++;
+	}
+	BUG();
+	return 0xffffffffU;	/* silence compiler warning */
+}
+
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	int i = 0;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	while (list[i] != NULL) {
+		if (!strcmp(list[i], model_num)) {
+			printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
+				modestr, list[i]);
+			return 1;
+		}
+		i++;
+	}
+	return 0;
+}
+
+static const char *bad_ata33[] = {
+	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
+	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+	"Maxtor 90510D4",
+	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
+	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+	NULL
+};
+
+static const char *bad_ata100_5[] = {
+	"IBM-DTLA-307075",
+	"IBM-DTLA-307060",
+	"IBM-DTLA-307045",
+	"IBM-DTLA-307030",
+	"IBM-DTLA-307020",
+	"IBM-DTLA-307015",
+	"IBM-DTLA-305040",
+	"IBM-DTLA-305030",
+	"IBM-DTLA-305020",
+	"IC35L010AVER07-0",
+	"IC35L020AVER07-0",
+	"IC35L030AVER07-0",
+	"IC35L040AVER07-0",
+	"IC35L060AVER07-0",
+	"WDC AC310200R",
+	NULL
+};
+
+/**
+ *	hpt370_filter	-	mode selection filter
+ *	@adev: ATA device
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA) {
+		if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+			mask &= ~ATA_MASK_UDMA;
+		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+			mask &= ~(0x1F << ATA_SHIFT_UDMA);
+	}
+	return ata_pci_default_filter(adev, mask);
+}
+
+/**
+ *	hpt370a_filter	-	mode selection filter
+ *	@adev: ATA device
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class != ATA_DEV_ATA) {
+		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+			mask &= ~ (0x1F << ATA_SHIFT_UDMA);
+	}
+	return ata_pci_default_filter(adev, mask);
+}
+
+/**
+ *	hpt37x_pre_reset	-	reset the hpt37x bus
+ *	@ap: ATA port to reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the initial reset handling for the 370/372 and 374 func 0
+ */
+
+static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	u8 scr2, ata66;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits hpt37x_enable_bits[] = {
+		{ 0x50, 1, 0x04, 0x04 },
+		{ 0x54, 1, 0x04, 0x04 }
+	};
+	if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	pci_read_config_byte(pdev, 0x5B, &scr2);
+	pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
+	/* Cable register now active */
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	/* Restore state */
+	pci_write_config_byte(pdev, 0x5B, scr2);
+
+	if (ata66 & (1 << ap->port_no))
+		ap->cbl = ATA_CBL_PATA40;
+	else
+		ap->cbl = ATA_CBL_PATA80;
+
+	/* Reset the state machine */
+	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+	udelay(100);
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	hpt37x_error_handler	-	reset the hpt37x
+ *	@ap: ATA port to reset
+ *
+ *	Perform probe for HPT37x, except for HPT374 channel 2
+ */
+
+static void hpt37x_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits hpt37x_enable_bits[] = {
+		{ 0x50, 1, 0x04, 0x04 },
+		{ 0x54, 1, 0x04, 0x04 }
+	};
+	u16 mcr3, mcr6;
+	u8 ata66;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	/* Do the extra channel work */
+	pci_read_config_word(pdev, 0x52, &mcr3);
+	pci_read_config_word(pdev, 0x56, &mcr6);
+	/* Set bit 15 of 0x52 to enable TCBLID as input
+	   Set bit 15 of 0x56 to enable FCBLID as input
+	 */
+	pci_write_config_word(pdev, 0x52, mcr3 | 0x8000);
+	pci_write_config_word(pdev, 0x56, mcr6 | 0x8000);
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	/* Reset TCBLID/FCBLID to output */
+	pci_write_config_word(pdev, 0x52, mcr3);
+	pci_write_config_word(pdev, 0x56, mcr6);
+
+	if (ata66 & (1 << ap->port_no))
+		ap->cbl = ATA_CBL_PATA40;
+	else
+		ap->cbl = ATA_CBL_PATA80;
+
+	/* Reset the state machine */
+	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+	udelay(100);
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	hpt374_error_handler	-	reset the hpt374
+ *	@ap: ATA port to reset
+ *
+ *	The 374 cable detect is a little different due to the extra
+ *	channels. The function 0 channels work as usual but function 1
+ *	is special.
+ */
+
+static void hpt374_error_handler(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!(PCI_FUNC(pdev->devfn) & 1))
+		hpt37x_error_handler(ap);
+	else
+		ata_bmdma_drive_eh(ap, hpt374_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	hpt370_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x02;
+	fast |= 0x01;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt37x_find_mode(ap, adev->pio_mode);
+	mode &= ~0x8000000;	/* No FIFO in PIO */
+	mode &= ~0x30070000;	/* Leave config bits alone */
+	reg &= 0x30070000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt370_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x02;
+	fast |= 0x01;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt37x_find_mode(ap, adev->dma_mode);
+	mode |= 0x8000000;	/* FIFO in MWDMA or UDMA */
+	mode &= ~0xC0000000;	/* Leave config bits alone */
+	reg &= 0xC0000000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt370_bmdma_start		-	DMA engine begin
+ *	@qc: ATA command
+ *
+ *	The 370 and 370A want us to reset the DMA engine each time we
+ *	use it. The 372 and later are fine.
+ */
+
+static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+	udelay(10);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	hpt370_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Work around the HPT370 DMA engine.
+ */
+
+static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 dma_stat = ioread8(ap->ioaddr.bmdma_addr + 2);
+	u8 dma_cmd;
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
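+	/*
+	 * If the engine still claims to be active, give it 20us to finish.
+	 * If it is stuck after that, reset the channel state machine, stop
+	 * the DMA engine and clear the error/interrupt bits before handing
+	 * over to the generic stop.
+	 */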
+	if (dma_stat & 0x01) {
+		udelay(20);
+		dma_stat = ioread8(bmdma + 2);
+	}
+	if (dma_stat & 0x01) {
+		/* Clear the engine */
+		pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+		udelay(10);
+		/* Stop DMA */
+		dma_cmd = ioread8(bmdma);
+		iowrite8(dma_cmd & 0xFE, bmdma);
+		/* Clear Error */
+		dma_stat = ioread8(bmdma + 2);
+		iowrite8(dma_stat | 0x06, bmdma + 2);
+		/* Clear the engine */
+		pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+		udelay(10);
+	}
+	ata_bmdma_stop(qc);
+}
+
+/**
+ *	hpt372_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x07;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt37x_find_mode(ap, adev->pio_mode);
+
+	printk("Find mode for %d reports %X\n", adev->pio_mode, mode);
+	mode &= ~0x80000000;	/* No FIFO in PIO */
+	mode &= ~0x30070000;	/* Leave config bits alone */
+	reg &= 0x30070000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt372_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x07;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt37x_find_mode(ap, adev->dma_mode);
+	printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
+	mode &= ~0xC0000000;	/* Leave config bits alone */
+	mode |= 0x80000000;	/* FIFO in MWDMA or UDMA */
+	reg &= 0xC0000000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt37x_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Clean up after the HPT372 and later DMA engine
+ */
+
+static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int mscreg = 0x50 + 4 * ap->port_no;
+	u8 bwsr_stat, msc_stat;
+
+	pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
+	pci_read_config_byte(pdev, mscreg, &msc_stat);
+	if (bwsr_stat & (1 << ap->port_no))
+		pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
+	ata_bmdma_stop(qc);
+}
+
+
+static struct scsi_host_template hpt37x_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+/*
+ *	Configuration for HPT370
+ */
+
+static struct ata_port_operations hpt370_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt370_set_piomode,
+	.set_dmamode	= hpt370_set_dmamode,
+	.mode_filter	= hpt370_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= hpt37x_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= hpt370_bmdma_start,
+	.bmdma_stop	= hpt370_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Configuration for HPT370A. Close to 370 but less filters
+ */
+
+static struct ata_port_operations hpt370a_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt370_set_piomode,
+	.set_dmamode	= hpt370_set_dmamode,
+	.mode_filter	= hpt370a_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= hpt37x_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= hpt370_bmdma_start,
+	.bmdma_stop	= hpt370_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Configuration for HPT372, HPT371, HPT302. Slightly different PIO
+ *	and DMA mode setting functionality.
+ */
+
+static struct ata_port_operations hpt372_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt372_set_piomode,
+	.set_dmamode	= hpt372_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= hpt37x_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= hpt37x_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Configuration for HPT374. Mode setting works like 372 and friends
+ *	but we have a different cable detection procedure.
+ */
+
+static struct ata_port_operations hpt374_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt372_set_piomode,
+	.set_dmamode	= hpt372_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= hpt374_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= hpt37x_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	hpt37x_clock_slot	-	Turn timing into a clock slot
+ *	@freq: Reported frequency timing
+ *	@base: Base timing
+ *
+ *	Turn the timing data into a clock slot (0 for 33, 1 for 40, 2 for 50
+ *	and 3 for 66MHz).
+ */
+
+static int hpt37x_clock_slot(unsigned int freq, unsigned int base)
+{
+	unsigned int f = (base * freq) / 192;	/* MHz */
+	if (f < 40)
+		return 0;	/* 33MHz slot */
+	if (f < 45)
+		return 1;	/* 40MHz slot */
+	if (f < 55)
+		return 2;	/* 50MHz slot */
+	return 3;		/* 66MHz slot */
+}
+
+/**
+ *	hpt37x_calibrate_dpll		-	Calibrate the DPLL loop
+ *	@dev: PCI device
+ *
+ *	Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this
+ *	succeeds
+ */
+
+static int hpt37x_calibrate_dpll(struct pci_dev *dev)
+{
+	u8 reg5b;
+	u32 reg5c;
+	int tries;
+
+	for(tries = 0; tries < 0x5000; tries++) {
+		udelay(50);
+		pci_read_config_byte(dev, 0x5b, &reg5b);
+		if (reg5b & 0x80) {
+			/* See if it stays set */
+			for(tries = 0; tries < 0x1000; tries ++) {
+				pci_read_config_byte(dev, 0x5b, &reg5b);
+				/* Failed ? */
+				if ((reg5b & 0x80) == 0)
+					return 0;
+			}
+			/* Turn off tuning, we have the DPLL set */
+			pci_read_config_dword(dev, 0x5c, &reg5c);
+			pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
+			return 1;
+		}
+	}
+	/* Never went stable */
+	return 0;
+}
+/**
+ *	hpt37x_init_one		-	Initialise an HPT37X/302
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Initialise an HPT37x device. There are some interesting complications
+ *	here. Firstly, the chip may report 366 and be one of several variants.
+ *	Secondly, all the timings depend on the chip clock, which we must
+ *	detect and look up.
+ *
+ *	These are the known chip mappings. They may be missing a couple of later
+ *	releases.
+ *
+ *	Chip version		PCI		Rev	Notes
+ *	HPT366			4 (HPT366)	0	Other driver
+ *	HPT366			4 (HPT366)	1	Other driver
+ *	HPT368			4 (HPT366)	2	Other driver
+ *	HPT370			4 (HPT366)	3	UDMA100
+ *	HPT370A			4 (HPT366)	4	UDMA100
+ *	HPT372			4 (HPT366)	5	UDMA133 (1)
+ *	HPT372N			4 (HPT366)	6	Other driver
+ *	HPT372A			5 (HPT372)	1	UDMA133 (1)
+ *	HPT372N			5 (HPT372)	2	Other driver
+ *	HPT302			6 (HPT302)	1	UDMA133
+ *	HPT302N			6 (HPT302)	2	Other driver
+ *	HPT371			7 (HPT371)	*	UDMA133
+ *	HPT374			8 (HPT374)	*	UDMA133 4 channel
+ *	HPT372N			9 (HPT372N)	*	Other driver
+ *
+ *	(1) UDMA133 support depends on the bus clock
+ */
+
+static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	/* HPT370 - UDMA100 */
+	static const struct ata_port_info info_hpt370 = {
+		.sht = &hpt37x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &hpt370_port_ops
+	};
+	/* HPT370A - UDMA100 */
+	static const struct ata_port_info info_hpt370a = {
+		.sht = &hpt37x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &hpt370a_port_ops
+	};
+	/* HPT370 - UDMA100 */
+	static const struct ata_port_info info_hpt370_33 = {
+		.sht = &hpt37x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x0f,
+		.port_ops = &hpt370_port_ops
+	};
+	/* HPT370A - UDMA100 */
+	static const struct ata_port_info info_hpt370a_33 = {
+		.sht = &hpt37x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x0f,
+		.port_ops = &hpt370a_port_ops
+	};
+	/* HPT371, 372 and friends - UDMA133 */
+	static const struct ata_port_info info_hpt372 = {
+		.sht = &hpt37x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &hpt372_port_ops
+	};
+	/* HPT374 - UDMA133 */
+	static const struct ata_port_info info_hpt374 = {
+		.sht = &hpt37x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &hpt374_port_ops
+	};
+
+	static const int MHz[4] = { 33, 40, 50, 66 };
+	const struct ata_port_info *port;
+	void *private_data = NULL;
+	struct ata_port_info port_info;
+	const struct ata_port_info *ppi[] = { &port_info, NULL };
+
+	u8 irqmask;
+	u32 class_rev;
+	u8 mcr1;
+	u32 freq;
+	int prefer_dpll = 1;
+
+	unsigned long iobase = pci_resource_start(dev, 4);
+
+	const struct hpt_chip *chip_table;
+	int clock_slot;
+
+	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+	class_rev &= 0xFF;
+
+	if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+		/* May be a later chip in disguise. Check */
+		/* Older chips are in the HPT366 driver. Ignore them */
+		if (class_rev < 3)
+			return -ENODEV;
+		/* N series chips have their own driver. Ignore */
+		if (class_rev == 6)
+			return -ENODEV;
+
+		switch(class_rev) {
+			case 3:
+				port = &info_hpt370;
+				chip_table = &hpt370;
+				prefer_dpll = 0;
+				break;
+			case 4:
+				port = &info_hpt370a;
+				chip_table = &hpt370a;
+				prefer_dpll = 0;
+				break;
+			case 5:
+				port = &info_hpt372;
+				chip_table = &hpt372;
+				break;
+			default:
+				printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype please report (%d).\n", class_rev);
+				return -ENODEV;
+		}
+	} else {
+		switch(dev->device) {
+			case PCI_DEVICE_ID_TTI_HPT372:
+				/* 372N if rev >= 2*/
+				if (class_rev >= 2)
+					return -ENODEV;
+				port = &info_hpt372;
+				chip_table = &hpt372a;
+				break;
+			case PCI_DEVICE_ID_TTI_HPT302:
+				/* 302N if rev > 1 */
+				if (class_rev > 1)
+					return -ENODEV;
+				port = &info_hpt372;
+				/* Check this */
+				chip_table = &hpt302;
+				break;
+			case PCI_DEVICE_ID_TTI_HPT371:
+				if (class_rev > 1)
+					return -ENODEV;
+				port = &info_hpt372;
+				chip_table = &hpt371;
+				/* Single channel device, master is not present
+				   but the BIOS (or us for non x86) must mark it
+				   absent */
+				pci_read_config_byte(dev, 0x50, &mcr1);
+				mcr1 &= ~0x04;
+				pci_write_config_byte(dev, 0x50, mcr1);
+				break;
+			case PCI_DEVICE_ID_TTI_HPT374:
+				chip_table = &hpt374;
+				port = &info_hpt374;
+				break;
+			default:
+				printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
+				return -ENODEV;
+		}
+	}
+	/* Ok so this is a chip we support */
+
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x5A, &irqmask);
+	irqmask &= ~0x10;
+	pci_write_config_byte(dev, 0x5a, irqmask);
+
+	/*
+	 * Default to the PCI clock. Make sure MA15/16 are set to output
+	 * to prevent drives having problems with 40-pin cables. Needed
+	 * for some drives such as IBM-DTLA which will not enter ready
+	 * state on reset when PDIAG is an input.
+	 */
+
+	pci_write_config_byte(dev, 0x5b, 0x23);
+
+	/*
+	 * HighPoint does this for HPT372A.
+	 * NOTE: This register is only writeable via I/O space.
+	 */
+	if (chip_table == &hpt372a)
+		outb(0x0e, iobase + 0x9c);
+
+	/* Some devices do not let this value be accessed via PCI space
+	   according to the old driver */
+
+	freq = inl(iobase + 0x90);
+	if ((freq >> 12) != 0xABCDE) {
+		int i;
+		u8 sr;
+		u32 total = 0;
+
+		printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n");
+
+		/* This is the process the HPT371 BIOS is reported to use */
+		for(i = 0; i < 128; i++) {
+			pci_read_config_byte(dev, 0x78, &sr);
+			total += sr & 0x1FF;
+			udelay(15);
+		}
+		freq = total / 128;
+	}
+	freq &= 0x1FF;
+
+	/*
+	 *	Turn the frequency check into a band and then find a timing
+	 *	table to match it.
+	 */
+
+	clock_slot = hpt37x_clock_slot(freq, chip_table->base);
+	if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) {
+		/*
+		 *	We need to try PLL mode instead
+		 *
+		 *	For non UDMA133 capable devices we should
+		 *	use a 50MHz DPLL by choice
+		 */
+		unsigned int f_low, f_high;
+		int dpll, adjust;
+
+		/* Compute DPLL */
+		dpll = 2;
+		if (port->udma_mask & 0xE0)
+			dpll = 3;
+
+		f_low = (MHz[clock_slot] * 48) / MHz[dpll];
+		f_high = f_low + 2;
+		if (clock_slot > 1)
+			f_high += 2;
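+		/*
+		 * Illustrative numbers only: with a 33MHz PCI clock and a
+		 * 66MHz DPLL target this works out to f_low = (33 * 48) / 66
+		 * = 24 and f_high = 26.
+		 */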
+
+		/* Select the DPLL clock. */
+		pci_write_config_byte(dev, 0x5b, 0x21);
+		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+
+		for(adjust = 0; adjust < 8; adjust++) {
+			if (hpt37x_calibrate_dpll(dev))
+				break;
+			/* See if it'll settle at a fractionally different clock */
+			if ((adjust & 3) == 3) {
+				f_low --;
+				f_high ++;
+			}
+			pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+		}
+		if (adjust == 8) {
+			printk(KERN_WARNING "hpt37x: DPLL did not stabilize.\n");
+			return -ENODEV;
+		}
+		if (dpll == 3)
+			private_data = (void *)hpt37x_timings_66;
+		else
+			private_data = (void *)hpt37x_timings_50;
+
+		printk(KERN_INFO "hpt37x: Bus clock %dMHz, using DPLL.\n", MHz[dpll]);
+	} else {
+		private_data = (void *)chip_table->clocks[clock_slot];
+		/*
+		 *	Perform a final fixup. Note that we will have used the
+		 *	DPLL on the HPT372 which means we don't have to worry
+		 *	about lack of UDMA133 support on lower clocks
+ 		 */
+
+		if (clock_slot < 2 && port == &info_hpt370)
+			port = &info_hpt370_33;
+		if (clock_slot < 2 && port == &info_hpt370a)
+			port = &info_hpt370a_33;
+		printk(KERN_INFO "hpt37x: %s: Bus clock %dMHz.\n", chip_table->name, MHz[clock_slot]);
+	}
+
+	/* Now kick off ATA set up */
+	port_info = *port;
+	port_info.private_data = private_data;
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id hpt37x[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
+
+	{ },
+};
+
+static struct pci_driver hpt37x_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= hpt37x,
+	.probe 		= hpt37x_init_one,
+	.remove		= ata_pci_remove_one
+};
+
+static int __init hpt37x_init(void)
+{
+	return pci_register_driver(&hpt37x_pci_driver);
+}
+
+static void __exit hpt37x_exit(void)
+{
+	pci_unregister_driver(&hpt37x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt37x);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt37x_init);
+module_exit(hpt37x_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt3x2n.c linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt3x2n.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt3x2n.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt3x2n.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,638 @@
+/*
+ * Libata driver for the highpoint 372N and 302N UDMA133 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c		Version 0.36	April 25, 2003
+ *
+ * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003		Red Hat Inc
+ * Portions Copyright (C) 2005-2006	MontaVista Software, Inc.
+ *
+ *
+ * TODO
+ *	Work out best PLL policy
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt3x2n"
+#define DRV_VERSION	"0.3.3"
+
+enum {
+	HPT_PCI_FAST	=	(1 << 31),
+	PCI66		=	(1 << 1),
+	USE_DPLL	=	(1 << 0)
+};
+
+struct hpt_clock {
+	u8	xfer_speed;
+	u32	timing;
+};
+
+struct hpt_chip {
+	const char *name;
+	struct hpt_clock *clocks[3];
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3    data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
+ *        DMA. cycles = value + 1
+ * 4:8    data_low_time. active time of DIOW_/DIOR_ for PIO and MW
+ *        DMA. cycles = value + 1
+ * 9:12   cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 13:17  cmd_low_time. active time of DIOW_/DIOR_ during task file
+ *        register access.
+ * 18:21  udma_cycle_time. clock freq and clock cycles for UDMA xfer.
+ *        during task file register access.
+ * 22:24  pre_high_time. time to initialize 1st cycle for PIO and MW DMA
+ *        xfer.
+ * 25:27  cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ *        register access.
+ * 28     UDMA enable
+ * 29     DMA enable
+ * 30     PIO_MST enable. if set, the chip is in bus master mode during
+ *        PIO.
+ * 31     FIFO enable.
+ */
+
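+/*
+ * Illustrative decode of a timing word t following the key above (a
+ * sketch only; the driver programs the words verbatim):
+ *	data_high	= t & 0xf;
+ *	data_low	= (t >> 4) & 0x1f;
+ *	cmd_high	= (t >> 9) & 0xf;
+ *	cmd_low		= (t >> 13) & 0x1f;
+ *	udma_cycle	= (t >> 18) & 0xf;
+ *	pre_high	= (t >> 22) & 0x7;
+ *	cmd_pre_high	= (t >> 25) & 0x7;
+ */
+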
+/* 66MHz DPLL clocks */
+
+static struct hpt_clock hpt3x2n_clocks[] = {
+	{	XFER_UDMA_7,	0x1c869c62	},
+	{	XFER_UDMA_6,	0x1c869c62	},
+	{	XFER_UDMA_5,	0x1c8a9c62	},
+	{	XFER_UDMA_4,	0x1c8a9c62	},
+	{	XFER_UDMA_3,	0x1c8e9c62	},
+	{	XFER_UDMA_2,	0x1c929c62	},
+	{	XFER_UDMA_1,	0x1c9a9c62	},
+	{	XFER_UDMA_0,	0x1c829c62	},
+
+	{	XFER_MW_DMA_2,	0x2c829c62	},
+	{	XFER_MW_DMA_1,	0x2c829c66	},
+	{	XFER_MW_DMA_0,	0x2c829d2c	},
+
+	{	XFER_PIO_4,	0x0c829c62	},
+	{	XFER_PIO_3,	0x0c829c84	},
+	{	XFER_PIO_2,	0x0c829ca6	},
+	{	XFER_PIO_1,	0x0d029d26	},
+	{	XFER_PIO_0,	0x0d029d5e	},
+	{	0,		0x0d029d5e	}
+};
+
+/**
+ *	hpt3x2n_find_mode	-	look up the timing for a transfer mode
+ *	@ap: ATA port
+ *	@speed: transfer mode
+ *
+ *	Return the 32bit register programming information for this channel
+ *	that matches the speed provided. For the moment the clocks table
+ *	is hard coded but easy to change. This will be needed if we use
+ *	different DPLLs
+ */
+
+static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
+{
+	struct hpt_clock *clocks = hpt3x2n_clocks;
+
+	while(clocks->xfer_speed) {
+		if (clocks->xfer_speed == speed)
+			return clocks->timing;
+		clocks++;
+	}
+	BUG();
+	return 0xffffffffU;	/* silence compiler warning */
+}
+
+/**
+ *	hpt3x2n_cable_detect	-	Detect the cable type
+ *	@ap: ATA port to detect on
+ *
+ *	Return the cable type attached to this port
+ */
+
+static int hpt3x2n_cable_detect(struct ata_port *ap)
+{
+	u8 scr2, ata66;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, 0x5B, &scr2);
+	pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
+	/* Cable register now active */
+	pci_read_config_byte(pdev, 0x5A, &ata66);
+	/* Restore state */
+	pci_write_config_byte(pdev, 0x5B, scr2);
+
+	if (ata66 & (1 << ap->port_no))
+		return ATA_CBL_PATA40;
+	else
+		return ATA_CBL_PATA80;
+}
+
+/**
+ *	hpt3xn_pre_reset	-	reset the hpt3x2n bus
+ *	@ap: ATA port to reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the initial reset handling for the 3x2n series controllers.
+ *	Reset the hardware and state machine,
+ *	Reset the hardware and state machine.
+
+static int hpt3xn_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	/* Reset the state machine */
+	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+	udelay(100);
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	hpt3x2n_error_handler	-	probe the hpt3x2n bus
+ *	@ap: ATA port to reset
+ *
+ *	Perform the probe reset handling for the 3x2N
+ */
+
+static void hpt3x2n_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, hpt3xn_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	hpt3x2n_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Perform PIO mode setup.
+ */
+
+static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x07;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt3x2n_find_mode(ap, adev->pio_mode);
+	mode &= ~0x8000000;	/* No FIFO in PIO */
+	mode &= ~0x30070000;	/* Leave config bits alone */
+	reg &= 0x30070000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt3x2n_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1, addr2;
+	u32 reg;
+	u32 mode;
+	u8 fast;
+
+	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	addr2 = 0x51 + 4 * ap->port_no;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	fast &= ~0x07;
+	pci_write_config_byte(pdev, addr2, fast);
+
+	pci_read_config_dword(pdev, addr1, &reg);
+	mode = hpt3x2n_find_mode(ap, adev->dma_mode);
+	mode |= 0x8000000;	/* FIFO in MWDMA or UDMA */
+	mode &= ~0xC0000000;	/* Leave config bits alone */
+	reg &= 0xC0000000;	/* Strip timing bits */
+	pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ *	hpt3x2n_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Clean up after the HPT3x2n and later DMA engine
+ */
+
+static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int mscreg = 0x50 + 2 * ap->port_no;
+	u8 bwsr_stat, msc_stat;
+
+	pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
+	pci_read_config_byte(pdev, mscreg, &msc_stat);
+	if (bwsr_stat & (1 << ap->port_no))
+		pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
+	ata_bmdma_stop(qc);
+}
+
+/**
+ *	hpt3x2n_set_clock	-	clock control
+ *	@ap: ATA port
+ *	@source: 0x21 or 0x23 for PLL or PCI sourced clock
+ *
+ *	Switch the ATA bus clock between the PLL and PCI clock sources
+ *	while correctly isolating the bus and resetting internal logic
+ *
+ *	We must use the DPLL for
+ *	-	writing
+ *	-	second channel UDMA7 (SATA ports) or higher
+ *	-	66MHz PCI
+ *
+ *	or we will underclock the device and get reduced performance.
+ */
+
+static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+{
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
+	/* Tristate the bus */
+	iowrite8(0x80, bmdma+0x73);
+	iowrite8(0x80, bmdma+0x77);
+
+	/* Switch clock and reset channels */
+	iowrite8(source, bmdma+0x7B);
+	iowrite8(0xC0, bmdma+0x79);
+
+	/* Reset state machines */
+	iowrite8(0x37, bmdma+0x70);
+	iowrite8(0x37, bmdma+0x74);
+
+	/* Complete reset */
+	iowrite8(0x00, bmdma+0x79);
+
+	/* Reconnect channels to bus */
+	iowrite8(0x00, bmdma+0x73);
+	iowrite8(0x00, bmdma+0x77);
+}
+
+/* Check whether our partner interface is idle */
+
+static int hpt3x2n_pair_idle(struct ata_port *ap)
+{
+	struct ata_host *host = ap->host;
+	struct ata_port *pair = host->ports[ap->port_no ^ 1];
+
+	if (pair->hsm_task_state == HSM_ST_IDLE)
+		return 1;
+	return 0;
+}
+
+static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
+{
+	long flags = (long)ap->host->private_data;
+	/* See if we should use the DPLL */
+	if (writing)
+		return USE_DPLL;	/* Needed for write */
+	if (flags & PCI66)
+		return USE_DPLL;	/* Needed at 66MHz */
+	return 0;
+}
+
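+/*
+ * Only retune the bus clock while the partner channel is idle: switching
+ * it tristates and resets both channels, which would corrupt a transfer
+ * in flight on the other port.
+ */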
+static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct ata_port *ap = qc->ap;
+	int flags = (long)ap->host->private_data;
+
+	if (hpt3x2n_pair_idle(ap)) {
+		int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
+		if ((flags & USE_DPLL) != dpll) {
+			if (dpll == 1)
+				hpt3x2n_set_clock(ap, 0x21);
+			else
+				hpt3x2n_set_clock(ap, 0x23);
+		}
+	}
+	return ata_qc_issue_prot(qc);
+}
+
+static struct scsi_host_template hpt3x2n_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+/*
+ *	Configuration for HPT3x2n.
+ */
+
+static struct ata_port_operations hpt3x2n_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt3x2n_set_piomode,
+	.set_dmamode	= hpt3x2n_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= hpt3x2n_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= hpt3x2n_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= hpt3x2n_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= hpt3x2n_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	hpt3xn_calibrate_dpll		-	Calibrate the DPLL loop
+ *	@dev: PCI device
+ *
+ *	Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
+ *	succeeds
+ */
+
+static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
+{
+	u8 reg5b;
+	u32 reg5c;
+	int tries;
+
+	for(tries = 0; tries < 0x5000; tries++) {
+		udelay(50);
+		pci_read_config_byte(dev, 0x5b, &reg5b);
+		if (reg5b & 0x80) {
+			/* See if it stays set */
+			for(tries = 0; tries < 0x1000; tries ++) {
+				pci_read_config_byte(dev, 0x5b, &reg5b);
+				/* Failed ? */
+				if ((reg5b & 0x80) == 0)
+					return 0;
+			}
+			/* Turn off tuning, we have the DPLL set */
+			pci_read_config_dword(dev, 0x5c, &reg5c);
+			pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
+			return 1;
+		}
+	}
+	/* Never went stable */
+	return 0;
+}
+
+static int hpt3x2n_pci_clock(struct pci_dev *pdev)
+{
+	unsigned long freq;
+	u32 fcnt;
+	unsigned long iobase = pci_resource_start(pdev, 4);
+
+	fcnt = inl(iobase + 0x90);	/* Not PCI readable for some chips */
+	if ((fcnt >> 12) != 0xABCDE) {
+		printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
+		return 33;	/* Not BIOS set */
+	}
+	fcnt &= 0x1FF;
+
+	freq = (fcnt * 77) / 192;
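+	/*
+	 * Example only: a count of 82 gives (82 * 77) / 192 = 32, which the
+	 * bands below report as a 33MHz PCI bus.
+	 */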
+
+	/* Clamp to bands */
+	if (freq < 40)
+		return 33;
+	if (freq < 45)
+		return 40;
+	if (freq < 55)
+		return 50;
+	return 66;
+}
+
+/**
+ *	hpt3x2n_init_one		-	Initialise an HPT3x2N
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Initialise an HPT3x2n device. There are some interesting complications
+ *	here. Firstly, the chip may report 366 and be one of several variants.
+ *	Secondly, all the timings depend on the chip clock, which we must
+ *	detect and look up.
+ *
+ *	These are the known chip mappings. They may be missing a couple of later
+ *	releases.
+ *
+ *	Chip version		PCI		Rev	Notes
+ *	HPT372			4 (HPT366)	5	Other driver
+ *	HPT372N			4 (HPT366)	6	UDMA133
+ *	HPT372			5 (HPT372)	1	Other driver
+ *	HPT372N			5 (HPT372)	2	UDMA133
+ *	HPT302			6 (HPT302)	*	Other driver
+ *	HPT302N			6 (HPT302)	> 1	UDMA133
+ *	HPT371			7 (HPT371)	*	Other driver
+ *	HPT371N			7 (HPT371)	> 1	UDMA133
+ *	HPT374			8 (HPT374)	*	Other driver
+ *	HPT372N			9 (HPT372N)	*	UDMA133
+ *
+ *	(1) UDMA133 support depends on the bus clock
+ *
+ *	To pin down		HPT371N
+ */
+
+static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	/* HPT372N and friends - UDMA133 */
+	static const struct ata_port_info info = {
+		.sht = &hpt3x2n_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &hpt3x2n_port_ops
+	};
+	struct ata_port_info port = info;
+	const struct ata_port_info *ppi[] = { &port, NULL };
+
+	u8 irqmask;
+	u32 class_rev;
+
+	unsigned int pci_mhz;
+	unsigned int f_low, f_high;
+	int adjust;
+	unsigned long iobase = pci_resource_start(dev, 4);
+
+	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+	class_rev &= 0xFF;
+
+	switch(dev->device) {
+		case PCI_DEVICE_ID_TTI_HPT366:
+			if (class_rev < 6)
+				return -ENODEV;
+			break;
+		case PCI_DEVICE_ID_TTI_HPT371:
+			if (class_rev < 2)
+				return -ENODEV;
+			/* 371N if rev > 1 */
+			break;
+		case PCI_DEVICE_ID_TTI_HPT372:
+			/* 372N if rev >= 2*/
+			if (class_rev < 2)
+				return -ENODEV;
+			break;
+		case PCI_DEVICE_ID_TTI_HPT302:
+			if (class_rev < 2)
+				return -ENODEV;
+			break;
+		case PCI_DEVICE_ID_TTI_HPT372N:
+			break;
+		default:
+			printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
+			return -ENODEV;
+	}
+
+	/* Ok so this is a chip we support */
+
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+	pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+	pci_read_config_byte(dev, 0x5A, &irqmask);
+	irqmask &= ~0x10;
+	pci_write_config_byte(dev, 0x5a, irqmask);
+
+	/*
+	 * HPT371 chips physically have only one channel, the secondary one,
+	 * but the primary channel registers do exist!  Go figure...
+	 * So,  we manually disable the non-existing channel here
+	 * (if the BIOS hasn't done this already).
+	 */
+	if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+		u8 mcr1;
+		pci_read_config_byte(dev, 0x50, &mcr1);
+		mcr1 &= ~0x04;
+		pci_write_config_byte(dev, 0x50, mcr1);
+	}
+
+	/* Tune the PLL. HPT recommends using 75 for SATA, 66 for UDMA133 or
+	   50 for UDMA100. Right now we always use 66. */
+
+	pci_mhz = hpt3x2n_pci_clock(dev);
+
+	f_low = (pci_mhz * 48) / 66;	/* PCI MHz for 66MHz DPLL */
+	f_high = f_low + 2;		/* Tolerance */
+
+	pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+	/* PLL clock */
+	pci_write_config_byte(dev, 0x5B, 0x21);
+
+	/* Unlike the 37x we don't try jiggling the frequency */
+	for(adjust = 0; adjust < 8; adjust++) {
+		if (hpt3xn_calibrate_dpll(dev))
+			break;
+		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+	}
+	if (adjust == 8) {
+		printk(KERN_WARNING "hpt3x2n: DPLL did not stabilize.\n");
+		return -ENODEV;
+	}
+
+	/* Set our private data up. We only need a few flags so we use
+	   it directly */
+	port.private_data = NULL;
+	if (pci_mhz > 60) {
+		port.private_data = (void *)PCI66;
+		/*
+		 * On HPT371N, if the ATA clock is 66 MHz we must set bit 2 in
+		 * the MISC. register to stretch the UltraDMA Tss timing.
+		 * NOTE: This register is only writeable via I/O space.
+		 */
+		if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
+			outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
+	}
+
+	/* Now kick off ATA set up */
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id hpt3x2n[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), },
+
+	{ },
+};
+
+static struct pci_driver hpt3x2n_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= hpt3x2n,
+	.probe 		= hpt3x2n_init_one,
+	.remove		= ata_pci_remove_one
+};
+
+static int __init hpt3x2n_init(void)
+{
+	return pci_register_driver(&hpt3x2n_pci_driver);
+}
+
+static void __exit hpt3x2n_exit(void)
+{
+	pci_unregister_driver(&hpt3x2n_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt3x2n);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt3x2n_init);
+module_exit(hpt3x2n_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt3x3.c linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt3x3.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_hpt3x3.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_hpt3x3.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,233 @@
+/*
+ *	pata_hpt3x3		-	HPT3x3 driver
+ *	(c) Copyright 2005-2006 Red Hat
+ *
+ *	Was pata_hpt34x, but the naming was confusing as it supported the
+ *	343 and 363, so it has been renamed.
+ *
+ *	Based on:
+ *	linux/drivers/ide/pci/hpt34x.c		Version 0.40	Sept 10, 2002
+ *	Copyright (C) 1998-2000	Andre Hedrick <andre@linux-ide.org>
+ *
+ *	May be copied or modified under the terms of the GNU General Public
+ *	License
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_hpt3x3"
+#define DRV_VERSION	"0.4.3"
+
+/**
+ *	hpt3x3_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. This is fairly simple on the HPT3x3 as
+ *	all we have to do is clear the MWDMA and UDMA bits then load the
+ *	mode number.
+ */
+
+static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 r1, r2;
+	int dn = 2 * ap->port_no + adev->devno;
+
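+	/*
+	 * Register 0x44 holds a 3-bit timing number per device (indexed by
+	 * dn); register 0x48 holds the per-device UDMA (0x01 << dn) and
+	 * MWDMA (0x10 << dn) enable bits.
+	 */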
+	pci_read_config_dword(pdev, 0x44, &r1);
+	pci_read_config_dword(pdev, 0x48, &r2);
+	/* Load the PIO timing number */
+	r1 &= ~(7 << (3 * dn));
+	r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
+	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
+
+	pci_write_config_dword(pdev, 0x44, r1);
+	pci_write_config_dword(pdev, 0x48, r2);
+}
+
+/**
+ *	hpt3x3_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	Set up the channel for MWDMA or UDMA modes. Much the same as with
+ *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 r1, r2;
+	int dn = 2 * ap->port_no + adev->devno;
+	int mode_num = adev->dma_mode & 0x0F;
+
+	pci_read_config_dword(pdev, 0x44, &r1);
+	pci_read_config_dword(pdev, 0x48, &r2);
+	/* Load the timing number */
+	r1 &= ~(7 << (3 * dn));
+	r1 |= (mode_num << (3 * dn));
+	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
+
+	if (adev->dma_mode >= XFER_UDMA_0)
+		r2 |= 0x01 << dn;	/* Ultra mode */
+	else
+		r2 |= 0x10 << dn;	/* MWDMA */
+
+	pci_write_config_dword(pdev, 0x44, r1);
+	pci_write_config_dword(pdev, 0x48, r2);
+}
+
+static struct scsi_host_template hpt3x3_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations hpt3x3_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= hpt3x3_set_piomode,
+	.set_dmamode	= hpt3x3_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	hpt3x3_init_chipset	-	chip setup
+ *	@dev: PCI device
+ *
+ *	Perform the setup required at boot and on resume.
+ */
+
+static void hpt3x3_init_chipset(struct pci_dev *dev)
+{
+	u16 cmd;
+	/* Initialize the board */
+	pci_write_config_word(dev, 0x80, 0x00);
+	/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	if (cmd & PCI_COMMAND_MEMORY)
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
+	else
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
+}
+
+
+/**
+ *	hpt3x3_init_one		-	Initialise an HPT343/363
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Perform basic initialisation. The chip has a quirk in that it won't
+ *	function unless it is at XX00. The old ATA driver touched this up,
+ *	but we leave it for the PCI quirks code to handle properly.
+ */
+
+static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &hpt3x3_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x07,
+		.port_ops = &hpt3x3_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	hpt3x3_init_chipset(dev);
+	/* Now kick off ATA set up */
+	return ata_pci_init_one(dev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int hpt3x3_reinit_one(struct pci_dev *dev)
+{
+	hpt3x3_init_chipset(dev);
+	return ata_pci_device_resume(dev);
+}
+#endif
+
+static const struct pci_device_id hpt3x3[] = {
+	{ PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
+
+	{ },
+};
+
+static struct pci_driver hpt3x3_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= hpt3x3,
+	.probe 		= hpt3x3_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= hpt3x3_reinit_one,
+#endif
+};
+
+static int __init hpt3x3_init(void)
+{
+	return pci_register_driver(&hpt3x3_pci_driver);
+}
+
+
+static void __exit hpt3x3_exit(void)
+{
+	pci_unregister_driver(&hpt3x3_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt3x3);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt3x3_init);
+module_exit(hpt3x3_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_icside.c linux-2.6.18.x86_64.p4/drivers/ata/pata_icside.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_icside.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_icside.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,694 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#include <asm/dma.h>
+#include <asm/ecard.h>
+
+#define DRV_NAME	"pata_icside"
+
+#define ICS_IDENT_OFFSET		0x2280
+
+#define ICS_ARCIN_V5_INTRSTAT		0x0000
+#define ICS_ARCIN_V5_INTROFFSET		0x0004
+
+#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
+#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
+#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
+#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
+
+struct portinfo {
+	unsigned int dataoffset;
+	unsigned int ctrloffset;
+	unsigned int stepping;
+};
+
+static const struct portinfo pata_icside_portinfo_v5 = {
+	.dataoffset	= 0x2800,
+	.ctrloffset	= 0x2b80,
+	.stepping	= 6,
+};
+
+static const struct portinfo pata_icside_portinfo_v6_1 = {
+	.dataoffset	= 0x2000,
+	.ctrloffset	= 0x2380,
+	.stepping	= 6,
+};
+
+static const struct portinfo pata_icside_portinfo_v6_2 = {
+	.dataoffset	= 0x3000,
+	.ctrloffset	= 0x3380,
+	.stepping	= 6,
+};
+
+#define PATA_ICSIDE_MAX_SG	128
+
+struct pata_icside_state {
+	void __iomem *irq_port;
+	void __iomem *ioc_base;
+	unsigned int type;
+	unsigned int dma;
+	struct {
+		u8 port_sel;
+		u8 disabled;
+		unsigned int speed[ATA_MAX_DEVICES];
+	} port[2];
+	struct scatterlist sg[PATA_ICSIDE_MAX_SG];
+};
+
+struct pata_icside_info {
+	struct pata_icside_state *state;
+	struct expansion_card	*ec;
+	void __iomem		*base;
+	void __iomem		*irqaddr;
+	unsigned int		irqmask;
+	const expansioncard_ops_t *irqops;
+	unsigned int		mwdma_mask;
+	unsigned int		nr_ports;
+	const struct portinfo	*port[2];
+};
+
+#define ICS_TYPE_A3IN	0
+#define ICS_TYPE_A3USER	1
+#define ICS_TYPE_V6	3
+#define ICS_TYPE_V5	15
+#define ICS_TYPE_NOTYPE	((unsigned int)-1)
+
+/* ---------------- Version 5 PCB Support Functions --------------------- */
+/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
+ * Purpose  : enable interrupts from card
+ */
+static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
+}
+
+/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
+ * Purpose  : disable interrupts from card
+ */
+static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
+}
+
+static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
+	.irqenable	= pata_icside_irqenable_arcin_v5,
+	.irqdisable	= pata_icside_irqdisable_arcin_v5,
+};
+
+
+/* ---------------- Version 6 PCB Support Functions --------------------- */
+/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
+ * Purpose  : enable interrupts from card
+ */
+static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+	void __iomem *base = state->irq_port;
+
+	if (!state->port[0].disabled)
+		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+	if (!state->port[1].disabled)
+		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+}
+
+/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
+ * Purpose  : disable interrupts from card
+ */
+static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+}
+
+/* Prototype: pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
+ * Purpose  : detect an active interrupt from card
+ */
+static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
+{
+	struct pata_icside_state *state = ec->irq_data;
+
+	return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
+	       readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
+}
+
+static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
+	.irqenable	= pata_icside_irqenable_arcin_v6,
+	.irqdisable	= pata_icside_irqdisable_arcin_v6,
+	.irqpending	= pata_icside_irqpending_arcin_v6,
+};
+
+
+/*
+ * SG-DMA support.
+ *
+ * Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
+ * There is only one DMA controller per card, which means that only
+ * one drive can be accessed at one time.  NOTE! We do not enforce that
+ * here, but we rely on the main IDE driver spotting that both
+ * interfaces use the same IRQ, which should guarantee this.
+ */
+
+/*
+ * Configure the IOMD to give the appropriate timings for the transfer
+ * mode being requested.  We take the advice of the ATA standards, and
+ * calculate the cycle time based on the transfer mode, and the EIDE
+ * MW DMA specs that the drive provides in the IDENTIFY command.
+ *
+ * We have the following IOMD DMA modes to choose from:
+ *
+ *	Type	Active		Recovery	Cycle
+ *	A	250 (250)	312 (550)	562 (800)
+ *	B	187 (200)	250 (550)	437 (750)
+ *	C	125 (125)	125 (375)	250 (500)
+ *	D	62  (50)	125 (375)	187 (425)
+ *
+ * (figures in brackets are actual measured timings on DIOR/DIOW)
+ *
+ * However, we also need to take care of the read/write active and
+ * recovery timings:
+ *
+ *			Read	Write
+ *  	Mode	Active	-- Recovery --	Cycle	IOMD type
+ *	MW0	215	50	215	480	A
+ *	MW1	80	50	50	150	C
+ *	MW2	70	25	25	120	C
+ */
+static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pata_icside_state *state = ap->host->private_data;
+	struct ata_timing t;
+	unsigned int cycle;
+	char iomd_type;
+
+	/*
+	 * DMA is based on a 16MHz clock
+	 */
+	if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
+		return;
+
+	/*
+	 * Choose the IOMD cycle timing which ensures that the interface
+	 * satisfies the measured active, recovery and cycle times.
+	 */
+	if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
+		iomd_type = 'D', cycle = 187;
+	else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
+		iomd_type = 'C', cycle = 250;
+	else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
+		iomd_type = 'B', cycle = 437;
+	else
+		iomd_type = 'A', cycle = 562;
+
+	ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n",
+		t.active, t.recover, t.cycle, iomd_type);
+
+	state->port[ap->port_no].speed[adev->devno] = cycle;
+}
+
+static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+	struct scatterlist *sg, *rsg = state->sg;
+	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
+
+	/*
+	 * We are simplex; BUG if we try to fiddle with DMA
+	 * while it's active.
+	 */
+	BUG_ON(dma_channel_active(state->dma));
+
+	/*
+	 * Copy ATA's scattered sg list into a contiguous array of sg entries
+	 */
+	ata_for_each_sg(sg, qc) {
+		memcpy(rsg, sg, sizeof(*sg));
+		rsg++;
+	}
+
+	/*
+	 * Route the DMA signals to the correct interface
+	 */
+	writeb(state->port[ap->port_no].port_sel, state->ioc_base);
+
+	set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
+	set_dma_sg(state->dma, state->sg, rsg - state->sg);
+	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+
+	BUG_ON(dma_channel_active(state->dma));
+	enable_dma(state->dma);
+}
+
+static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct pata_icside_state *state = ap->host->private_data;
+
+	disable_dma(state->dma);
+
+	/* see ata_bmdma_stop */
+	ata_altstatus(ap);
+}
+
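+/*
+ * Report ATA_DMA_INTR when the card's per-channel interrupt status bit is
+ * set; this stands in for the usual BM-DMA status register.
+ */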
+static u8 pata_icside_bmdma_status(struct ata_port *ap)
+{
+	struct pata_icside_state *state = ap->host->private_data;
+	void __iomem *irq_port;
+
+	irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
+						    ICS_ARCIN_V6_INTRSTAT_1);
+
+	return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
+}
+
+static int icside_dma_init(struct pata_icside_info *info)
+{
+	struct pata_icside_state *state = info->state;
+	struct expansion_card *ec = info->ec;
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		state->port[0].speed[i] = 480;
+		state->port[1].speed[i] = 480;
+	}
+
+	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
+		state->dma = ec->dma;
+		info->mwdma_mask = 0x07;	/* MW0..2 */
+	}
+
+	return 0;
+}
+
+
+static int pata_icside_port_start(struct ata_port *ap)
+{
+	/* No PRD to alloc */
+	return ata_pad_alloc(ap, ap->dev);
+}
+
+static struct scsi_host_template pata_icside_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= PATA_ICSIDE_MAX_SG,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ~0, /* no dma boundaries */
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+/* wish this was exported from libata-core */
+static void ata_dummy_noret(struct ata_port *port)
+{
+}
+
+/*
+ * We need to shut down unused ports to prevent spurious interrupts.
+ * FIXME: the libata core doesn't call this function for PATA interfaces.
+ */
+static void pata_icside_port_disable(struct ata_port *ap)
+{
+	struct pata_icside_state *state = ap->host->private_data;
+
+	ata_port_printk(ap, KERN_ERR, "disabling icside port\n");
+
+	ata_port_disable(ap);
+
+	state->port[ap->port_no].disabled = 1;
+
+	if (state->type == ICS_TYPE_V6) {
+		/*
+		 * Disable interrupts from this port, otherwise we
+		 * receive spurious interrupts from the floating
+		 * interrupt line.
+		 */
+		void __iomem *irq_port = state->irq_port +
+				(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
+		readb(irq_port);
+	}
+}
+
+static u8 pata_icside_irq_ack(struct ata_port *ap, unsigned int chk_drq)
+{
+	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
+	u8 status;
+
+	status = ata_busy_wait(ap, bits, 1000);
+	if (status & bits)
+		if (ata_msg_err(ap))
+			printk(KERN_ERR "abnormal status 0x%X\n", status);
+
+	if (ata_msg_intr(ap))
+		printk(KERN_INFO "%s: irq ack: drv_stat 0x%X\n",
+			__FUNCTION__, status);
+
+	return status;
+}
+
+static struct ata_port_operations pata_icside_port_ops = {
+	.port_disable		= pata_icside_port_disable,
+
+	.set_dmamode		= pata_icside_set_dmamode,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.exec_command		= ata_exec_command,
+	.check_status		= ata_check_status,
+	.dev_select		= ata_std_dev_select,
+
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= pata_icside_bmdma_setup,
+	.bmdma_start		= pata_icside_bmdma_start,
+
+	.data_xfer		= ata_data_xfer_noirq,
+
+	/* no need to build any PRD tables for DMA */
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= pata_icside_bmdma_stop,
+
+	.irq_clear		= ata_dummy_noret,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= pata_icside_irq_ack,
+
+	.port_start		= pata_icside_port_start,
+
+	.bmdma_stop		= pata_icside_bmdma_stop,
+	.bmdma_status		= pata_icside_bmdma_status,
+};
+
+static void __devinit
+pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base,
+			 const struct portinfo *info)
+{
+	void __iomem *cmd = base + info->dataoffset;
+
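+	/*
+	 * Taskfile registers sit 1 << stepping bytes apart from the command
+	 * base; with the stepping of 6 used here that is 0x40 bytes.
+	 */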
+	ioaddr->cmd_addr	= cmd;
+	ioaddr->data_addr	= cmd + (ATA_REG_DATA    << info->stepping);
+	ioaddr->error_addr	= cmd + (ATA_REG_ERR     << info->stepping);
+	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << info->stepping);
+	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT   << info->stepping);
+	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL    << info->stepping);
+	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM    << info->stepping);
+	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH    << info->stepping);
+	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE  << info->stepping);
+	ioaddr->status_addr	= cmd + (ATA_REG_STATUS  << info->stepping);
+	ioaddr->command_addr	= cmd + (ATA_REG_CMD     << info->stepping);
+
+	ioaddr->ctl_addr	= base + info->ctrloffset;
+	ioaddr->altstatus_addr	= ioaddr->ctl_addr;
+}
+
+static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
+{
+	struct pata_icside_state *state = info->state;
+	void __iomem *base;
+
+	base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
+	if (!base)
+		return -ENOMEM;
+
+	state->irq_port = base;
+
+	info->base = base;
+	info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
+	info->irqmask = 1;
+	info->irqops = &pata_icside_ops_arcin_v5;
+	info->nr_ports = 1;
+	info->port[0] = &pata_icside_portinfo_v5;
+
+	return 0;
+}
+
+static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
+{
+	struct pata_icside_state *state = info->state;
+	struct expansion_card *ec = info->ec;
+	void __iomem *ioc_base, *easi_base;
+	unsigned int sel = 0;
+
+	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+	if (!ioc_base)
+		return -ENOMEM;
+
+	easi_base = ioc_base;
+
+	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
+		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
+		if (!easi_base)
+			return -ENOMEM;
+
+		/*
+		 * Enable access to the EASI region.
+		 */
+		sel = 1 << 5;
+	}
+
+	writeb(sel, ioc_base);
+
+	state->irq_port = easi_base;
+	state->ioc_base = ioc_base;
+	state->port[0].port_sel = sel;
+	state->port[1].port_sel = sel | 1;
+
+	/*
+	 * FIXME: work around libata's aversion to calling port_disable.
+	 * This permanently disables interrupts on port 0 - bad luck if
+	 * you have a drive on that port.
+	 */
+	state->port[0].disabled = 1;
+
+	info->base = easi_base;
+	info->irqops = &pata_icside_ops_arcin_v6;
+	info->nr_ports = 2;
+	info->port[0] = &pata_icside_portinfo_v6_1;
+	info->port[1] = &pata_icside_portinfo_v6_2;
+
+	return icside_dma_init(info);
+}
+
+static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
+{
+	struct expansion_card *ec = info->ec;
+	struct ata_host *host;
+	int i;
+
+	if (info->irqaddr) {
+		ec->irqaddr = info->irqaddr;
+		ec->irqmask = info->irqmask;
+	}
+	if (info->irqops)
+		ecard_setirq(ec, info->irqops, info->state);
+
+	/*
+	 * Be on the safe side - disable interrupts
+	 */
+	ec->ops->irqdisable(ec, ec->irq);
+
+	host = ata_host_alloc(&ec->dev, info->nr_ports);
+	if (!host)
+		return -ENOMEM;
+
+	host->private_data = info->state;
+	host->flags = ATA_HOST_SIMPLEX;
+
+	for (i = 0; i < info->nr_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ap->pio_mask = 0x1f;
+		ap->mwdma_mask = info->mwdma_mask;
+		ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
+		ap->ops = &pata_icside_port_ops;
+
+		pata_icside_setup_ioaddr(&ap->ioaddr, info->base, info->port[i]);
+	}
+
+	return ata_host_activate(host, ec->irq, ata_interrupt, 0,
+				 &pata_icside_sht);
+}
+
+static int __devinit
+pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+	struct pata_icside_state *state;
+	struct pata_icside_info info;
+	void __iomem *idmem;
+	int ret;
+
+	ret = ecard_request_resources(ec);
+	if (ret)
+		goto out;
+
+	state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
+	if (!state) {
+		ret = -ENOMEM;
+		goto release;
+	}
+
+	state->type = ICS_TYPE_NOTYPE;
+	state->dma = NO_DMA;
+
+	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+	if (idmem) {
+		unsigned int type;
+
+		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
+		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
+		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
+		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
+		ecardm_iounmap(ec, idmem);
+
+		state->type = type;
+	}
+
+	memset(&info, 0, sizeof(info));
+	info.state = state;
+	info.ec = ec;
+
+	switch (state->type) {
+	case ICS_TYPE_A3IN:
+		dev_warn(&ec->dev, "A3IN unsupported\n");
+		ret = -ENODEV;
+		break;
+
+	case ICS_TYPE_A3USER:
+		dev_warn(&ec->dev, "A3USER unsupported\n");
+		ret = -ENODEV;
+		break;
+
+	case ICS_TYPE_V5:
+		ret = pata_icside_register_v5(&info);
+		break;
+
+	case ICS_TYPE_V6:
+		ret = pata_icside_register_v6(&info);
+		break;
+
+	default:
+		dev_warn(&ec->dev, "unknown interface type\n");
+		ret = -ENODEV;
+		break;
+	}
+
+	if (ret == 0)
+		ret = pata_icside_add_ports(&info);
+
+	if (ret == 0)
+		goto out;
+
+ release:
+	ecard_release_resources(ec);
+ out:
+	return ret;
+}
+
+static void pata_icside_shutdown(struct expansion_card *ec)
+{
+	struct ata_host *host = ecard_get_drvdata(ec);
+	unsigned long flags;
+
+	/*
+	 * Disable interrupts from this card.  We need to do
+	 * this before disabling EASI since we may be accessing
+	 * this register via that region.
+	 */
+	local_irq_save(flags);
+	ec->ops->irqdisable(ec, ec->irq);
+	local_irq_restore(flags);
+
+	/*
+	 * Reset the ROM pointer so that we can read the ROM
+	 * after a soft reboot.  This also disables access to
+	 * the IDE taskfile via the EASI region.
+	 */
+	if (host) {
+		struct pata_icside_state *state = host->private_data;
+		if (state->ioc_base)
+			writeb(0, state->ioc_base);
+	}
+}
+
+static void __devexit pata_icside_remove(struct expansion_card *ec)
+{
+	struct ata_host *host = ecard_get_drvdata(ec);
+	struct pata_icside_state *state = host->private_data;
+
+	ata_host_detach(host);
+
+	pata_icside_shutdown(ec);
+
+	/*
+	 * don't NULL out the drvdata - devres/libata wants it
+	 * to free the ata_host structure.
+	 */
+	if (state->dma != NO_DMA)
+		free_dma(state->dma);
+
+	ecard_release_resources(ec);
+}
+
+static const struct ecard_id pata_icside_ids[] = {
+	{ MANU_ICS,  PROD_ICS_IDE  },
+	{ MANU_ICS2, PROD_ICS2_IDE },
+	{ 0xffff, 0xffff }
+};
+
+static struct ecard_driver pata_icside_driver = {
+	.probe		= pata_icside_probe,
+	.remove 	= __devexit_p(pata_icside_remove),
+	.shutdown	= pata_icside_shutdown,
+	.id_table	= pata_icside_ids,
+	.drv = {
+		.name	= DRV_NAME,
+	},
+};
+
+static int __init pata_icside_init(void)
+{
+	return ecard_register_driver(&pata_icside_driver);
+}
+
+static void __exit pata_icside_exit(void)
+{
+	ecard_remove_driver(&pata_icside_driver);
+}
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ICS PATA driver");
+
+module_init(pata_icside_init);
+module_exit(pata_icside_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_isapnp.c linux-2.6.18.x86_64.p4/drivers/ata/pata_isapnp.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_isapnp.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_isapnp.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,166 @@
+
+/*
+ *   pata-isapnp.c - ISA PnP PATA controller driver.
+ *   Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ *
+ *   Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_isapnp"
+#define DRV_VERSION "0.2.1"
+
+static struct scsi_host_template isapnp_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations isapnp_port_ops = {
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	isapnp_init_one		-	attach an isapnp interface
+ *	@idev: PnP device
+ *	@dev_id: matching detect line
+ *
+ *	Register an ISA bus IDE interface. Such interfaces run at PIO 0
+ *	and use a non-shared IRQ.
+ */
+
+static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *cmd_addr, *ctl_addr;
+
+	if (pnp_port_valid(idev, 0) == 0)
+		return -ENODEV;
+
+	/* FIXME: Should select polled PIO here rather than fail */
+	if (pnp_irq_valid(idev, 0) == 0)
+		return -ENODEV;
+
+	/* allocate host */
+	host = ata_host_alloc(&idev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
+	if (!cmd_addr)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+
+	ap->ops = &isapnp_port_ops;
+	ap->pio_mask = 1;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	ap->ioaddr.cmd_addr = cmd_addr;
+
+	if (pnp_port_valid(idev, 1)) {
+		ctl_addr = devm_ioport_map(&idev->dev,
+					   pnp_port_start(idev, 1), 1);
+		ap->ioaddr.altstatus_addr = ctl_addr;
+		ap->ioaddr.ctl_addr = ctl_addr;
+	}
+
+	ata_std_ports(&ap->ioaddr);
+
+	/* activate */
+	return ata_host_activate(host, pnp_irq(idev, 0), ata_interrupt, 0,
+				 &isapnp_sht);
+}
+
+/**
+ *	isapnp_remove_one	-	unplug an isapnp interface
+ *	@idev: PnP device
+ *
+ *	Remove a previously configured PnP ATA port. Called only on module
+ *	unload events as the core does not currently deal with ISAPnP docking.
+ */
+
+static void isapnp_remove_one(struct pnp_dev *idev)
+{
+	struct device *dev = &idev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+
+	ata_host_detach(host);
+}
+
+static struct pnp_device_id isapnp_devices[] = {
+  	/* Generic ESDI/IDE/ATA compatible hard disk controller */
+	{.id = "PNP0600", .driver_data = 0},
+	{.id = ""}
+};
+
+static struct pnp_driver isapnp_driver = {
+	.name		= DRV_NAME,
+	.id_table	= isapnp_devices,
+	.probe		= isapnp_init_one,
+	.remove		= isapnp_remove_one,
+};
+
+static int __init isapnp_init(void)
+{
+	return pnp_register_driver(&isapnp_driver);
+}
+
+static void __exit isapnp_exit(void)
+{
+	pnp_unregister_driver(&isapnp_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(isapnp_init);
+module_exit(isapnp_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_it8213.c linux-2.6.18.x86_64.p4/drivers/ata/pata_it8213.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_it8213.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_it8213.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,366 @@
+/*
+ *    pata_it8213.c - iTE Tech. Inc.  IT8213 PATA driver
+ *
+ *    For timing purposes the IT8213 is a very ICH-like device, having
+ *    a similar register layout and the same split clock arrangement. Cable
+ *    detection is different, and it does not have slave channels or all the
+ *    clutter of later ICH/SATA setups.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_it8213"
+#define DRV_VERSION	"0.0.3"
+
+/**
+ *	it8213_pre_reset	-	filter out disabled ports
+ *	@ap: Port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Filter out ports by the enable bits before doing the normal reset
+ *	and probe.
+ */
+
+static int it8213_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits it8213_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+	};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	it8213_error_handler - Run the standard BMDMA error handling
+ *	@ap: Port to handle
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void it8213_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, it8213_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	it8213_cable_detect	-	check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection for the 8213 ATA interface. This is
+ *	different to the PIIX arrangement.
+ */
+
+static int it8213_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+	pci_read_config_byte(pdev, 0x42, &tmp);
+	if (tmp & 2)	/* The initial docs are incorrect */
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	it8213_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
+	u16 idetm_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. The 8213 is a clone, so very similar.
+	 */
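+	/*
+	 *	Worked example: PIO4 on the master maps to ISP=2, RTC=3 in
+	 *	the table below, so IDETIM ends up with 2 in bits 13:12 and
+	 *	3 in bits 9:8, the control bits (TIME1, IORDY, plus bit 2
+	 *	for ATAPI) in the low nibble, and SITRE forced on via bit 14.
+	 */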
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio > 2)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
+		control |= 2;	/* IORDY enable */
+	/* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */
+	if (adev->class != ATA_DEV_ATA)
+		control |= 4;
+
+	pci_read_config_word(dev, idetm_port, &idetm_data);
+
+	/* Enable PPE, IE and TIME as appropriate */
+
+	if (adev->devno == 0) {
+		idetm_data &= 0xCCF0;
+		idetm_data |= control;
+		idetm_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	} else {
+		u8 slave_data;
+
+		idetm_data &= 0xCC0F;
+		idetm_data |= (control << 4);
+
+		/* Slave timing is in a separate register */
+		pci_read_config_byte(dev, 0x44, &slave_data);
+		slave_data &= 0xF0;
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << 4;
+		pci_write_config_byte(dev, 0x44, slave_data);
+	}
+
+	idetm_data |= 0x4000;	/* Ensure SITRE is enabled */
+	pci_write_config_word(dev, idetm_port, idetm_data);
+}
+
+/**
+ *	it8213_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	This device is basically an ICH alike.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u16 master_data;
+	u8 speed		= adev->dma_mode;
+	int devid		= adev->devno;
+	u8 udma_enable;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	pci_read_config_word(dev, 0x40, &master_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (speed >= XFER_UDMA_0) {
+		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+		u16 udma_timing;
+		u16 ideconf;
+		int u_clock, u_speed;
+
+		/* Clocks follow the PIIX style */
+		u_speed = min(2 - (udma & 1), udma);
+		if (udma == 5)
+			u_clock = 0x1000;	/* 100MHz */
+		else if (udma > 2)
+			u_clock = 1;		/* 66MHz */
+		else
+			u_clock = 0;		/* 33MHz */
+
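+		/*
+		 *	For example, UDMA5 on the master: bit 0 of register
+		 *	0x48 enables UDMA, (5 & 3) = 1 goes into bits 1:0 of
+		 *	the timing register at 0x4A, and bit 12 of 0x54
+		 *	selects the 100MHz clock.
+		 */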
+		udma_enable |= (1 << devid);
+
+		/* Load the UDMA mode number */
+		pci_read_config_word(dev, 0x4A, &udma_timing);
+		udma_timing &= ~(3 << (4 * devid));
+		udma_timing |= (udma & 3) << (4 * devid);
+		pci_write_config_word(dev, 0x4A, udma_timing);
+
+		/* Load the clock selection */
+		pci_read_config_word(dev, 0x54, &ideconf);
+		ideconf &= ~(0x1001 << devid);
+		ideconf |= u_clock << devid;
+		pci_write_config_word(dev, 0x54, ideconf);
+	} else {
+		/*
+		 * MWDMA is driven by the PIO timings. We must also enable
+		 * IORDY unconditionally along with TIME1. PPE has already
+		 * been set when the PIO timing was set.
+		 */
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		unsigned int control;
+		u8 slave_data;
+		static const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+		control = 3;	/* IORDY|TIME1 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO into PIO0 */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			/* Enable DMA timing only */
+			control |= 8;	/* PIO cycles in PIO0 */
+
+		if (devid) {	/* Slave */
+			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
+			master_data |= control << 4;
+			pci_read_config_byte(dev, 0x44, &slave_data);
+			slave_data &= (0x0F + 0xE1 * ap->port_no);
+			/* Load the matching timing */
+			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+			pci_write_config_byte(dev, 0x44, slave_data);
+		} else { 	/* Master */
+			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
+						   and master timing bits */
+			master_data |= control;
+			master_data |=
+				(timings[pio][0] << 12) |
+				(timings[pio][1] << 8);
+		}
+		udma_enable &= ~(1 << devid);
+		pci_write_config_word(dev, 0x40, master_data);
+	}
+	pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+static struct scsi_host_template it8213_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.max_sectors		= ATA_MAX_SECTORS,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations it8213_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= it8213_set_piomode,
+	.set_dmamode		= it8213_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= it8213_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= it8213_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	it8213_init_one - Register 8213 ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in it8213_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht		= &it8213_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= 0x1f, /* UDMA 100 */
+		.port_ops	= &it8213_ops,
+	};
+	/* Current IT8213 stuff is single port */
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id it8213_pci_tbl[] = {
+	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver it8213_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= it8213_pci_tbl,
+	.probe			= it8213_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init it8213_init(void)
+{
+	return pci_register_driver(&it8213_pci_driver);
+}
+
+static void __exit it8213_exit(void)
+{
+	pci_unregister_driver(&it8213_pci_driver);
+}
+
+module_init(it8213_init);
+module_exit(it8213_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_it821x.c linux-2.6.18.x86_64.p4/drivers/ata/pata_it821x.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_it821x.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_it821x.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,805 @@
+/*
+ * pata_it821x.c 	- IT821x PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based upon
+ *
+ * it821x.c
+ *
+ * linux/drivers/ide/pci/it821x.c		Version 0.09	December 2004
+ *
+ * Copyright (C) 2004		Red Hat <alan@redhat.com>
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *  Based in part on the ITE vendor provided SCSI driver.
+ *
+ *  Documentation available from
+ * 	http://www.ite.com.tw/pc/IT8212F_V04.pdf
+ *  Some other documents are NDA.
+ *
+ *  The ITE8212 isn't exactly a standard IDE controller. It has two
+ *  modes. In pass through mode it is an IDE controller. In its smart
+ *  mode it is actually quite a capable hardware RAID controller disguised
+ *  as an IDE controller. Smart mode only understands DMA read/write and
+ *  identify, none of the fancier commands apply. The IT8211 is identical
+ *  in other respects but lacks the raid mode.
+ *
+ *  Errata:
+ *  o	Rev 0x10 also requires that master and slave hold the same DMA timings and
+ *	cannot do ATAPI MWDMA.
+ *  o	The identify data for raid volumes lacks CHS info (technically ok)
+ *	but also fails to set the LBA28 and other bits. We fix these in
+ *	the IDE probe quirk code.
+ *  o	If you write LBA48 sized I/Os (i.e. > 256 sectors) in smart mode
+ *	RAID then the controller firmware dies
+ *  o	Smart mode without RAID doesn't clear all the necessary identify
+ *	bits to reduce the command set to the one used
+ *
+ *  This has a few impacts on the driver
+ *  - In pass through mode we do all the work you would expect
+ *  - In smart mode the clocking setup is generally done by the controller,
+ *    but we must watch the other limits and filter.
+ *  - There are a few extra vendor commands that actually talk to the
+ *    controller but only work in PIO mode with no IRQ.
+ *
+ *  Vendor areas of the identify block in smart mode are used for the
+ *  timing and policy set up. Each HDD in raid mode also has a serial
+ *  block on the disk. The hardware extra commands are get/set chip status,
+ *  rebuild, get rebuild status.
+ *
+ *  In Linux the driver supports pass through mode as if the device was
+ *  just another IDE controller. If the smart mode is running then
+ *  volumes are managed by the controller firmware and each IDE "disk"
+ *  is a raid volume. Even more cute - the controller can do automated
+ *  hotplug and rebuild.
+ *
+ *  The pass through controller itself is a little demented. It has a
+ *  flaw in that it has a single set of PIO/MWDMA timings per channel, so
+ *  non-UDMA devices restrict each other's performance. It also has a
+ *  single clock source per channel so mixed UDMA100/133 performance
+ *  isn't perfect and we have to pick a clock. Thankfully none of this
+ *  matters in smart mode. ATAPI DMA is not currently supported.
+ *
+ *  It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
+ *
+ *  TODO
+ *	-	ATAPI and other speed filtering
+ *	-	RAID configuration ioctls
+ */
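+
+/*
+ * Example: loading the driver with "modprobe pata_it821x noraid=1" (the
+ * "noraid" module parameter declared at the bottom of this file) forces
+ * an IT8212 into pass through mode even if its firmware is configured
+ * for smart/RAID operation.
+ */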
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+
+#define DRV_NAME "pata_it821x"
+#define DRV_VERSION "0.3.6"
+
+struct it821x_dev
+{
+	unsigned int smart:1,		/* Are we in smart raid mode */
+		timing10:1;		/* Rev 0x10 */
+	u8	clock_mode;		/* 0, ATA_50 or ATA_66 */
+	u8	want[2][2];		/* Priority/clock wanted, per drive */
+	/* We need these for switching the clock when DMA goes on/off.
+	   The high byte is the 66MHz timing */
+	u16	pio[2];			/* Cached PIO values */
+	u16	mwdma[2];		/* Cached MWDMA values */
+	u16	udma[2];		/* Cached UDMA values (per drive) */
+	u16	last_device;		/* Master or slave loaded ? */
+};
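+
+/*
+ * want[unit][0] holds the priority of the request (1 = PIO, 2 = MWDMA,
+ * 3 = UDMA, as set by the mode setting methods below) and want[unit][1]
+ * the base clock that mode would prefer; it821x_clock_strategy() then
+ * picks the clock asked for by the highest priority requester.
+ */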
+
+#define ATA_66		0
+#define ATA_50		1
+#define ATA_ANY		2
+
+#define UDMA_OFF	0
+#define MWDMA_OFF	0
+
+/*
+ *	We allow users to force the card into non-RAID mode without
+ *	flashing the alternative BIOS. This is also necessary right now
+ *	for embedded platforms that cannot run a PC BIOS but are using this
+ *	device.
+ */
+
+static int it8212_noraid;
+
+/**
+ *	it821x_program	-	program the PIO/MWDMA registers
+ *	@ap: ATA port
+ *	@adev: Device to program
+ *	@timing: Timing value (66MHz in top 8 bits, 50MHz in the low 8)
+ *
+ *	Program the PIO/MWDMA timing for this channel according to the
+ *	current clock. These share the same register so are managed by
+ *	the DMA start/stop sequence as with the old driver.
+ */
+
+static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev = ap->private_data;
+	int channel = ap->port_no;
+	u8 conf;
+
+	/* Program PIO/MWDMA timing bits */
+	if (itdev->clock_mode == ATA_66)
+		conf = timing >> 8;
+	else
+		conf = timing & 0xFF;
+	pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
+}
+
+
+/**
+ *	it821x_program_udma	-	program the UDMA registers
+ *	@ap: ATA port
+ *	@adev: ATA device to update
+ *	@timing: Timing bits. Top 8 are for 66MHz, bottom for 50MHz
+ *
+ *	Program the UDMA timing for this drive according to the
+ *	current clock. Handles the dual clocks and also knows about
+ *	the errata on the 0x10 revision. The UDMA errata is partly handled
+ *	here and partly in start_dma.
+ */
+
+static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
+{
+	struct it821x_dev *itdev = ap->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int channel = ap->port_no;
+	int unit = adev->devno;
+	u8 conf;
+
+	/* Program UDMA timing bits */
+	if (itdev->clock_mode == ATA_66)
+		conf = timing >> 8;
+	else
+		conf = timing & 0xFF;
+	if (itdev->timing10 == 0)
+		pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
+	else {
+		/* Early revision must be programmed for both together */
+		pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
+		pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
+	}
+}
+
+/**
+ *	it821x_clock_strategy
+ *	@ap: ATA interface
+ *	@adev: ATA device being updated
+ *
+ *	Select between the 50 and 66MHz base clocks to get the best
+ *	results for this interface.
+ */
+
+static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev = ap->private_data;
+	u8 unit = adev->devno;
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	int clock, altclock;
+	u8 v;
+	int sel = 0;
+
+	/* Look for the most wanted clocking */
+	if (itdev->want[0][0] > itdev->want[1][0]) {
+		clock = itdev->want[0][1];
+		altclock = itdev->want[1][1];
+	} else {
+		clock = itdev->want[1][1];
+		altclock = itdev->want[0][1];
+	}
+
+	/* Master doesn't care; does the slave? */
+	if (clock == ATA_ANY)
+		clock = altclock;
+
+	/* Nobody cares - keep the same clock */
+	if (clock == ATA_ANY)
+		return;
+	/* No change */
+	if (clock == itdev->clock_mode)
+		return;
+
+	/* Load this into the controller */
+	if (clock == ATA_66)
+		itdev->clock_mode = ATA_66;
+	else {
+		itdev->clock_mode = ATA_50;
+		sel = 1;
+	}
+	pci_read_config_byte(pdev, 0x50, &v);
+	v &= ~(1 << (1 + ap->port_no));
+	v |= sel << (1 + ap->port_no);
+	pci_write_config_byte(pdev, 0x50, v);
+
+	/*
+	 *	Reprogram the UDMA/PIO of the pair drive for the switch.
+	 *	MWDMA will be dealt with by the dma switcher.
+	 */
+	if (pair && itdev->udma[1-unit] != UDMA_OFF) {
+		it821x_program_udma(ap, pair, itdev->udma[1-unit]);
+		it821x_program(ap, pair, itdev->pio[1-unit]);
+	}
+	/*
+	 *	Reprogram the UDMA/PIO of our drive for the switch.
+	 *	MWDMA will be dealt with by the dma switcher
+	 */
+	if (itdev->udma[unit] != UDMA_OFF) {
+		it821x_program_udma(ap, adev, itdev->udma[unit]);
+		it821x_program(ap, adev, itdev->pio[unit]);
+	}
+}
+
+/**
+ *	it821x_passthru_set_piomode	-	set PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Configure for PIO mode. This is complicated as the register is
+ *	shared by PIO and MWDMA and for both channels.
+ */
+
+static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	/* Spec says 89, the reference driver uses 88 */
+	static const u16 pio[]	= { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
+	static const u8 pio_want[]    = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
+
+	struct it821x_dev *itdev = ap->private_data;
+	int unit = adev->devno;
+	int mode_wanted = adev->pio_mode - XFER_PIO_0;
+
+	/* We prefer the 66MHz clock for PIO 0-3, don't care for PIO4 */
+	itdev->want[unit][1] = pio_want[mode_wanted];
+	itdev->want[unit][0] = 1;	/* PIO is lowest priority */
+	itdev->pio[unit] = pio[mode_wanted];
+	it821x_clock_strategy(ap, adev);
+	it821x_program(ap, adev, itdev->pio[unit]);
+}
+
+/**
+ *	it821x_passthru_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Set up the DMA modes. The actions taken depend heavily on the mode
+ *	to use. If UDMA is used, as is hopefully the usual case, then the
+ *	timing register is private and we need only consider the clock. If
+ *	we are using MWDMA then we have to manage the setting ourselves as
+ *	we switch devices and modes.
+ */
+
+static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u16 dma[]	= 	{ 0x8866, 0x3222, 0x3121 };
+	static const u8 mwdma_want[] =  { ATA_ANY, ATA_66, ATA_ANY };
+	static const u16 udma[]	= 	{ 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
+	static const u8 udma_want[] =   { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev = ap->private_data;
+	int channel = ap->port_no;
+	int unit = adev->devno;
+	u8 conf;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int mode_wanted = adev->dma_mode - XFER_UDMA_0;
+
+		itdev->want[unit][1] = udma_want[mode_wanted];
+		itdev->want[unit][0] = 3;	/* UDMA is high priority */
+		itdev->mwdma[unit] = MWDMA_OFF;
+		itdev->udma[unit] = udma[mode_wanted];
+		if (mode_wanted >= 5)
+			itdev->udma[unit] |= 0x8080;	/* UDMA 5/6 select on */
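+		/*
+		 *	e.g. UDMA5 on drive 0 of channel 0 gives udma[0] =
+		 *	0x2211 | 0x8080 and clears bit 3 of register 0x50
+		 *	below (rev 0x10 parts clear bits 3 and 4 together).
+		 */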
+
+		/* UDMA on. Again revision 0x10 must do the pair */
+		pci_read_config_byte(pdev, 0x50, &conf);
+		if (itdev->timing10)
+			conf &= channel ? 0x9F: 0xE7;
+		else
+			conf &= ~ (1 << (3 + 2 * channel + unit));
+		pci_write_config_byte(pdev, 0x50, conf);
+		it821x_clock_strategy(ap, adev);
+		it821x_program_udma(ap, adev, itdev->udma[unit]);
+	} else {
+		int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
+
+		itdev->want[unit][1] = mwdma_want[mode_wanted];
+		itdev->want[unit][0] = 2;	/* MWDMA is low priority */
+		itdev->mwdma[unit] = dma[mode_wanted];
+		itdev->udma[unit] = UDMA_OFF;
+
+		/* UDMA bits off - Revision 0x10 do them in pairs */
+		pci_read_config_byte(pdev, 0x50, &conf);
+		if (itdev->timing10)
+			conf |= channel ? 0x60: 0x18;
+		else
+			conf |= 1 << (3 + 2 * channel + unit);
+		pci_write_config_byte(pdev, 0x50, conf);
+		it821x_clock_strategy(ap, adev);
+	}
+}
+
+/**
+ *	it821x_passthru_bmdma_start	-	DMA start callback
+ *	@qc: Command in progress
+ *
+ *	Usually drivers set the DMA timing at the point the set_dmamode call
+ *	is made. IT821x however requires we load new timings on the
+ *	transitions in some cases.
+ */
+
+static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct it821x_dev *itdev = ap->private_data;
+	int unit = adev->devno;
+
+	if (itdev->mwdma[unit] != MWDMA_OFF)
+		it821x_program(ap, adev, itdev->mwdma[unit]);
+	else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
+		it821x_program_udma(ap, adev, itdev->udma[unit]);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	it821x_passthru_bmdma_stop	-	DMA stop callback
+ *	@qc: ATA command
+ *
+ *	We loaded new timings in dma_start, as a result we need to restore
+ *	the PIO timings in dma_stop so that the next command issue gets the
+ *	right clock values.
+ */
+
+static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct it821x_dev *itdev = ap->private_data;
+	int unit = adev->devno;
+
+	ata_bmdma_stop(qc);
+	if (itdev->mwdma[unit] != MWDMA_OFF)
+		it821x_program(ap, adev, itdev->pio[unit]);
+}
+
+
+/**
+ *	it821x_passthru_dev_select	-	Select master/slave
+ *	@ap: ATA port
+ *	@device: Device number (not pointer)
+ *
+ *	Device selection hook. If necessary, perform clock switching.
+ */
+
+static void it821x_passthru_dev_select(struct ata_port *ap,
+				       unsigned int device)
+{
+	struct it821x_dev *itdev = ap->private_data;
+	if (itdev && device != itdev->last_device) {
+		struct ata_device *adev = &ap->device[device];
+		it821x_program(ap, adev, itdev->pio[adev->devno]);
+		itdev->last_device = device;
+	}
+	ata_std_dev_select(ap, device);
+}
+
+/**
+ *	it821x_smart_qc_issue_prot	-	wrap qc issue prot
+ *	@qc: command
+ *
+ *	Wrap the command issue sequence for the IT821x. In smart mode the
+ *	firmware only understands a limited command set, so filter out
+ *	anything else before it is issued.
+ */
+
+static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	switch(qc->tf.command)
+	{
+		/* Commands the firmware supports */
+		case ATA_CMD_READ:
+		case ATA_CMD_READ_EXT:
+		case ATA_CMD_WRITE:
+		case ATA_CMD_WRITE_EXT:
+		case ATA_CMD_PIO_READ:
+		case ATA_CMD_PIO_READ_EXT:
+		case ATA_CMD_PIO_WRITE:
+		case ATA_CMD_PIO_WRITE_EXT:
+		case ATA_CMD_READ_MULTI:
+		case ATA_CMD_READ_MULTI_EXT:
+		case ATA_CMD_WRITE_MULTI:
+		case ATA_CMD_WRITE_MULTI_EXT:
+		case ATA_CMD_ID_ATA:
+		/* Arguably should just no-op this one */
+		case ATA_CMD_SET_FEATURES:
+			return ata_qc_issue_prot(qc);
+	}
+	printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
+	return AC_ERR_INVALID;
+}
+
+/**
+ *	it821x_passthru_qc_issue_prot	-	wrap qc issue prot
+ *	@qc: command
+ *
+ *	Wrap the command issue sequence for the IT821x. We need to
+ *	perform our own device selection timing loads before the
+ *	usual happenings kick off.
+ */
+
+static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	it821x_passthru_dev_select(qc->ap, qc->dev->devno);
+	return ata_qc_issue_prot(qc);
+}
+
+/**
+ *	it821x_smart_set_mode	-	mode setting
+ *	@ap: interface to set up
+ *	@unused: device that failed (error only)
+ *
+ *	Use a non-standard set_mode function. We don't want to be tuned.
+ *	The BIOS configured everything. Our job is not to fiddle. We
+ *	read the dma enabled bits from the PCI configuration of the device
+ *	and respect them.
+ */
+
+static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused)
+{
+	int dma_enabled = 0;
+	int i;
+
+	/* Bits 5 and 6 indicate if DMA is active on master/slave */
+	/* It is possible that BMDMA isn't allocated */
+	if (ap->ioaddr.bmdma_addr)
+		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_enabled(dev)) {
+			/* We don't really care */
+			dev->pio_mode = XFER_PIO_0;
+			dev->dma_mode = XFER_MW_DMA_0;
+			/* We do need the right mode information for DMA or PIO
+			   and this comes from the current configuration flags */
+			if (dma_enabled & (1 << (5 + i))) {
+				ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+				dev->xfer_mode = XFER_MW_DMA_0;
+				dev->xfer_shift = ATA_SHIFT_MWDMA;
+				dev->flags &= ~ATA_DFLAG_PIO;
+			} else {
+				ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+				dev->xfer_mode = XFER_PIO_0;
+				dev->xfer_shift = ATA_SHIFT_PIO;
+				dev->flags |= ATA_DFLAG_PIO;
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ *	it821x_dev_config	-	Called each device identify
+ *	@adev: Device that has just been identified
+ *
+ *	Perform the initial setup needed for each device that is chip
+ *	special. In our case we need to lock the sector count to avoid
+ *	blowing the brains out of the firmware with large LBA48 requests
+ *
+ *	FIXME: When FUA appears we need to block FUA too. And SMART and
+ *	basically we need to filter commands for this chip.
+ */
+
+static void it821x_dev_config(struct ata_device *adev)
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	if (adev->max_sectors > 255)
+		adev->max_sectors = 255;
+
+	if (strstr(model_num, "Integrated Technology Express")) {
+		/* RAID mode */
+		printk(KERN_INFO "IT821x %sRAID%d volume",
+			adev->id[147]?"Bootable ":"",
+			adev->id[129]);
+		if (adev->id[129] != 1)
+			printk("(%dK stripe)", adev->id[146]);
+		printk(".\n");
+	}
+}
+
+
+/**
+ *	it821x_check_atapi_dma	-	ATAPI DMA handler
+ *	@qc: Command we are about to issue
+ *
+ *	Decide if this ATAPI command can be issued by DMA on this
+ *	controller. Return 0 if it can be.
+ */
+
+static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct it821x_dev *itdev = ap->private_data;
+
+	/* No ATAPI DMA in smart mode */
+	if (itdev->smart)
+		return -EOPNOTSUPP;
+	/* No ATAPI DMA on rev 10 */
+	if (itdev->timing10)
+		return -EOPNOTSUPP;
+	/* Cool */
+	return 0;
+}
+
+
+/**
+ *	it821x_port_start	-	port setup
+ *	@ap: ATA port being set up
+ *
+ *	The it821x needs to maintain private data structures and also to
+ *	use the standard PCI interface which lacks support for this
+ *	functionality. We instead set up the private data on the port
+ *	start hook, and tear it down on port stop
+ */
+
+static int it821x_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct it821x_dev *itdev;
+	u8 conf;
+
+	int ret = ata_port_start(ap);
+	if (ret < 0)
+		return ret;
+
+	itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL);
+	if (itdev == NULL)
+		return -ENOMEM;
+	ap->private_data = itdev;
+
+	pci_read_config_byte(pdev, 0x50, &conf);
+
+	if (conf & 1) {
+		itdev->smart = 1;
+		/* Long I/Os, although allowed in LBA48 space, cause the
+		   onboard firmware to enter the twilight zone */
+		/* No ATAPI DMA in this mode either */
+	}
+	/* Pull the current clocks from 0x50 */
+	if (conf & (1 << (1 + ap->port_no)))
+		itdev->clock_mode = ATA_50;
+	else
+		itdev->clock_mode = ATA_66;
+
+	itdev->want[0][1] = ATA_ANY;
+	itdev->want[1][1] = ATA_ANY;
+	itdev->last_device = -1;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &conf);
+	if (conf == 0x10) {
+		itdev->timing10 = 1;
+		/* Need to disable ATAPI DMA for this case */
+		if (!itdev->smart)
+			printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
+	}
+
+	return 0;
+}
+
+static struct scsi_host_template it821x_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations it821x_smart_port_ops = {
+	.set_mode	= it821x_smart_set_mode,
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.mode_filter	= ata_pci_default_filter,
+
+	.check_status 	= ata_check_status,
+	.check_atapi_dma= it821x_check_atapi_dma,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+	.dev_config	= it821x_dev_config,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_unknown,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= it821x_smart_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= it821x_port_start,
+};
+
+static struct ata_port_operations it821x_passthru_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= it821x_passthru_set_piomode,
+	.set_dmamode	= it821x_passthru_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.check_atapi_dma= it821x_check_atapi_dma,
+	.dev_select 	= it821x_passthru_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_unknown,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= it821x_passthru_bmdma_start,
+	.bmdma_stop	= it821x_passthru_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= it821x_passthru_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_handler	= ata_interrupt,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= it821x_port_start,
+};
+
+static void __devinit it821x_disable_raid(struct pci_dev *pdev)
+{
+	/* Reset local CPU, and set BIOS not ready */
+	pci_write_config_byte(pdev, 0x5E, 0x01);
+
+	/* Set to bypass mode, and reset PCI bus */
+	pci_write_config_byte(pdev, 0x50, 0x00);
+	pci_write_config_word(pdev, PCI_COMMAND,
+			      PCI_COMMAND_PARITY | PCI_COMMAND_IO |
+			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	pci_write_config_word(pdev, 0x40, 0xA0F3);
+
+	pci_write_config_dword(pdev,0x4C, 0x02040204);
+	pci_write_config_byte(pdev, 0x42, 0x36);
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
+}
+
+
+static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	u8 conf;
+
+	static const struct ata_port_info info_smart = {
+		.sht = &it821x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &it821x_smart_port_ops
+	};
+	static const struct ata_port_info info_passthru = {
+		.sht = &it821x_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &it821x_passthru_port_ops
+	};
+
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	static char *mode[2] = { "pass through", "smart" };
+
+	/* Force the card into bypass mode if so requested */
+	if (it8212_noraid) {
+		printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
+		it821x_disable_raid(pdev);
+	}
+	pci_read_config_byte(pdev, 0x50, &conf);
+	conf &= 1;
+
+	printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]);
+	if (conf == 0)
+		ppi[0] = &info_passthru;
+	else
+		ppi[0] = &info_smart;
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int it821x_reinit_one(struct pci_dev *pdev)
+{
+	/* Resume - turn raid back off if need be */
+	if (it8212_noraid)
+		it821x_disable_raid(pdev);
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id it821x[] = {
+	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
+	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
+
+	{ },
+};
+
+static struct pci_driver it821x_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= it821x,
+	.probe 		= it821x_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= it821x_reinit_one,
+#endif
+};
+
+static int __init it821x_init(void)
+{
+	return pci_register_driver(&it821x_pci_driver);
+}
+
+static void __exit it821x_exit(void)
+{
+	pci_unregister_driver(&it821x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, it821x);
+MODULE_VERSION(DRV_VERSION);
+
+
+module_param_named(noraid, it8212_noraid, int, S_IRUGO);
+MODULE_PARM_DESC(noraid, "Force card into bypass mode");
+
+module_init(it821x_init);
+module_exit(it821x_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_ixp4xx_cf.c linux-2.6.18.x86_64.p4/drivers/ata/pata_ixp4xx_cf.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_ixp4xx_cf.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_ixp4xx_cf.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,260 @@
+/*
+ * ixp4xx PATA/Compact Flash driver
+ * Copyright (c) 2006 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * An ATA driver to handle a Compact Flash connected
+ * to the ixp4xx expansion bus in TrueIDE mode. The CF
+ * must have its chip selects connected to two CS lines
+ * on the ixp4xx. The interrupt line is optional; if not
+ * specified, the driver will run in polling mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME	"pata_ixp4xx_cf"
+#define DRV_VERSION	"0.1.3"
+
+static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_enabled(dev)) {
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
+			dev->pio_mode = XFER_PIO_0;
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+		}
+	}
+	return 0;
+}
+
+static void ixp4xx_phy_reset(struct ata_port *ap)
+{
+	ap->cbl = ATA_CBL_PATA40;
+	ata_port_probe(ap);
+	ata_bus_reset(ap);
+}
+
+static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+				unsigned int buflen, int write_data)
+{
+	unsigned int i;
+	unsigned int words = buflen >> 1;
+	u16 *buf16 = (u16 *) buf;
+	struct ata_port *ap = adev->ap;
+	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+	struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
+
+	/* set the expansion bus in 16bit mode and restore
+	 * 8 bit mode after the transaction.
+	 */
+	*data->cs0_cfg &= ~(0x01);
+	udelay(100);
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data) {
+		for (i = 0; i < words; i++)
+			writew(buf16[i], mmio);
+	} else {
+		for (i = 0; i < words; i++)
+			buf16[i] = readw(mmio);
+	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			writew(align_buf[0], mmio);
+		} else {
+			align_buf[0] = readw(mmio);
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+
+	udelay(100);
+	*data->cs0_cfg |= 0x01;
+}
+
+static void ixp4xx_irq_clear(struct ata_port *ap)
+{
+}
+
+static struct scsi_host_template ixp4xx_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations ixp4xx_port_ops = {
+	.set_mode	= ixp4xx_set_mode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+	.eng_timeout	= ata_eng_timeout,
+	.data_xfer	= ixp4xx_mmio_data_xfer,
+	.cable_detect	= ata_cable_40wire,
+
+	.irq_clear	= ixp4xx_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+
+	.phy_reset	= ixp4xx_phy_reset,
+};
+
+static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
+				struct ixp4xx_pata_data *data)
+{
+	ioaddr->cmd_addr	= data->cs0;
+	ioaddr->altstatus_addr	= data->cs1 + 0x06;
+	ioaddr->ctl_addr	= data->cs1 + 0x06;
+
+	ata_std_ports(ioaddr);
+
+#ifndef __ARMEB__
+
+	/* adjust the addresses to handle the address swizzling of the
+	 * ixp4xx in little endian mode.
+	 */
+
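+	/*
+	 * Concretely: the 16-bit data register address is XORed with 2 and
+	 * each byte-wide taskfile register address with 3, so that accesses
+	 * land on the byte lanes the device actually sees.
+	 */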
+	*(unsigned long *)&ioaddr->data_addr		^= 0x02;
+	*(unsigned long *)&ioaddr->cmd_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->altstatus_addr	^= 0x03;
+	*(unsigned long *)&ioaddr->ctl_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->error_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->feature_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->nsect_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->lbal_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->lbam_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->lbah_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->device_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->status_addr		^= 0x03;
+	*(unsigned long *)&ioaddr->command_addr		^= 0x03;
+#endif
+}
+
+static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
+{
+	unsigned int irq;
+	struct resource *cs0, *cs1;
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct ixp4xx_pata_data *data = pdev->dev.platform_data;
+
+	cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+	if (!cs0 || !cs1)
+		return -EINVAL;
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
+
+	data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
+	data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq)
+		set_irq_type(irq, IRQT_RISING);
+
+	/* Setup expansion bus chip selects */
+	*data->cs0_cfg = data->cs0_bits;
+	*data->cs1_cfg = data->cs1_bits;
+
+	ap = host->ports[0];
+
+	ap->ops	= &ixp4xx_port_ops;
+	ap->pio_mask = 0x1f; /* PIO4 */
+	ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI;
+
+	/* run in polling mode if no irq has been assigned */
+	if (!irq)
+		ap->flags |= ATA_FLAG_PIO_POLLING;
+
+	ixp4xx_setup_port(&ap->ioaddr, data);
+
+	dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+	/* activate host */
+	return ata_host_activate(host, irq, ata_interrupt, 0, &ixp4xx_sht);
+}
+
+static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
+{
+	struct ata_host *host = platform_get_drvdata(dev);
+
+	ata_host_detach(host);
+
+	return 0;
+}
+
+static struct platform_driver ixp4xx_pata_platform_driver = {
+	.driver	 = {
+		.name   = DRV_NAME,
+		.owner  = THIS_MODULE,
+	},
+	.probe		= ixp4xx_pata_probe,
+	.remove		= __devexit_p(ixp4xx_pata_remove),
+};
+
+static int __init ixp4xx_pata_init(void)
+{
+	return platform_driver_register(&ixp4xx_pata_platform_driver);
+}
+
+static void __exit ixp4xx_pata_exit(void)
+{
+	platform_driver_unregister(&ixp4xx_pata_platform_driver);
+}
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ixp4xx_pata_init);
+module_exit(ixp4xx_pata_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_jmicron.c linux-2.6.18.x86_64.p4/drivers/ata/pata_jmicron.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_jmicron.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_jmicron.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,253 @@
+/*
+ *    pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
+ *			PATA port of the controller. The SATA ports are
+ *			driven by AHCI in the usual configuration although
+ *			this driver can handle other setups if we need it.
+ *
+ *	(c) 2006 Red Hat  <alan@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_jmicron"
+#define DRV_VERSION	"0.1.5"
+
+typedef enum {
+	PORT_PATA0 = 0,
+	PORT_PATA1 = 1,
+	PORT_SATA = 2,
+} port_type;
+
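+/*
+ * jmicron_pre_reset() below decodes the controller mapping from PCI
+ * config space: register 0x40 carries the per-port enable bits (bit 0
+ * and bit 4), bit 23 selects whether the second channel is the PATA
+ * port, bit 22 logically swaps the two channels and bit 3 gives the
+ * primary 40/80 wire state; register 0x80 bit 24 maps the second PATA
+ * port as the primary channel, with bit 21 as its enable and bit 19 as
+ * its 40/80 wire state.
+ */
+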
+/**
+ *	jmicron_pre_reset	-	check for 40/80 pin
+ *	@ap: Port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ *
+ *	On the Jmicron 361/363 there is a single PATA port that can be mapped
+ *	either as primary or secondary (or neither). We don't do any policy
+ *	or setup here. We assume that has been done by init_one and the
+ *	BIOS.
+ */
+
+static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 control;
+	u32 control5;
+	int port_mask = 1<< (4 * ap->port_no);
+	int port = ap->port_no;
+	port_type port_map[2];
+
+	/* Check if our port is enabled */
+	pci_read_config_dword(pdev, 0x40, &control);
+	if ((control & port_mask) == 0)
+		return -ENOENT;
+
+	/* There are two basic mappings. One has the two SATA ports merged
+	   as master/slave and the secondary as PATA, the other has only the
+	   SATA port mapped */
+	if (control & (1 << 23)) {
+		port_map[0] = PORT_SATA;
+		port_map[1] = PORT_PATA0;
+	} else {
+		port_map[0] = PORT_SATA;
+		port_map[1] = PORT_SATA;
+	}
+
+	/* The 365/366 may have this bit set to map the second PATA port
+	   as the internal primary channel */
+	pci_read_config_dword(pdev, 0x80, &control5);
+	if (control5 & (1<<24))
+		port_map[0] = PORT_PATA1;
+
+	/* The two ports may then be logically swapped by the firmware */
+	if (control & (1 << 22))
+		port = port ^ 1;
+
+	/*
+	 *	Now we know which physical port we are talking about we can
+	 *	actually do our cable checking etc. Thankfully we don't need
+	 *	to do the plumbing for other cases.
+	 */
+	switch (port_map[port])
+	{
+	case PORT_PATA0:
+		if (control & (1 << 5))
+			return 0;
+		if (control & (1 << 3))	/* 40/80 pin primary */
+			ap->cbl = ATA_CBL_PATA40;
+		else
+			ap->cbl = ATA_CBL_PATA80;
+		break;
+	case PORT_PATA1:
+		/* Bit 21 is set if the port is enabled */
+		if ((control5 & (1 << 21)) == 0)
+			return 0;
+		if (control5 & (1 << 19))	/* 40/80 pin secondary */
+			ap->cbl = ATA_CBL_PATA40;
+		else
+			ap->cbl = ATA_CBL_PATA80;
+		break;
+	case PORT_SATA:
+		ap->cbl = ATA_CBL_SATA;
+		break;
+	}
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	jmicron_error_handler - Jmicron specific error handling
+ *	@ap: Port to handle
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void jmicron_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template jmicron_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	/* Use standard CHS mapping rules */
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations jmicron_ops = {
+	.port_disable		= ata_port_disable,
+
+	/* Task file is PCI ATA format, use helpers */
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= jmicron_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	/* BMDMA handling is PCI ATA format, use helpers */
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	/* IRQ-related hooks */
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	/* Generic PATA PCI ATA helpers */
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	jmicron_init_one - Register Jmicron ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in jmicron_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht		= &jmicron_sht,
+		.flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask 	= 0x3f,
+
+		.port_ops	= &jmicron_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id jmicron_pci_tbl[] = {
+	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 361 },
+	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 363 },
+	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 365 },
+	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 366 },
+	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368,
+	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 368 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver jmicron_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= jmicron_pci_tbl,
+	.probe			= jmicron_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init jmicron_init(void)
+{
+	return pci_register_driver(&jmicron_pci_driver);
+}
+
+static void __exit jmicron_exit(void)
+{
+	pci_unregister_driver(&jmicron_pci_driver);
+}
+
+module_init(jmicron_init);
+module_exit(jmicron_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_legacy.c linux-2.6.18.x86_64.p4/drivers/ata/pata_legacy.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_legacy.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_legacy.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,983 @@
+/*
+ *   pata_legacy.c - Legacy port PATA/SATA controller driver.
+ *   Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *   An ATA driver for the legacy ATA ports.
+ *
+ *   Data Sources:
+ *	Opti 82C465/82C611 support: Data sheets at opti-inc.com
+ *	HT6560 series:
+ *	Promise 20230/20620:
+ *		http://www.ryston.cz/petr/vlb/pdc20230b.html
+ *		http://www.ryston.cz/petr/vlb/pdc20230c.html
+ *		http://www.ryston.cz/petr/vlb/pdc20630.html
+ *
+ *  Unsupported but docs exist:
+ *	Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
+ *	Winbond W83759A
+ *
+ *  This driver handles legacy (that is "ISA/VLB side") IDE ports found
+ *  on PC class systems. There are three hybrid devices that are exceptions:
+ *  the Cyrix 5510/5520, where a pre-SFF ATA device is on the bridge, and
+ *  the MPIIX, where the tuning is PCI side but the IDE is "ISA side".
+ *
+ *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
+ *  opti82c465mv/promise 20230c/20630
+ *
+ *  Use the autospeed and pio_mask options with:
+ *	Appian ADI/2 aka CLPD7220 or AIC25VL01.
+ *  Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
+ *	Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
+ *	Winbond W83759A, Promise PDC20230-B
+ *
+ *  For now use autospeed and pio_mask as above with the W83759A. This may
+ *  change.
+ *
+ *  TODO
+ *	Merge existing pata_qdi driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_legacy"
+#define DRV_VERSION "0.5.5"
+
+#define NR_HOST 6
+
+static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+static int legacy_irq[NR_HOST] = { 14, 15, 11, 10, 8, 12 };
+
+struct legacy_data {
+	unsigned long timing;
+	u8 clock[2];
+	u8 last;
+	int fast;
+	struct platform_device *platform_dev;
+
+};
+
+static struct legacy_data legacy_data[NR_HOST];
+static struct ata_host *legacy_host[NR_HOST];
+static int nr_legacy_host;
+
+
+static int probe_all;			/* Set to check all ISA port ranges */
+static int ht6560a;			/* HT 6560A on primary 1, secondary 2, both 3 */
+static int ht6560b;			/* HT 6560B on primary 1, secondary 2, both 3 */
+static int opti82c611a;			/* Opti82c611A on primary 1, secondary 2, both 3 */
+static int opti82c46x;			/* Opti 82c465MV present (pri/sec autodetect) */
+static int autospeed;			/* Chip present which snoops speed changes */
+static int pio_mask = 0x1F;		/* PIO range for autospeed devices */
+static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
+
+/**
+ *	legacy_set_mode		-	mode setting
+ *	@ap: IDE interface
+ *	@unused: Device that failed when error is returned
+ *
+ *	Use a non-standard set_mode function. We don't want to be tuned.
+ *
+ *	The BIOS configured everything. Our job is not to fiddle. Just use
+ *	whatever PIO the hardware is using and leave it at that. When we
+ *	get some kind of nice user driven API for control then we can
+ *	expand on this as per hdparm in the base kernel.
+ */
+
+static int legacy_set_mode(struct ata_port *ap, struct ata_device **unused)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_enabled(dev)) {
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+			dev->pio_mode = XFER_PIO_0;
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+		}
+	}
+	return 0;
+}
+
+static struct scsi_host_template legacy_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+/*
+ *	These ops are used if the user indicates the hardware
+ *	snoops the commands to decide on the mode and handles the
+ *	mode selection "magically" itself. Several legacy controllers
+ *	do this. The mode range can be set if it is not 0x1F by setting
+ *	pio_mask as well.
+ */
+
+static struct ata_port_operations simple_port_ops = {
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer_noirq,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations legacy_port_ops = {
+	.set_mode	= legacy_set_mode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+	.cable_detect	= ata_cable_40wire,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer_noirq,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Promise 20230C and 20620 support
+ *
+ *	This controller supports PIO0 to PIO2. We set PIO timings conservatively to
+ *	allow for a 50MHz VESA Local Bus. The 20620's DMA support is unusual (DMA to
+ *	the controller, then PIO'd to the host) and is not supported.
+ */
+
+static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int tries = 5;
+	int pio = adev->pio_mode - XFER_PIO_0;
+	u8 rt;
+	unsigned long flags;
+
+	/* Safe as UP only. Force I/Os to occur together */
+
+	local_irq_save(flags);
+
+	/* Unlock the control interface */
+	do
+	{
+		inb(0x1F5);
+		outb(inb(0x1F2) | 0x80, 0x1F2);
+		inb(0x1F2);
+		inb(0x3F6);
+		inb(0x3F6);
+		inb(0x1F2);
+		inb(0x1F2);
+	}
+	while((inb(0x1F2) & 0x80) && --tries);
+
+	local_irq_restore(flags);
+
+	outb(inb(0x1F4) & 0x07, 0x1F4);
+
+	rt = inb(0x1F3);
+	rt &= 0x07 << (3 * adev->devno);
+	if (pio)
+		rt |= (1 + 3 * pio) << (3 * adev->devno);
+
+	udelay(100);
+	outb(inb(0x1F2) | 0x01, 0x1F2);
+	udelay(100);
+	inb(0x1F5);
+
+}
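+
+/*
+ * Worked example (illustration only): for PIO2 on device 1 the timing value
+ * computed above is (1 + 3 * 2) = 7, shifted left by 3 * devno = 3 bits into
+ * the byte read back from port 0x1F3; for PIO0 nothing is OR'd in.
+ */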
+
+static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
+{
+	struct ata_port *ap = adev->ap;
+	int slop = buflen & 3;
+	unsigned long flags;
+
+	if (ata_id_has_dword_io(adev->id)) {
+		local_irq_save(flags);
+
+		/* Perform the 32bit I/O synchronization sequence */
+		ioread8(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);
+		ioread8(ap->ioaddr.nsect_addr);
+
+		/* Now the data */
+
+		if (write_data)
+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+		else
+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+		if (unlikely(slop)) {
+			u32 pad;
+			if (write_data) {
+				memcpy(&pad, buf + buflen - slop, slop);
+				pad = le32_to_cpu(pad);
+				iowrite32(pad, ap->ioaddr.data_addr);
+			} else {
+				pad = ioread32(ap->ioaddr.data_addr);
+				pad = cpu_to_le32(pad);
+				memcpy(buf + buflen - slop, &pad, slop);
+			}
+		}
+		local_irq_restore(flags);
+	}
+	else
+		ata_data_xfer_noirq(adev, buf, buflen, write_data);
+}
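+
+/*
+ * Worked example (illustration only): a 510 byte transfer through the 32bit
+ * path above moves 510 >> 2 = 127 dwords, leaving slop = 510 & 3 = 2 trailing
+ * bytes that are handled by the padded ioread32()/iowrite32() branch.
+ */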
+
+static struct ata_port_operations pdc20230_port_ops = {
+	.set_piomode	= pdc20230_set_piomode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= pdc_data_xfer_vlb,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Holtek 6560A support
+ *
+ *	This controller supports PIO0 to PIO2 (no IORDY even though higher timings
+ *	can be loaded).
+ */
+
+static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover;
+	struct ata_timing t;
+
+	/* Get the timing data in cycles. For now play safe at 50MHz */
+	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+	active = FIT(t.active, 2, 15);
+	recover = FIT(t.recover, 4, 15);
+
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+
+	iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
+	ioread8(ap->ioaddr.status_addr);
+}
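+
+/*
+ * Worked example (illustration only): the 20000 passed to ata_timing_compute()
+ * above is the bus clock period in picoseconds, i.e. 1000000000 / 50000 kHz
+ * for the assumed 50MHz VLB clock; the resulting active/recover clock counts
+ * are clamped with FIT() and packed as (recover << 4 | active).
+ */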
+
+static struct ata_port_operations ht6560a_port_ops = {
+	.set_piomode	= ht6560a_set_piomode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,	/* Check vlb/noirq */
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Holtek 6560B support
+ *
+ *	This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting
+ *	unless we see an ATAPI device, in which case we force it off.
+ *
+ *	FIXME: need to implement 2nd channel support.
+ */
+
+static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover;
+	struct ata_timing t;
+
+	/* Get the timing data in cycles. For now play safe at 50MHz */
+	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+	active = FIT(t.active, 2, 15);
+	recover = FIT(t.recover, 2, 16);
+	recover &= 0x0F;
+
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+	inb(0x3E6);
+
+	iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
+
+	if (adev->class != ATA_DEV_ATA) {
+		u8 rconf = inb(0x3E6);
+		if (rconf & 0x24) {
+			rconf &= ~ 0x24;
+			outb(rconf, 0x3E6);
+		}
+	}
+	ioread8(ap->ioaddr.status_addr);
+}
+
+static struct ata_port_operations ht6560b_port_ops = {
+	.set_piomode	= ht6560b_set_piomode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,	/* FIXME: Check 32bit and noirq */
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Opti core chipset helpers
+ */
+
+/**
+ *	opti_syscfg	-	read OPTI chipset configuration
+ *	@reg: Configuration register to read
+ *
+ *	Returns the value of an OPTI system board configuration register.
+ */
+
+static u8 opti_syscfg(u8 reg)
+{
+	unsigned long flags;
+	u8 r;
+
+	/* Uniprocessor chipset and must force cycles adjacent */
+	local_irq_save(flags);
+	outb(reg, 0x22);
+	r = inb(0x24);
+	local_irq_restore(flags);
+	return r;
+}
+
+/*
+ *	Opti 82C611A
+ *
+ *	This controller supports PIO0 to PIO3.
+ */
+
+static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover, setup;
+	struct ata_timing t;
+	struct ata_device *pair = ata_dev_pair(adev);
+	int clock;
+	int khz[4] = { 50000, 40000, 33000, 25000 };
+	u8 rc;
+
+	/* Enter configuration mode */
+	ioread16(ap->ioaddr.error_addr);
+	ioread16(ap->ioaddr.error_addr);
+	iowrite8(3, ap->ioaddr.nsect_addr);
+
+	/* Read VLB clock strapping */
+	clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
+
+	/* Setup timing is shared */
+	if (pair) {
+		struct ata_timing tp;
+		ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
+
+		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+	}
+
+	active = FIT(t.active, 2, 17) - 2;
+	recover = FIT(t.recover, 1, 16) - 1;
+	setup = FIT(t.setup, 1, 4) - 1;
+
+	/* Select the right timing bank for write timing */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x7F;
+	rc |= (adev->devno << 7);
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Write the timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
+
+	/* Select the right bank for read timings, also
+	   load the shared timings for address */
+	rc = ioread8(ap->ioaddr.device_addr);
+	rc &= 0xC0;
+	rc |= adev->devno;	/* Index select */
+	rc |= (setup << 4) | 0x04;
+	iowrite8(rc, ap->ioaddr.device_addr);
+
+	/* Load the read timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
+
+	/* Ensure the timing register mode is right */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x73;
+	rc |= 0x84;
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Exit command mode */
+	iowrite8(0x83,  ap->ioaddr.nsect_addr);
+}
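+
+/*
+ * Illustrative sketch (not part of the upstream driver): how the VLB clock
+ * strap read above turns into the picosecond period handed to
+ * ata_timing_compute(). Strap 0 gives 1000000000 / 50000 = 20000 ps (50MHz).
+ */
+static inline int opti_vlb_clock_ps(u8 strap)
+{
+	static const int khz[4] = { 50000, 40000, 33000, 25000 };
+	return 1000000000 / khz[strap & 0x03];
+}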
+
+
+static struct ata_port_operations opti82c611a_port_ops = {
+	.set_piomode	= opti82c611a_set_piomode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/*
+ *	Opti 82C465MV
+ *
+ *	This controller supports PIO0 to PIO3. Unlike the 611A the MVB
+ *	version is dual channel but doesn't have a lot of unique registers.
+ */
+
+static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	u8 active, recover, setup;
+	struct ata_timing t;
+	struct ata_device *pair = ata_dev_pair(adev);
+	int clock;
+	int khz[4] = { 50000, 40000, 33000, 25000 };
+	u8 rc;
+	u8 sysclk;
+
+	/* Get the clock */
+	sysclk = opti_syscfg(0xAC) & 0xC0;	/* BIOS set */
+
+	/* Enter configuration mode */
+	ioread16(ap->ioaddr.error_addr);
+	ioread16(ap->ioaddr.error_addr);
+	iowrite8(3, ap->ioaddr.nsect_addr);
+
+	/* Read VLB clock strapping */
+	clock = 1000000000 / khz[sysclk];
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
+
+	/* Setup timing is shared */
+	if (pair) {
+		struct ata_timing tp;
+		ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
+
+		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+	}
+
+	active = FIT(t.active, 2, 17) - 2;
+	recover = FIT(t.recover, 1, 16) - 1;
+	setup = FIT(t.setup, 1, 4) - 1;
+
+	/* Select the right timing bank for write timing */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x7F;
+	rc |= (adev->devno << 7);
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Write the timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
+
+	/* Select the right bank for read timings, also
+	   load the shared timings for address */
+	rc = ioread8(ap->ioaddr.device_addr);
+	rc &= 0xC0;
+	rc |= adev->devno;	/* Index select */
+	rc |= (setup << 4) | 0x04;
+	iowrite8(rc, ap->ioaddr.device_addr);
+
+	/* Load the read timings */
+	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
+
+	/* Ensure the timing register mode is right */
+	rc = ioread8(ap->ioaddr.lbal_addr);
+	rc &= 0x73;
+	rc |= 0x84;
+	iowrite8(rc, ap->ioaddr.lbal_addr);
+
+	/* Exit command mode */
+	iowrite8(0x83,  ap->ioaddr.nsect_addr);
+
+	/* We need to know this for quad device on the MVB */
+	ap->host->private_data = ap;
+}
+
+/**
+ *	opti82c46x_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings. The
+ *	MVB has a single set of timing registers and these are shared
+ *	across channels. As there are two registers we really ought to
+ *	track the last two used values as a sort of register window. For
+ *	now we just reload on a channel switch. On the single channel
+ *	setup this condition never fires so we do nothing extra.
+ *
+ *	FIXME: dual channel needs ->serialize support
+ */
+
+static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/* If timings are set and for the wrong channel (2nd test is
+	   due to a libata shortcoming and will eventually go I hope) */
+	if (ap->host->private_data != ap->host
+	    && ap->host->private_data != NULL)
+		opti82c46x_set_piomode(ap, adev);
+
+	return ata_qc_issue_prot(qc);
+}
+
+static struct ata_port_operations opti82c46x_port_ops = {
+	.set_piomode	= opti82c46x_set_piomode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= opti82c46x_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+
+/**
+ *	legacy_init_one		-	attach a legacy interface
+ *	@port: port number
+ *	@io: I/O port start
+ *	@ctrl: control port
+ *	@irq: interrupt line
+ *
+ *	Register an ISA bus IDE interface. Such interfaces are PIO-only and are
+ *	assumed not to support IRQ sharing.
+ */
+
+static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq)
+{
+	struct legacy_data *ld = &legacy_data[nr_legacy_host];
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct platform_device *pdev;
+	struct ata_port_operations *ops = &legacy_port_ops;
+	void __iomem *io_addr, *ctrl_addr;
+	int pio_modes = pio_mask;
+	u32 mask = (1 << port);
+	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
+	int ret;
+
+	pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	ret = -EBUSY;
+	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
+	    devm_request_region(&pdev->dev, ctrl, 1, "pata_legacy") == NULL)
+		goto fail;
+
+	ret = -ENOMEM;
+	io_addr = devm_ioport_map(&pdev->dev, io, 8);
+	ctrl_addr = devm_ioport_map(&pdev->dev, ctrl, 1);
+	if (!io_addr || !ctrl_addr)
+		goto fail;
+
+	if (ht6560a & mask) {
+		ops = &ht6560a_port_ops;
+		pio_modes = 0x07;
+		iordy = ATA_FLAG_NO_IORDY;
+	}
+	if (ht6560b & mask) {
+		ops = &ht6560b_port_ops;
+		pio_modes = 0x1F;
+	}
+	if (opti82c611a & mask) {
+		ops = &opti82c611a_port_ops;
+		pio_modes = 0x0F;
+	}
+	if (opti82c46x & mask) {
+		ops = &opti82c46x_port_ops;
+		pio_modes = 0x0F;
+	}
+
+	/* Probe for automatically detectable controllers */
+
+	if (io == 0x1F0 && ops == &legacy_port_ops) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+
+		/* Probes */
+		inb(0x1F5);
+		outb(inb(0x1F2) | 0x80, 0x1F2);
+		inb(0x1F2);
+		inb(0x3F6);
+		inb(0x3F6);
+		inb(0x1F2);
+		inb(0x1F2);
+
+		if ((inb(0x1F2) & 0x80) == 0) {
+			/* PDC20230c or 20630 ? */
+			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
+			pio_modes = 0x07;
+			ops = &pdc20230_port_ops;
+			iordy = ATA_FLAG_NO_IORDY;
+			udelay(100);
+			inb(0x1F5);
+		} else {
+			outb(0x55, 0x1F2);
+			inb(0x1F2);
+			inb(0x1F2);
+			if (inb(0x1F2) == 0x00) {
+				printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
+			}
+		}
+		local_irq_restore(flags);
+	}
+
+
+	/* Chip does mode setting by command snooping */
+	if (ops == &legacy_port_ops && (autospeed & mask))
+		ops = &simple_port_ops;
+
+	ret = -ENOMEM;
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto fail;
+	ap = host->ports[0];
+
+	ap->ops = ops;
+	ap->pio_mask = pio_modes;
+	ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
+	ap->ioaddr.cmd_addr = io_addr;
+	ap->ioaddr.altstatus_addr = ctrl_addr;
+	ap->ioaddr.ctl_addr = ctrl_addr;
+	ata_std_ports(&ap->ioaddr);
+	ap->private_data = ld;
+
+	ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht);
+	if (ret)
+		goto fail;
+
+	legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
+	ld->platform_dev = pdev;
+	return 0;
+
+fail:
+	platform_device_unregister(pdev);
+	return ret;
+}
+
+/**
+ *	legacy_check_special_cases	-	ATA special cases
+ *	@p: PCI device to check
+ *	@primary: set this if we find an ATA master
+ *	@secondary: set this if we find an ATA secondary
+ *
+ *	A small number of vendors implemented early PCI ATA interfaces on bridge logic
+ *	without the ATA interface being PCI visible. Where we have a matching PCI driver
+ *	we must skip the relevant device here. If we don't know about it then the legacy
+ *	driver is the right driver anyway.
+ */
+
+static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary)
+{
+	/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
+	if (p->vendor == 0x1078 && p->device == 0x0000) {
+		*primary = *secondary = 1;
+		return;
+	}
+	/* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
+	if (p->vendor == 0x1078 && p->device == 0x0002) {
+		*primary = *secondary = 1;
+		return;
+	}
+	/* Intel MPIIX - PIO ATA on non PCI side of bridge */
+	if (p->vendor == 0x8086 && p->device == 0x1234) {
+		u16 r;
+		pci_read_config_word(p, 0x6C, &r);
+		if (r & 0x8000) {	/* ATA port enabled */
+			if (r & 0x4000)
+				*secondary = 1;
+			else
+				*primary = 1;
+		}
+		return;
+	}
+}
+
+
+/**
+ *	legacy_init		-	attach legacy interfaces
+ *
+ *	Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
+ *	Right now we do not scan the ide0 and ide1 address but should do so
+ *	for non PCI systems or systems with no PCI IDE legacy mode devices.
+ *	If you fix that note there are special cases to consider like VLB
+ *	drivers and CS5510/20.
+ */
+
+static __init int legacy_init(void)
+{
+	int i;
+	int ct = 0;
+	int primary = 0;
+	int secondary = 0;
+	int last_port = NR_HOST;
+
+	struct pci_dev *p = NULL;
+
+	for_each_pci_dev(p) {
+		int r;
+		/* Check for any overlap of the system ATA mappings. Native mode controllers
+		   stuck on these addresses or some devices in 'raid' mode won't be found by
+		   the storage class test */
+		for (r = 0; r < 6; r++) {
+			if (pci_resource_start(p, r) == 0x1f0)
+				primary = 1;
+			if (pci_resource_start(p, r) == 0x170)
+				secondary = 1;
+		}
+		/* Check for special cases */
+		legacy_check_special_cases(p, &primary, &secondary);
+
+		/* If PCI bus is present then don't probe for tertiary legacy ports */
+		if (probe_all == 0)
+			last_port = 2;
+	}
+
+	/* If an OPTI 82C46X is present find out where the channels are */
+	if (opti82c46x) {
+		static const char *optis[4] = {
+			"3/463MV", "5MV",
+			"5MVA", "5MVB"
+		};
+		u8 chans = 1;
+		u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
+
+		opti82c46x = 3;	/* Assume master and slave first */
+		printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]);
+		if (ctrl == 3)
+			chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
+		ctrl = opti_syscfg(0xAC);
+		/* Check enabled and this port is the 465MV port. On the
+		   MVB we may have two channels */
+		if (ctrl & 8) {
+			if (ctrl & 4)
+				opti82c46x = 2;	/* Slave */
+			else
+				opti82c46x = 1;	/* Master */
+			if (chans == 2)
+				opti82c46x = 3; /* Master and Slave */
+		}	/* Slave only */
+		else if (chans == 1)
+			opti82c46x = 1;
+	}
+
+	for (i = 0; i < last_port; i++) {
+		/* Skip primary if we have seen a PCI one */
+		if (i == 0 && primary == 1)
+			continue;
+		/* Skip secondary if we have seen a PCI one */
+		if (i == 1 && secondary == 1)
+			continue;
+		if (legacy_init_one(i, legacy_port[i],
+				   legacy_port[i] + 0x0206,
+				   legacy_irq[i]) == 0)
+			ct++;
+	}
+	if (ct != 0)
+		return 0;
+	return -ENODEV;
+}
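+
+/*
+ * Worked example (illustration only): for i = 0 the call above is
+ * legacy_init_one(0, 0x1F0, 0x1F0 + 0x0206 = 0x3F6, 14), i.e. the classic
+ * primary command/control block on IRQ 14; i = 1 gives 0x170/0x376 on IRQ 15.
+ */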
+
+static __exit void legacy_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_legacy_host; i++) {
+		struct legacy_data *ld = &legacy_data[i];
+
+		ata_host_detach(legacy_host[i]);
+		platform_device_unregister(ld->platform_dev);
+		if (ld->timing)
+			release_region(ld->timing, 2);
+	}
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for legacy ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(probe_all, int, 0);
+module_param(autospeed, int, 0);
+module_param(ht6560a, int, 0);
+module_param(ht6560b, int, 0);
+module_param(opti82c611a, int, 0);
+module_param(opti82c46x, int, 0);
+module_param(pio_mask, int, 0);
+module_param(iordy_mask, int, 0);
+
+module_init(legacy_init);
+module_exit(legacy_exit);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_marvell.c linux-2.6.18.x86_64.p4/drivers/ata/pata_marvell.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_marvell.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_marvell.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,228 @@
+/*
+ *	Marvell PATA driver.
+ *
+ *	For the moment we drive the PATA port in legacy mode. That
+ *	isn't making full use of the device functionality but it is
+ *	easy to get working.
+ *
+ *	(c) 2006 Red Hat  <alan@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_marvell"
+#define DRV_VERSION	"0.1.4"
+
+/**
+ *	marvell_pre_reset	-	pre-reset port enable check
+ *	@ap: Port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 devices;
+	void __iomem *barp;
+	int i;
+
+	/* Check if our port is enabled */
+
+	barp = pci_iomap(pdev, 5, 0x10);
+	if (barp == NULL)
+		return -ENOMEM;
+	printk("BAR5:");
+	for(i = 0; i <= 0x0F; i++)
+		printk("%02X:%02X ", i, readb(barp + i));
+	printk("\n");
+
+	devices = readl(barp + 0x0C);
+	pci_iounmap(pdev, barp);
+
+	if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
+	    (!(devices & 0x10)))	/* PATA enable ? */
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+static int marvell_cable_detect(struct ata_port *ap)
+{
+	/* Cable type */
+	switch(ap->port_no)
+	{
+	case 0:
+		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
+			return ATA_CBL_PATA40;
+		return ATA_CBL_PATA80;
+	case 1: /* Legacy SATA port */
+		return ATA_CBL_SATA;
+	}
+
+	BUG();
+	return 0;	/* Our BUG macro needs the right markup */
+}
+
+/**
+ *	marvell_error_handler - Setup error handler
+ *	@ap: Port to handle
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void marvell_error_handler(struct ata_port *ap)
+{
+	return ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset,
+				  NULL, ata_std_postreset);
+}
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template marvell_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	/* Use standard CHS mapping rules */
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations marvell_ops = {
+	.port_disable		= ata_port_disable,
+
+	/* Task file is PCI ATA format, use helpers */
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= marvell_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= marvell_cable_detect,
+
+	/* BMDMA handling is PCI ATA format, use helpers */
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	/* Timeout handling */
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	/* Generic PATA PCI ATA helpers */
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	marvell_init_one - Register Marvell ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@id: Entry in marvell_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht		= &marvell_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask 	= 0x3f,
+
+		.port_ops	= &marvell_ops,
+	};
+	static const struct ata_port_info info_sata = {
+		.sht		= &marvell_sht,
+		/* Slave possible as it's magically mapped, not real */
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask 	= 0x7f,
+
+		.port_ops	= &marvell_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, &info_sata };
+
+	if (pdev->device == 0x6101)
+		ppi[1] = &ata_dummy_port_info;
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id marvell_pci_tbl[] = {
+	{ PCI_DEVICE(0x11AB, 0x6101), },
+	{ PCI_DEVICE(0x11AB, 0x6145), },
+	{ }	/* terminate list */
+};
+
+static struct pci_driver marvell_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= marvell_pci_tbl,
+	.probe			= marvell_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init marvell_init(void)
+{
+	return pci_register_driver(&marvell_pci_driver);
+}
+
+static void __exit marvell_exit(void)
+{
+	pci_unregister_driver(&marvell_pci_driver);
+}
+
+module_init(marvell_init);
+module_exit(marvell_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_mpc52xx.c linux-2.6.18.x86_64.p4/drivers/ata/pata_mpc52xx.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_mpc52xx.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_mpc52xx.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,533 @@
+/*
+ * drivers/ata/pata_mpc52xx.c
+ *
+ * libata driver for the Freescale MPC52xx on-chip IDE interface
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/libata.h>
+
+#include <asm/types.h>
+#include <asm/prom.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+
+#define DRV_NAME	"mpc52xx_ata"
+#define DRV_VERSION	"0.1.0ac2"
+
+
+/* Private structures used by the driver */
+struct mpc52xx_ata_timings {
+	u32	pio1;
+	u32	pio2;
+};
+
+struct mpc52xx_ata_priv {
+	unsigned int			ipb_period;
+	struct mpc52xx_ata __iomem *	ata_regs;
+	int				ata_irq;
+	struct mpc52xx_ata_timings	timings[2];
+	int				csel;
+};
+
+
+/* ATAPI-4 PIO specs (in ns) */
+static const int ataspec_t0[5]    = {600, 383, 240, 180, 120};
+static const int ataspec_t1[5]    = { 70,  50,  30,  30,  25};
+static const int ataspec_t2_8[5]  = {290, 290, 290,  80,  70};
+static const int ataspec_t2_16[5] = {165, 125, 100,  80,  70};
+static const int ataspec_t2i[5]   = {  0,   0,   0,  70,  25};
+static const int ataspec_t4[5]    = { 30,  20,  15,  10,  10};
+static const int ataspec_ta[5]    = { 35,  35,  35,  35,  35};
+
+#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
+
+
+/* Bit definitions inside the registers */
+#define MPC52xx_ATA_HOSTCONF_SMR	0x80000000UL /* State machine reset */
+#define MPC52xx_ATA_HOSTCONF_FR		0x40000000UL /* FIFO Reset */
+#define MPC52xx_ATA_HOSTCONF_IE		0x02000000UL /* Enable interrupt in PIO */
+#define MPC52xx_ATA_HOSTCONF_IORDY	0x01000000UL /* Drive supports IORDY protocol */
+
+#define MPC52xx_ATA_HOSTSTAT_TIP	0x80000000UL /* Transaction in progress */
+#define MPC52xx_ATA_HOSTSTAT_UREP	0x40000000UL /* UDMA Read Extended Pause */
+#define MPC52xx_ATA_HOSTSTAT_RERR	0x02000000UL /* Read Error */
+#define MPC52xx_ATA_HOSTSTAT_WERR	0x01000000UL /* Write Error */
+
+#define MPC52xx_ATA_FIFOSTAT_EMPTY	0x01 /* FIFO Empty */
+
+#define MPC52xx_ATA_DMAMODE_WRITE	0x01 /* Write DMA */
+#define MPC52xx_ATA_DMAMODE_READ	0x02 /* Read DMA */
+#define MPC52xx_ATA_DMAMODE_UDMA	0x04 /* UDMA enabled */
+#define MPC52xx_ATA_DMAMODE_IE		0x08 /* Enable drive interrupt to CPU in DMA mode */
+#define MPC52xx_ATA_DMAMODE_FE		0x10 /* FIFO Flush enable in Rx mode */
+#define MPC52xx_ATA_DMAMODE_FR		0x20 /* FIFO Reset */
+#define MPC52xx_ATA_DMAMODE_HUT		0x40 /* Host UDMA burst terminate */
+
+
+/* Structure of the hardware registers */
+struct mpc52xx_ata {
+
+	/* Host interface registers */
+	u32 config;		/* ATA + 0x00 Host configuration */
+	u32 host_status;	/* ATA + 0x04 Host controller status */
+	u32 pio1;		/* ATA + 0x08 PIO Timing 1 */
+	u32 pio2;		/* ATA + 0x0c PIO Timing 2 */
+	u32 mdma1;		/* ATA + 0x10 MDMA Timing 1 */
+	u32 mdma2;		/* ATA + 0x14 MDMA Timing 2 */
+	u32 udma1;		/* ATA + 0x18 UDMA Timing 1 */
+	u32 udma2;		/* ATA + 0x1c UDMA Timing 2 */
+	u32 udma3;		/* ATA + 0x20 UDMA Timing 3 */
+	u32 udma4;		/* ATA + 0x24 UDMA Timing 4 */
+	u32 udma5;		/* ATA + 0x28 UDMA Timing 5 */
+	u32 share_cnt;		/* ATA + 0x2c ATA share counter */
+	u32 reserved0[3];
+
+	/* FIFO registers */
+	u32 fifo_data;		/* ATA + 0x3c */
+	u8  fifo_status_frame;	/* ATA + 0x40 */
+	u8  fifo_status;	/* ATA + 0x41 */
+	u16 reserved7[1];
+	u8  fifo_control;	/* ATA + 0x44 */
+	u8  reserved8[5];
+	u16 fifo_alarm;		/* ATA + 0x4a */
+	u16 reserved9;
+	u16 fifo_rdp;		/* ATA + 0x4e */
+	u16 reserved10;
+	u16 fifo_wrp;		/* ATA + 0x52 */
+	u16 reserved11;
+	u16 fifo_lfrdp;		/* ATA + 0x56 */
+	u16 reserved12;
+	u16 fifo_lfwrp;		/* ATA + 0x5a */
+
+	/* Drive TaskFile registers */
+	u8  tf_control;		/* ATA + 0x5c TASKFILE Control/Alt Status */
+	u8  reserved13[3];
+	u16 tf_data;		/* ATA + 0x60 TASKFILE Data */
+	u16 reserved14;
+	u8  tf_features;	/* ATA + 0x64 TASKFILE Features/Error */
+	u8  reserved15[3];
+	u8  tf_sec_count;	/* ATA + 0x68 TASKFILE Sector Count */
+	u8  reserved16[3];
+	u8  tf_sec_num;		/* ATA + 0x6c TASKFILE Sector Number */
+	u8  reserved17[3];
+	u8  tf_cyl_low;		/* ATA + 0x70 TASKFILE Cylinder Low */
+	u8  reserved18[3];
+	u8  tf_cyl_high;	/* ATA + 0x74 TASKFILE Cylinder High */
+	u8  reserved19[3];
+	u8  tf_dev_head;	/* ATA + 0x78 TASKFILE Device/Head */
+	u8  reserved20[3];
+	u8  tf_command;		/* ATA + 0x7c TASKFILE Command/Status */
+	u8  dma_mode;		/* ATA + 0x7d ATA Host DMA Mode configuration */
+	u8  reserved21[2];
+};
+
+
+/* ======================================================================== */
+/* Aux fns                                                                  */
+/* ======================================================================== */
+
+
+/* MPC52xx low level hw control */
+
+static int
+mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
+{
+	struct mpc52xx_ata_timings *timing = &priv->timings[dev];
+	unsigned int ipb_period = priv->ipb_period;
+	unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
+
+	if ((pio<0) || (pio>4))
+		return -EINVAL;
+
+	t0	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
+	t1	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]);
+	t2_8	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]);
+	t2_16	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]);
+	t2i	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]);
+	t4	= CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
+	ta	= CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]);
+
+	timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i);
+	timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
+
+	return 0;
+}
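+
+/*
+ * Worked example (illustration only, assuming a 132MHz IPB bus): ipb_period is
+ * then 1000000000 / 132000 = 7575 ps, so PIO4's t0 of 120 ns works out as
+ * CALC_CLKCYC(7575, 120000) = (120000 + 7574) / 7575 = 16 clock cycles, which
+ * lands in bits 31:24 of the pio1 timing word.
+ */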
+
+static void
+mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
+{
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+	struct mpc52xx_ata_timings *timing = &priv->timings[device];
+
+	out_be32(&regs->pio1,  timing->pio1);
+	out_be32(&regs->pio2,  timing->pio2);
+	out_be32(&regs->mdma1, 0);
+	out_be32(&regs->mdma2, 0);
+	out_be32(&regs->udma1, 0);
+	out_be32(&regs->udma2, 0);
+	out_be32(&regs->udma3, 0);
+	out_be32(&regs->udma4, 0);
+	out_be32(&regs->udma5, 0);
+
+	priv->csel = device;
+}
+
+static int
+mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
+{
+	struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+	int tslot;
+
+	/* Clear share_cnt (all sample code does this ...) */
+	out_be32(&regs->share_cnt, 0);
+
+	/* Configure and reset host */
+	out_be32(&regs->config,
+			MPC52xx_ATA_HOSTCONF_IE |
+			MPC52xx_ATA_HOSTCONF_IORDY |
+			MPC52xx_ATA_HOSTCONF_SMR |
+			MPC52xx_ATA_HOSTCONF_FR);
+
+	udelay(10);
+
+	out_be32(&regs->config,
+			MPC52xx_ATA_HOSTCONF_IE |
+			MPC52xx_ATA_HOSTCONF_IORDY);
+
+	/* Set the time slot to 1us */
+	tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
+	out_be32(&regs->share_cnt, tslot << 16 );
+
+	/* Init timings to PIO0 */
+	memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
+
+	mpc52xx_ata_compute_pio_timings(priv, 0, 0);
+	mpc52xx_ata_compute_pio_timings(priv, 1, 0);
+
+	mpc52xx_ata_apply_timings(priv, 0);
+
+	return 0;
+}
+
+
+/* ======================================================================== */
+/* libata driver                                                            */
+/* ======================================================================== */
+
+static void
+mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+	int pio, rv;
+
+	pio = adev->pio_mode - XFER_PIO_0;
+
+	rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
+
+	if (rv) {
+		printk(KERN_ERR DRV_NAME
+			": Trying to select invalid PIO mode %d\n", pio);
+		return;
+	}
+
+	mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+static void
+mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
+{
+	struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+	if (device != priv->csel)
+		mpc52xx_ata_apply_timings(priv, device);
+
+	ata_std_dev_select(ap,device);
+}
+
+static void
+mpc52xx_ata_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
+			ata_std_postreset);
+}
+
+
+
+static struct scsi_host_template mpc52xx_ata_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.max_sectors		= ATA_MAX_SECTORS,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations mpc52xx_ata_port_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= mpc52xx_ata_set_piomode,
+	.dev_select		= mpc52xx_ata_dev_select,
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= mpc52xx_ata_error_handler,
+	.cable_detect		= ata_cable_40wire,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+	.port_start		= ata_port_start,
+};
+
+static int __devinit
+mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct ata_ioports *aio;
+	int rc;
+
+	host = ata_host_alloc(dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+	ap->flags		|= ATA_FLAG_SLAVE_POSS;
+	ap->pio_mask		= 0x1f;	/* Up to PIO4 */
+	ap->mwdma_mask		= 0x00;	/* No MWDMA   */
+	ap->udma_mask		= 0x00;	/* No UDMA    */
+	ap->ops			= &mpc52xx_ata_port_ops;
+	host->private_data	= priv;
+
+	aio = &ap->ioaddr;
+	aio->cmd_addr		= NULL;	/* Don't have a classic reg block */
+	aio->altstatus_addr	= &priv->ata_regs->tf_control;
+	aio->ctl_addr		= &priv->ata_regs->tf_control;
+	aio->data_addr		= &priv->ata_regs->tf_data;
+	aio->error_addr		= &priv->ata_regs->tf_features;
+	aio->feature_addr	= &priv->ata_regs->tf_features;
+	aio->nsect_addr		= &priv->ata_regs->tf_sec_count;
+	aio->lbal_addr		= &priv->ata_regs->tf_sec_num;
+	aio->lbam_addr		= &priv->ata_regs->tf_cyl_low;
+	aio->lbah_addr		= &priv->ata_regs->tf_cyl_high;
+	aio->device_addr	= &priv->ata_regs->tf_dev_head;
+	aio->status_addr	= &priv->ata_regs->tf_command;
+	aio->command_addr	= &priv->ata_regs->tf_command;
+
+	/* activate host */
+	return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0,
+				 &mpc52xx_ata_sht);
+}
+
+static struct mpc52xx_ata_priv *
+mpc52xx_ata_remove_one(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct mpc52xx_ata_priv *priv = host->private_data;
+
+	ata_host_detach(host);
+
+	return priv;
+}
+
+
+/* ======================================================================== */
+/* OF Platform driver                                                       */
+/* ======================================================================== */
+
+static int __devinit
+mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
+{
+	unsigned int ipb_freq;
+	struct resource res_mem;
+	int ata_irq = NO_IRQ;
+	struct mpc52xx_ata __iomem *ata_regs;
+	struct mpc52xx_ata_priv *priv;
+	int rv;
+
+	/* Get ipb frequency */
+	ipb_freq = mpc52xx_find_ipb_freq(op->node);
+	if (!ipb_freq) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Unable to find IPB Bus frequency\n" );
+		return -ENODEV;
+	}
+
+	/* Get IRQ and register */
+	rv = of_address_to_resource(op->node, 0, &res_mem);
+	if (rv) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Error while parsing device node resource\n" );
+		return rv;
+	}
+
+	ata_irq = irq_of_parse_and_map(op->node, 0);
+	if (ata_irq == NO_IRQ) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Error while mapping the irq\n");
+		return -EINVAL;
+	}
+
+	/* Request mem region */
+	if (!devm_request_mem_region(&op->dev, res_mem.start,
+				     sizeof(struct mpc52xx_ata), DRV_NAME)) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Error while requesting mem region\n");
+		rv = -EBUSY;
+		goto err;
+	}
+
+	/* Remap registers */
+	ata_regs = devm_ioremap(&op->dev, res_mem.start,
+				sizeof(struct mpc52xx_ata));
+	if (!ata_regs) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Error while mapping register set\n");
+		rv = -ENOMEM;
+		goto err;
+	}
+
+	/* Prepare our private structure */
+	priv = devm_kzalloc(&op->dev, sizeof(struct mpc52xx_ata_priv),
+			    GFP_ATOMIC);
+	if (!priv) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Error while allocating private structure\n");
+		rv = -ENOMEM;
+		goto err;
+	}
+
+	priv->ipb_period = 1000000000 / (ipb_freq / 1000);
+	priv->ata_regs = ata_regs;
+	priv->ata_irq = ata_irq;
+	priv->csel = -1;
+
+	/* Init the hw */
+	rv = mpc52xx_ata_hw_init(priv);
+	if (rv) {
+		printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+		goto err;
+	}
+
+	/* Register ourselves to libata */
+	rv = mpc52xx_ata_init_one(&op->dev, priv);
+	if (rv) {
+		printk(KERN_ERR DRV_NAME ": "
+			"Error while registering to ATA layer\n");
+		goto err;
+	}
+
+	/* Done */
+	return 0;
+
+	/* Error path */
+err:
+	irq_dispose_mapping(ata_irq);
+	return rv;
+}
+
+static int
+mpc52xx_ata_remove(struct of_device *op)
+{
+	struct mpc52xx_ata_priv *priv;
+
+	priv = mpc52xx_ata_remove_one(&op->dev);
+	irq_dispose_mapping(priv->ata_irq);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int
+mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
+{
+	return 0;	/* FIXME : What to do here ? */
+}
+
+static int
+mpc52xx_ata_resume(struct of_device *op)
+{
+	return 0;	/* FIXME : What to do here ? */
+}
+
+#endif
+
+
+static struct of_device_id mpc52xx_ata_of_match[] = {
+	{
+		.type		= "ata",
+		.compatible	= "mpc5200-ata",
+	},
+	{},
+};
+
+
+static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
+	.owner		= THIS_MODULE,
+	.name		= DRV_NAME,
+	.match_table	= mpc52xx_ata_of_match,
+	.probe		= mpc52xx_ata_probe,
+	.remove		= mpc52xx_ata_remove,
+#ifdef CONFIG_PM
+	.suspend	= mpc52xx_ata_suspend,
+	.resume		= mpc52xx_ata_resume,
+#endif
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+
+/* ======================================================================== */
+/* Module                                                                   */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_ata_init(void)
+{
+	printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
+	return of_register_platform_driver(&mpc52xx_ata_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_ata_exit(void)
+{
+	of_unregister_platform_driver(&mpc52xx_ata_of_platform_driver);
+}
+
+module_init(mpc52xx_ata_init);
+module_exit(mpc52xx_ata_exit);
+
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
+MODULE_VERSION(DRV_VERSION);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_mpiix.c linux-2.6.18.x86_64.p4/drivers/ata/pata_mpiix.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_mpiix.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_mpiix.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,293 @@
+/*
+ * pata_mpiix.c 	- Intel MPIIX PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * The MPIIX is different enough from the PIIX4 and friends that we give it
+ * a separate driver. The old ide/pci code handles this by just not tuning
+ * MPIIX at all.
+ *
+ * The MPIIX also differs in another important way from the majority of PIIX
+ * devices. The chip is a bridge (pardon the pun) between the old world of
+ * ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
+ * IDE controller is not decoded in PCI space and the chip does not claim to
+ * be IDE class PCI. This requires slightly non-standard probe logic compared
+ * with PCI IDE and also that we do not disable the device when our driver is
+ * unloaded (as it has many other functions).
+ *
+ * The driver consciously keeps this logic internally to avoid pushing quirky
+ * PATA history into the clean libata layer.
+ *
+ * ThinkPad specific note: if you boot an MPIIX-based ThinkPad with a PCMCIA
+ * hard disk present, this driver will not detect it. This is not a bug. In this
+ * configuration the secondary port of the MPIIX is disabled and the addresses
+ * are decoded by the PCMCIA bridge, and are therefore left for a generic IDE
+ * driver to operate.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_mpiix"
+#define DRV_VERSION "0.7.6"
+
+enum {
+	IDETIM = 0x6C,		/* IDE control register */
+	IORDY = (1 << 1),
+	PPE = (1 << 2),
+	FTIM = (1 << 0),
+	ENABLED = (1 << 15),
+	SECONDARY = (1 << 14)
+};
+
+static int mpiix_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
+
+	if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	mpiix_error_handler		-	probe reset
+ *	@ap: ATA port
+ *
+ *	Perform the ATA probe and bus reset sequence plus specific handling
+ *	for this hardware. The MPIIX has the enable bits in a different place
+ *	to PIIX4 and friends. As a pure PIO device it has no cable detect.
+ */
+
+static void mpiix_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	mpiix_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. The MPIIX allows us to program the
+ *	IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether
+ *	prefetching or IORDY are used.
+ *
+ *	This would get very ugly because we can only program timing for one
+ *	device at a time; the other gets PIO0. Fortunately libata calls
+ *	our qc_issue_prot command before a command is issued so we can
+ *	flip the timings back and forth to reduce the pain.
+ */
+
+static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int control = 0;
+	int pio = adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 idetim;
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	pci_read_config_word(pdev, IDETIM, &idetim);
+
+	/* Mask the IORDY/TIME/PPE for this device */
+	if (adev->class == ATA_DEV_ATA)
+		control |= PPE;		/* Enable prefetch/posting for disk */
+	if (ata_pio_need_iordy(adev))
+		control |= IORDY;
+	if (pio > 1)
+		control |= FTIM;	/* This drive is on the fast timing bank */
+
+	/* Mask out timing and clear both TIME bank selects */
+	idetim &= 0xCCEE;
+	idetim &= ~(0x07  << (4 * adev->devno));
+	idetim |= control << (4 * adev->devno);
+
+	idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+	pci_write_config_word(pdev, IDETIM, idetim);
+
+	/* We use ap->private_data as a pointer to the device currently
+	   loaded for timing */
+	ap->private_data = adev;
+}
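+
+/*
+ * Worked example (illustration only): an ATA disk on device 0 at PIO3 with
+ * IORDY gets control = PPE | IORDY | FTIM = 0x7 in bits 2:0 of IDETIM, plus
+ * ISP = 2 in bits 13:12 and RTC = 1 in bits 9:8 from the timings[] table.
+ */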
+
+/**
+ *	mpiix_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	that, even if we get this wrong, cycles to the other device will
+ *	be made PIO0.
+ */
+
+static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/* If modes have been configured and the channel data is not loaded
+	   then load it. We have to check if pio_mode is set as the core code
+	   does not set adev->pio_mode to XFER_PIO_0 while probing as would be
+	   logical */
+
+	if (adev->pio_mode && adev != ap->private_data)
+		mpiix_set_piomode(ap, adev);
+
+	return ata_qc_issue_prot(qc);
+}
+
+static struct scsi_host_template mpiix_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations mpiix_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= mpiix_set_piomode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= mpiix_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= mpiix_qc_issue_prot,
+	.data_xfer	= ata_data_xfer,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	/* Single threaded by the PCI probe logic */
+	static int printed_version;
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *cmd_addr, *ctl_addr;
+	u16 idetim;
+	int irq;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+	host = ata_host_alloc(&dev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	/* MPIIX has many functions which can be turned on or off according
+	   to other devices present. Make sure IDE is enabled before we try
+	   and use it */
+
+	pci_read_config_word(dev, IDETIM, &idetim);
+	if (!(idetim & ENABLED))
+		return -ENODEV;
+
+	/* See if it's primary or secondary channel... */
+	if (!(idetim & SECONDARY)) {
+		irq = 14;
+		cmd_addr = devm_ioport_map(&dev->dev, 0x1F0, 8);
+		ctl_addr = devm_ioport_map(&dev->dev, 0x3F6, 1);
+	} else {
+		irq = 15;
+		cmd_addr = devm_ioport_map(&dev->dev, 0x170, 8);
+		ctl_addr = devm_ioport_map(&dev->dev, 0x376, 1);
+	}
+
+	if (!cmd_addr || !ctl_addr)
+		return -ENOMEM;
+
+	/* We do our own plumbing to avoid leaking special cases for whacko
+	   ancient hardware into the core code. There are two issues to
+	   worry about.  #1 The chip is a bridge, so when it is in legacy
+	   mode with no BARs set it fools the generic setup.  #2 If you
+	   pci_disable_device() the MPIIX your box goes castors up */
+
+	ap = host->ports[0];
+	ap->ops = &mpiix_port_ops;
+	ap->pio_mask = 0x1F;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	ap->ioaddr.cmd_addr = cmd_addr;
+	ap->ioaddr.ctl_addr = ctl_addr;
+	ap->ioaddr.altstatus_addr = ctl_addr;
+
+	/* Let libata fill in the port details */
+	ata_std_ports(&ap->ioaddr);
+
+	/* activate host */
+	return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED,
+				 &mpiix_sht);
+}
+
+static const struct pci_device_id mpiix[] = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
+
+	{ },
+};
+
+static struct pci_driver mpiix_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= mpiix,
+	.probe 		= mpiix_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init mpiix_init(void)
+{
+	return pci_register_driver(&mpiix_pci_driver);
+}
+
+static void __exit mpiix_exit(void)
+{
+	pci_unregister_driver(&mpiix_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, mpiix);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(mpiix_init);
+module_exit(mpiix_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_netcell.c linux-2.6.18.x86_64.p4/drivers/ata/pata_netcell.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_netcell.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_netcell.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,153 @@
+/*
+ *    pata_netcell.c - Netcell PATA driver
+ *
+ *	(c) 2006 Red Hat  <alan@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_netcell"
+#define DRV_VERSION	"0.1.7"
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template netcell_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	/* Use standard CHS mapping rules */
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations netcell_ops = {
+	.port_disable		= ata_port_disable,
+
+	/* Task file is PCI ATA format, use helpers */
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_80wire,
+
+	/* BMDMA handling is PCI ATA format, use helpers */
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	/* IRQ-related hooks */
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	/* Generic PATA PCI ATA helpers */
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	netcell_init_one - Register Netcell ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in netcell_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht		= &netcell_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		/* Actually we don't really care about these as the
+		   firmware deals with them */
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= 0x3f, /* UDMA 133 */
+		.port_ops	= &netcell_ops,
+	};
+	const struct ata_port_info *port_info[] = { &info, NULL };
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	/* Any chip specific setup/optimisation/messages here */
+	ata_pci_clear_simplex(pdev);
+
+	/* And let the library code do the work */
+	return ata_pci_init_one(pdev, port_info);
+}
+
+static const struct pci_device_id netcell_pci_tbl[] = {
+	{ PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver netcell_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= netcell_pci_tbl,
+	.probe			= netcell_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init netcell_init(void)
+{
+	return pci_register_driver(&netcell_pci_driver);
+}
+
+static void __exit netcell_exit(void)
+{
+	pci_unregister_driver(&netcell_pci_driver);
+}
+
+module_init(netcell_init);
+module_exit(netcell_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_ns87410.c linux-2.6.18.x86_64.p4/drivers/ata/pata_ns87410.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_ns87410.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_ns87410.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,238 @@
+/*
+ * pata_ns87410.c 	- National Semiconductor 87410 PATA for new ATA layer
+ *			  (C) 2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_ns87410"
+#define DRV_VERSION "0.4.6"
+
+/**
+ *	ns87410_pre_reset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Check enabled ports
+ */
+
+static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits ns87410_enable_bits[] = {
+		{ 0x43, 1, 0x08, 0x08 },
+		{ 0x47, 1, 0x08, 0x08 }
+	};
+
+	if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	ns87410_error_handler		-	probe reset
+ *	@ap: ATA port
+ *
+ *	Perform the ATA probe and bus reset sequence plus specific handling
+ *	for this hardware. The 87410 has the enable bits in a different place
+ *	to PIIX4 and friends. As a pure PIO device it has no cable detect.
+ */
+
+static void ns87410_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	ns87410_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program timing data. This is kept per channel not per device,
+ *	and only affects the data port.
+ */
+
+static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x40 + 4 * ap->port_no;
+	u8 idetcr, idefr;
+	struct ata_timing at;
+
+	static const u8 activebits[15] = {
+		0, 1, 2, 3, 4,
+		5, 5, 6, 6, 6,
+		6, 7, 7, 7, 7
+	};
+
+	static const u8 recoverbits[12] = {
+		0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
+	};
+
+	pci_read_config_byte(pdev, port + 3, &idefr);
+
+	if (ata_pio_need_iordy(adev))
+		idefr |= 0x04;	/* IORDY enable */
+	else
+		idefr &= ~0x04;
+
+	if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
+		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
+		return;
+	}
+
+	at.active = FIT(at.active, 2, 16) - 2;
+	at.setup = FIT(at.setup, 1, 4) - 1;
+	at.recover = FIT(at.recover, 1, 12) - 1;
+
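+	/* Pack setup (bits 7:6), recovery (bits 5:3) and active (bits 2:0) counts into IDETCR */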
+	idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
+
+	pci_write_config_byte(pdev, port, idetcr);
+	pci_write_config_byte(pdev, port + 3, idefr);
+	/* We use ap->private_data as a pointer to the device currently
+	   loaded for timing */
+	ap->private_data = adev;
+}
+
+/**
+ *	ns87410_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.
+ */
+
+static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	/* If modes have been configured and the channel data is not loaded
+	   then load it. We have to check if pio_mode is set as the core code
+	   does not set adev->pio_mode to XFER_PIO_0 while probing as would be
+	   logical */
+
+	if (adev->pio_mode && adev != ap->private_data)
+		ns87410_set_piomode(ap, adev);
+
+	return ata_qc_issue_prot(qc);
+}
+
+static struct scsi_host_template ns87410_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations ns87410_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= ns87410_set_piomode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ns87410_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ns87410_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &ns87410_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x0F,
+		.port_ops = &ns87410_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id ns87410[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), },
+
+	{ },
+};
+
+static struct pci_driver ns87410_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= ns87410,
+	.probe 		= ns87410_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init ns87410_init(void)
+{
+	return pci_register_driver(&ns87410_pci_driver);
+}
+
+static void __exit ns87410_exit(void)
+{
+	pci_unregister_driver(&ns87410_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ns87410);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ns87410_init);
+module_exit(ns87410_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_oldpiix.c linux-2.6.18.x86_64.p4/drivers/ata/pata_oldpiix.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_oldpiix.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_oldpiix.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,343 @@
+/*
+ *    pata_oldpiix.c - early Intel PIIX PATA controllers
+ *
+ *	(C) 2005 Red Hat <alan@redhat.com>
+ *
+ *    Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ *    Early PIIX differs significantly from the later PIIX as it lacks
+ *    SITRE and the slave timing registers. This means that you have to
+ *    set timing per channel, or be clever. Libata tells us whenever it
+ *    does drive selection and we use this to reload the timings.
+ *
+ *    Because of these behaviour differences the early PIIX gets its own driver module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_oldpiix"
+#define DRV_VERSION	"0.5.5"
+
+/**
+ *	oldpiix_pre_reset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits oldpiix_enable_bits[] = {
+		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
+		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
+	};
+
+	if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	oldpiix_pata_error_handler - Probe specified port on PATA host controller
+ *	@ap: Port to probe
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void oldpiix_pata_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	oldpiix_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
+	u16 idetm_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. Note that the early PIIX does not have the slave
+	 *	timing port at 0x44.
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	if (pio > 1)
+		control |= 1;	/* TIME */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE */
+
+	/* Intel specifies that the prefetch/posting is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE */
+
+	pci_read_config_word(dev, idetm_port, &idetm_data);
+
+	/*
+	 * Set PPE, IE and TIME as appropriate.
+	 * Clear the other drive's timing bits.
+	 */
+	if (adev->devno == 0) {
+		idetm_data &= 0xCCE0;
+		idetm_data |= control;
+	} else {
+		idetm_data &= 0xCC0E;
+		idetm_data |= (control << 4);
+	}
+	idetm_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	pci_write_config_word(dev, idetm_port, idetm_data);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	oldpiix_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set MWDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u8 idetm_port		= ap->port_no ? 0x42 : 0x40;
+	u16 idetm_data;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 0 },
+			    { 2, 1 },
+			    { 2, 3 }, };
+
+	/*
+	 * MWDMA is driven by the PIO timings. We must also enable
+	 * IORDY unconditionally along with TIME1. PPE has already
+	 * been set when the PIO timing was set.
+	 */
+
+	unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+	unsigned int control;
+	const unsigned int needed_pio[3] = {
+		XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+	};
+	int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+	pci_read_config_word(dev, idetm_port, &idetm_data);
+
+	control = 3;	/* IORDY|TIME0 */
+	/* Intel specifies that the PPE functionality is for disk only */
+	if (adev->class == ATA_DEV_ATA)
+		control |= 4;	/* PPE enable */
+
+	/* If the drive MWDMA is faster than it can do PIO then
+	   we must force PIO into PIO0 */
+
+	if (adev->pio_mode < needed_pio[mwdma])
+		/* Enable DMA timing only */
+		control |= 8;	/* PIO cycles in PIO0 */
+
+	/* Mask out the relevant control and timing bits we will load. Also
+	   clear the other drive TIME register as a precaution */
+	if (adev->devno == 0) {
+		idetm_data &= 0xCCE0;
+		idetm_data |= control;
+	} else {
+		idetm_data &= 0xCC0E;
+		idetm_data |= (control << 4);
+	}
+	idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+	pci_write_config_word(dev, idetm_port, idetm_data);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	oldpiix_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	that, even if we get this wrong, cycles to the other device will
+ *	be made PIO0.
+ */
+
+static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	if (adev != ap->private_data) {
+		oldpiix_set_piomode(ap, adev);
+		if (adev->dma_mode)
+			oldpiix_set_dmamode(ap, adev);
+	}
+	return ata_qc_issue_prot(qc);
+}
+
+
+static struct scsi_host_template oldpiix_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations oldpiix_pata_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= oldpiix_set_piomode,
+	.set_dmamode		= oldpiix_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= oldpiix_pata_error_handler,
+	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= oldpiix_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	oldpiix_init_one - Register PIIX ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in oldpiix_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht		= &oldpiix_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma1-2 */
+		.port_ops	= &oldpiix_pata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id oldpiix_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, 0x1230), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver oldpiix_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= oldpiix_pci_tbl,
+	.probe			= oldpiix_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init oldpiix_init(void)
+{
+	return pci_register_driver(&oldpiix_pci_driver);
+}
+
+static void __exit oldpiix_exit(void)
+{
+	pci_unregister_driver(&oldpiix_pci_driver);
+}
+
+module_init(oldpiix_init);
+module_exit(oldpiix_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_opti.c linux-2.6.18.x86_64.p4/drivers/ata/pata_opti.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_opti.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_opti.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,270 @@
+/*
+ * pata_opti.c 	- Opti PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * Based on
+ *  linux/drivers/ide/pci/opti621.c		Version 0.7	Sept 10, 2002
+ *
+ *  Copyright (C) 1996-1998  Linus Torvalds & authors (see below)
+ *
+ * Authors:
+ * Jaromir Koutek <miri@punknet.cz>,
+ * Jan Harkes <jaharkes@cwi.nl>,
+ * Mark Lord <mlord@pobox.com>
+ * Some parts of code are from ali14xx.c and from rz1000.c.
+ *
+ * Also consulted the FreeBSD prototype driver by Kevin Day to try
+ * and resolve some confusions. Further documentation can be found in
+ * Ralf Brown's interrupt list
+ *
+ * If you have other variants of the Opti range (Viper/Vendetta) please
+ * try this driver with those PCI idents and report back. For the later
+ * chips see the pata_optidma driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_opti"
+#define DRV_VERSION "0.2.9"
+
+enum {
+	READ_REG	= 0,	/* index of Read cycle timing register */
+	WRITE_REG 	= 1,	/* index of Write cycle timing register */
+	CNTRL_REG 	= 3,	/* index of Control register */
+	STRAP_REG 	= 5,	/* index of Strap register */
+	MISC_REG 	= 6	/* index of Miscellaneous register */
+};
+
+/**
+ *	opti_pre_reset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int opti_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits opti_enable_bits[] = {
+		{ 0x45, 1, 0x80, 0x00 },
+		{ 0x40, 1, 0x08, 0x00 }
+	};
+
+	if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	opti_error_handler		-	probe reset
+ *	@ap: ATA port
+ *
+ *	Perform the ATA probe and bus reset sequence plus specific handling
+ *	for this hardware. The Opti needs little handling - we have no UDMA66
+ *	capability that needs cable detection. All we must do is check the port
+ *	is enabled.
+ */
+
+static void opti_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	opti_write_reg		-	control register setup
+ *	@ap: ATA port
+ *	@value: value
+ *	@reg: control register number
+ *
+ *	The Opti uses magic 'trapdoor' register accesses to do configuration
+ *	rather than using PCI space as other controllers do. The double inw
+ *	on the error register activates configuration mode. We can then write
+ *	the control register
+ */
+
+static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
+{
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+
+	/* These 3 unlock the control register access */
+	ioread16(regio + 1);
+	ioread16(regio + 1);
+	iowrite8(3, regio + 2);
+
+	/* Do the I/O */
+	iowrite8(val, regio + reg);
+
+	/* Relock */
+	iowrite8(0x83, regio + 2);
+}
+
+/**
+ *	opti_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Timing numbers are taken from
+ *	the FreeBSD driver then pre computed to keep the code clean. There
+ *	are two tables depending on the hardware clock speed.
+ */
+
+static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_device *pair = ata_dev_pair(adev);
+	int clock;
+	int pio = adev->pio_mode - XFER_PIO_0;
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+	u8 addr;
+
+	/* Address table precomputed with prefetch off and a DCLK of 2 */
+	static const u8 addr_timing[2][5] = {
+		{ 0x30, 0x20, 0x20, 0x10, 0x10 },
+		{ 0x20, 0x20, 0x10, 0x10, 0x10 }
+	};
+	static const u8 data_rec_timing[2][5] = {
+		{ 0x6B, 0x56, 0x42, 0x32, 0x31 },
+		{ 0x58, 0x44, 0x32, 0x22, 0x21 }
+	};
+
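+	/* Bit 0 of the strap register tells us the bus clock and selects the timing table */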
+	iowrite8(0xff, regio + 5);
+	clock = ioread16(regio + 5) & 1;
+
+	/*
+ 	 *	As with many controllers the address setup time is shared
+ 	 *	and must suit both devices if present.
+	 */
+
+	addr = addr_timing[clock][pio];
+	if (pair) {
+		/* Hardware constraint */
+		u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
+		if (pair_addr > addr)
+			addr = pair_addr;
+	}
+
+	/* Commence primary programming sequence */
+	opti_write_reg(ap, adev->devno, MISC_REG);
+	opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
+	opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
+	opti_write_reg(ap, addr, MISC_REG);
+
+	/* Programming sequence complete, override strapping */
+	opti_write_reg(ap, 0x85, CNTRL_REG);
+}
+
+static struct scsi_host_template opti_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations opti_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= opti_set_piomode,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= opti_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &opti_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &opti_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id opti[] = {
+	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
+	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 },
+
+	{ },
+};
+
+static struct pci_driver opti_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= opti,
+	.probe 		= opti_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init opti_init(void)
+{
+	return pci_register_driver(&opti_pci_driver);
+}
+
+static void __exit opti_exit(void)
+{
+	pci_unregister_driver(&opti_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, opti);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(opti_init);
+module_exit(opti_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_optidma.c linux-2.6.18.x86_64.p4/drivers/ata/pata_optidma.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_optidma.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_optidma.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,551 @@
+/*
+ * pata_optidma.c 	- Opti DMA PATA for new ATA layer
+ *			  (C) 2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ *	The Opti DMA controllers are related to the older PIO PCI controllers
+ *	and indeed the VLB ones. The main differences are that the timing
+ *	and indeed the VLB ones. The main differences are that the timing
+ *	numbers are now based on PCI clocks rather than VLB clocks, and that
+ *	MWDMA is supported.
+ *	This driver should support Viper-N+, FireStar, FireStar Plus.
+ *
+ *	These devices support virtual DMA for read (aka the CS5520). Later
+ *	chips support UDMA33, but only if the rest of the board logic does,
+ *	so you have to get this right. We don't support the virtual DMA
+ *	but we do handle UDMA.
+ *
+ *	Bits that are worth knowing
+ *		Most control registers are shadowed into I/O registers
+ *		0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25MHz
+ *		Virtual DMA registers *move* between rev 0x02 and rev 0x10
+ *		UDMA requires a 66MHz FSB
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_optidma"
+#define DRV_VERSION "0.3.2"
+
+enum {
+	READ_REG	= 0,	/* index of Read cycle timing register */
+	WRITE_REG 	= 1,	/* index of Write cycle timing register */
+	CNTRL_REG 	= 3,	/* index of Control register */
+	STRAP_REG 	= 5,	/* index of Strap register */
+	MISC_REG 	= 6	/* index of Miscellaneous register */
+};
+
+static int pci_clock;	/* 0 = 33 1 = 25 */
+
+/**
+ *	optidma_pre_reset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits optidma_enable_bits = {
+		0x40, 1, 0x08, 0x00
+	};
+
+	if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	optidma_error_handler		-	probe reset
+ *	@ap: ATA port
+ *
+ *	Perform the ATA probe and bus reset sequence plus specific handling
+ *	for this hardware. The Opti needs little handling - we have no UDMA66
+ *	capability that needs cable detection. All we must do is check the port
+ *	is enabled.
+ */
+
+static void optidma_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	optidma_unlock		-	unlock control registers
+ *	@ap: ATA port
+ *
+ *	Unlock the control register block for this adapter. Registers must not
+ *	be unlocked in a situation where libata might look at them.
+ */
+
+static void optidma_unlock(struct ata_port *ap)
+{
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+
+	/* These 3 unlock the control register access */
+	ioread16(regio + 1);
+	ioread16(regio + 1);
+	iowrite8(3, regio + 2);
+}
+
+/**
+ *	optidma_lock		-	issue temporary relock
+ *	@ap: ATA port
+ *
+ *	Re-lock the configuration register settings.
+ */
+
+static void optidma_lock(struct ata_port *ap)
+{
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+
+	/* Relock */
+	iowrite8(0x83, regio + 2);
+}
+
+/**
+ *	optidma_mode_setup	-	set mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@mode: Mode to set
+ *
+ *	Called to do the DMA or PIO mode setup. Timing numbers are all
+ *	pre computed to keep the code clean. There are two tables depending
+ *	on the hardware clock speed.
+ *
+ *	WARNING: While we do this the IDE registers vanish. If we take an
+ *	IRQ here we depend on the host set locking to avoid catastrophe.
+ */
+
+static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct ata_device *pair = ata_dev_pair(adev);
+	int pio = adev->pio_mode - XFER_PIO_0;
+	int dma = adev->dma_mode - XFER_MW_DMA_0;
+	void __iomem *regio = ap->ioaddr.cmd_addr;
+	u8 addr;
+
+	/* Address table precomputed with a DCLK of 2 */
+	static const u8 addr_timing[2][5] = {
+		{ 0x30, 0x20, 0x20, 0x10, 0x10 },
+		{ 0x20, 0x20, 0x10, 0x10, 0x10 }
+	};
+	static const u8 data_rec_timing[2][5] = {
+		{ 0x59, 0x46, 0x30, 0x20, 0x20 },
+		{ 0x46, 0x32, 0x20, 0x20, 0x10 }
+	};
+	static const u8 dma_data_rec_timing[2][3] = {
+		{ 0x76, 0x20, 0x20 },
+		{ 0x54, 0x20, 0x10 }
+	};
+
+	/* Switch from IDE to control mode */
+	optidma_unlock(ap);
+
+
+	/*
+ 	 *	As with many controllers the address setup time is shared
+ 	 *	and must suit both devices if present. FIXME: Check if we
+ 	 *	need to look at slowest of PIO/DMA mode of either device
+	 */
+
+	if (mode >= XFER_MW_DMA_0)
+		addr = 0;
+	else
+		addr = addr_timing[pci_clock][pio];
+
+	if (pair) {
+		u8 pair_addr;
+		/* Hardware constraint */
+		if (pair->dma_mode)
+			pair_addr = 0;
+		else
+			pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
+		if (pair_addr > addr)
+			addr = pair_addr;
+	}
+
+	/* Commence primary programming sequence */
+	/* First we load the device number into the timing select */
+	iowrite8(adev->devno, regio + MISC_REG);
+	/* Now we load the data timings into read data/write data */
+	if (mode < XFER_MW_DMA_0) {
+		iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG);
+		iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
+	} else if (mode < XFER_UDMA_0) {
+		iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
+		iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
+	}
+	/* Finally we load the address setup into the misc register */
+	iowrite8(addr | adev->devno, regio + MISC_REG);
+
+	/* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
+	iowrite8(0x85, regio + CNTRL_REG);
+
+	/* Switch back to IDE mode */
+	optidma_lock(ap);
+
+	/* Note: at this point our programming is incomplete. We are
+	   not supposed to program PCI 0x43 "things we hacked onto the chip"
+	   until we've done both sets of PIO/DMA timings */
+}
+
+/**
+ *	optiplus_mode_setup	-	DMA setup for Firestar Plus
+ *	@ap: ATA port
+ *	@adev: device
+ *	@mode: desired mode
+ *
+ *	The Firestar plus has additional UDMA functionality for UDMA0-2 and
+ *	requires we do some additional work. Because the base work we must do
+ *	is mostly shared we wrap the Firestar setup functionality in this
+ *	one function.
+ */
+
+static void optiplus_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 udcfg;
+	u8 udslave;
+	int dev2 = 2 * adev->devno;
+	int unit = 2 * ap->port_no + adev->devno;
+	int udma = mode - XFER_UDMA_0;
+
+	pci_read_config_byte(pdev, 0x44, &udcfg);
+	if (mode <= XFER_UDMA_0) {
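+		/* Not doing UDMA: clear this unit's UDMA enable bit and program the MWDMA/PIO timings */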
+		udcfg &= ~(1 << unit);
+		optidma_mode_setup(ap, adev, adev->dma_mode);
+	} else {
+		udcfg |=  (1 << unit);
+		if (ap->port_no) {
+			pci_read_config_byte(pdev, 0x45, &udslave);
+			udslave &= ~(0x03 << dev2);
+			udslave |= (udma << dev2);
+			pci_write_config_byte(pdev, 0x45, udslave);
+		} else {
+			udcfg &= ~(0x30 << dev2);
+			udcfg |= (udma << dev2);
+		}
+	}
+	pci_write_config_byte(pdev, 0x44, udcfg);
+}
+
+/**
+ *	optidma_set_pio_mode	-	PIO setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optidma_mode_setup(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	optidma_set_dma_mode	-	DMA setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optidma_mode_setup(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	optiplus_set_pio_mode	-	PIO setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optiplus_mode_setup(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	optiplus_set_dma_mode	-	DMA setup callback
+ *	@ap: ATA port
+ *	@adev: Device
+ *
+ *	The libata core provides separate functions for handling PIO and
+ *	DMA programming. The architecture of the Firestar makes it easier
+ *	for us to have a common function so we provide wrappers
+ */
+
+static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	optiplus_mode_setup(ap, adev, adev->dma_mode);
+}
+
+/**
+ *	optidma_make_bits43	-	PCI setup helper
+ *	@adev: ATA device
+ *
+ *	Turn the ATA device setup into PCI configuration bits
+ *	for register 0x43 and return the two bits needed.
+ */
+
+static u8 optidma_make_bits43(struct ata_device *adev)
+{
+	static const u8 bits43[5] = {
+		0, 0, 0, 1, 2
+	};
+	if (!ata_dev_enabled(adev))
+		return 0;
+	if (adev->dma_mode)
+		return adev->dma_mode - XFER_MW_DMA_0;
+	return bits43[adev->pio_mode - XFER_PIO_0];
+}
+
+/**
+ *	optidma_set_mode	-	mode setup
+ *	@ap: port to set up
+ *
+ *	Use the standard setup to tune the chipset and then finalise the
+ *	configuration by writing the nibble of extra bits of data into
+ *	the chip.
+ */
+
+static int optidma_set_mode(struct ata_port *ap, struct ata_device **r_failed)
+{
+	u8 r;
+	int nybble = 4 * ap->port_no;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int rc  = ata_do_set_mode(ap, r_failed);
+	if (rc == 0) {
+		pci_read_config_byte(pdev, 0x43, &r);
+
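+		/* Register 0x43 holds a nibble per port, two bits per drive of mode data */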
+		r &= ~(0x0F << nybble);
+		r |= (optidma_make_bits43(&ap->device[0]) +
+		     (optidma_make_bits43(&ap->device[1]) << 2)) << nybble;
+		pci_write_config_byte(pdev, 0x43, r);
+	}
+	return rc;
+}
+
+static struct scsi_host_template optidma_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations optidma_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= optidma_set_pio_mode,
+	.set_dmamode	= optidma_set_dma_mode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.error_handler	= optidma_error_handler,
+	.set_mode	= optidma_set_mode,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations optiplus_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= optiplus_set_pio_mode,
+	.set_dmamode	= optiplus_set_dma_mode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.error_handler	= optidma_error_handler,
+	.set_mode	= optidma_set_mode,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	optiplus_with_udma	-	Look for UDMA capable setup
+ *	@pdev: ATA controller
+ */
+
+static int optiplus_with_udma(struct pci_dev *pdev)
+{
+	u8 r;
+	int ret = 0;
+	int ioport = 0x22;
+	struct pci_dev *dev1;
+
+	/* Find function 1 */
+	dev1 = pci_get_device(0x1045, 0xC701, NULL);
+	if(dev1 == NULL)
+		return 0;
+
+	/* Rev must be >= 0x10 */
+	pci_read_config_byte(dev1, 0x08, &r);
+	if (r < 0x10)
+		goto done_nomsg;
+	/* Read the chipset system configuration to check our mode */
+	pci_read_config_byte(dev1, 0x5F, &r);
+	ioport |= (r << 8);
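+	/* Write index 0x10, then bit 0 at ioport + 2 tells us if we are running 66MHz sync */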
+	outb(0x10, ioport);
+	/* Must be 66MHz sync */
+	if ((inb(ioport + 2) & 1) == 0)
+		goto done;
+
+	/* Check the ATA arbitration/timing is suitable */
+	pci_read_config_byte(pdev, 0x42, &r);
+	if ((r & 0x36) != 0x36)
+		goto done;
+	pci_read_config_byte(dev1, 0x52, &r);
+	if (r & 0x80)	/* IDEDIR disabled */
+		ret = 1;
+done:
+	if (!ret)
+		printk(KERN_WARNING "UDMA not supported in this configuration.\n");
+done_nomsg:		/* Wrong chip revision */
+	pci_dev_put(dev1);
+	return ret;
+}
+
+static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_82c700 = {
+		.sht = &optidma_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &optidma_port_ops
+	};
+	static const struct ata_port_info info_82c700_udma = {
+		.sht = &optidma_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x07,
+		.port_ops = &optiplus_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info_82c700, NULL };
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+	/* Fixed location chipset magic */
+	inw(0x1F1);
+	inw(0x1F1);
+	pci_clock = inb(0x1F5) & 1;		/* 0 = 33MHz, 1 = 25MHz */
+
+	if (optiplus_with_udma(dev))
+		ppi[0] = &info_82c700_udma;
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id optidma[] = {
+	{ PCI_VDEVICE(OPTI, 0xD568), },		/* Opti 82C700 */
+
+	{ },
+};
+
+static struct pci_driver optidma_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= optidma,
+	.probe 		= optidma_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init optidma_init(void)
+{
+	return pci_register_driver(&optidma_pci_driver);
+}
+
+static void __exit optidma_exit(void)
+{
+	pci_unregister_driver(&optidma_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, optidma);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(optidma_init);
+module_exit(optidma_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_pcmcia.c linux-2.6.18.x86_64.p4/drivers/ata/pata_pcmcia.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_pcmcia.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_pcmcia.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,438 @@
+/*
+ *   pata_pcmcia.c - PCMCIA PATA controller driver.
+ *   Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ *   PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
+ *						<openembedded@hrw.one.pl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *   Heavily based upon ide-cs.c
+ *   The initial developer of the original code is David A. Hinds
+ *   <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
+ *   are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+
+#define DRV_NAME "pata_pcmcia"
+#define DRV_VERSION "0.3.1"
+
+/*
+ *	Private data structure to glue stuff together
+ */
+
+struct ata_pcmcia_info {
+	struct pcmcia_device *pdev;
+	int		ndev;
+	dev_node_t	node;
+};
+
+/**
+ *	pcmcia_set_mode	-	PCMCIA specific mode setup
+ *	@ap: Port
+ *	@r_failed_dev: Return pointer for failed device
+ *
+ *	Perform the tuning and setup of the devices and timings, which
+ *	for PCMCIA is the same as any other controller. We wrap it however
+ *	as we need to spot hardware with incorrect or missing master/slave
+ *	decode, which alas is embarrassingly common in the PC world
+ */
+
+static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+	struct ata_device *master = &ap->device[0];
+	struct ata_device *slave = &ap->device[1];
+
+	if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
+		return ata_do_set_mode(ap, r_failed_dev);
+
+	if (memcmp(master->id + ATA_ID_FW_REV,  slave->id + ATA_ID_FW_REV,
+			   ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0)
+	{
+		/* Suspicious match, but could be two cards from
+		   the same vendor - check serial */
+		if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
+			   ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) {
+			ata_dev_printk(slave, KERN_WARNING, "is a ghost device, ignoring.\n");
+			ata_dev_disable(slave);
+		}
+	}
+	return ata_do_set_mode(ap, r_failed_dev);
+}
+
+static struct scsi_host_template pcmcia_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations pcmcia_port_ops = {
+	.set_mode	= pcmcia_set_mode,
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer_noirq,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+/**
+ *	pcmcia_init_one		-	attach a PCMCIA interface
+ *	@pdev: pcmcia device
+ *
+ *	Register a PCMCIA IDE interface. Such interfaces are PIO 0 and
+ *	shared IRQ.
+ */
+
+static int pcmcia_init_one(struct pcmcia_device *pdev)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct ata_pcmcia_info *info;
+	tuple_t tuple;
+	struct {
+		unsigned short buf[128];
+		cisparse_t parse;
+		config_info_t conf;
+		cistpl_cftable_entry_t dflt;
+	} *stk = NULL;
+	cistpl_cftable_entry_t *cfg;
+	int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
+	unsigned long io_base, ctl_base;
+	void __iomem *io_addr, *ctl_addr;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL)
+		return -ENOMEM;
+
+	/* Glue stuff together. FIXME: We may be able to get rid of info with care */
+	info->pdev = pdev;
+	pdev->priv = info;
+
+	/* Set up attributes in order to probe card and get resources */
+	pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+	pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+	pdev->io.IOAddrLines = 3;
+	pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
+	pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+	pdev->conf.Attributes = CONF_ENABLE_IRQ;
+	pdev->conf.IntType = INT_MEMORY_AND_IO;
+
+	/* Allocate resource probing structures */
+
+	stk = kzalloc(sizeof(*stk), GFP_KERNEL);
+	if (!stk)
+		goto out1;
+
+	cfg = &stk->parse.cftable_entry;
+
+	/* Tuples we are walking */
+	tuple.TupleData = (cisdata_t *)&stk->buf;
+	tuple.TupleOffset = 0;
+	tuple.TupleDataMax = 255;
+	tuple.Attributes = 0;
+
+	/* See if we have a manufacturer identifier. Use it to set is_kme for
+	   vendor quirks */
+	is_kme = ((pdev->manf_id == MANFID_KME) &&
+		  ((pdev->card_id == PRODID_KME_KXLC005_A) ||
+		   (pdev->card_id == PRODID_KME_KXLC005_B)));
+
+	/* Not sure if this is right... look up the current Vcc */
+	CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
+/*	link->conf.Vcc = stk->conf.Vcc; */
+
+	pass = io_base = ctl_base = 0;
+	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+	tuple.Attributes = 0;
+	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
+
+	/* Now munch the resources looking for a suitable set */
+	while (1) {
+		if (pcmcia_get_tuple_data(pdev, &tuple) != 0)
+			goto next_entry;
+		if (pcmcia_parse_tuple(pdev, &tuple, &stk->parse) != 0)
+			goto next_entry;
+		/* Check for matching Vcc, unless we're desperate */
+		if (!pass) {
+			if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+				if (stk->conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000)
+					goto next_entry;
+			} else if (stk->dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
+				if (stk->conf.Vcc != stk->dflt.vcc.param[CISTPL_POWER_VNOM] / 10000)
+					goto next_entry;
+			}
+		}
+
+		if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
+			pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+		else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
+			pdev->conf.Vpp = stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
+
+		if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) {
+			cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &stk->dflt.io;
+			pdev->conf.ConfigIndex = cfg->index;
+			pdev->io.BasePort1 = io->win[0].base;
+			pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+			if (!(io->flags & CISTPL_IO_16BIT))
+				pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
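+			/* Two I/O windows: command block registers in the first,
+			   the control register in the second; a single window
+			   places the control register at offset 0x0e */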
+			if (io->nwin == 2) {
+				pdev->io.NumPorts1 = 8;
+				pdev->io.BasePort2 = io->win[1].base;
+				pdev->io.NumPorts2 = (is_kme) ? 2 : 1;
+				if (pcmcia_request_io(pdev, &pdev->io) != 0)
+					goto next_entry;
+				io_base = pdev->io.BasePort1;
+				ctl_base = pdev->io.BasePort2;
+			} else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
+				pdev->io.NumPorts1 = io->win[0].len;
+				pdev->io.NumPorts2 = 0;
+				if (pcmcia_request_io(pdev, &pdev->io) != 0)
+					goto next_entry;
+				io_base = pdev->io.BasePort1;
+				ctl_base = pdev->io.BasePort1 + 0x0e;
+			} else goto next_entry;
+			/* If we've got this far, we're done */
+			break;
+		}
+next_entry:
+		if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+			memcpy(&stk->dflt, cfg, sizeof(stk->dflt));
+		if (pass) {
+			CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(pdev, &tuple));
+		} else if (pcmcia_get_next_tuple(pdev, &tuple) != 0) {
+			CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
+			memset(&stk->dflt, 0, sizeof(stk->dflt));
+			pass++;
+		}
+	}
+
+	CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
+	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));
+
+	/* iomap */
+	ret = -ENOMEM;
+	io_addr = devm_ioport_map(&pdev->dev, io_base, 8);
+	ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1);
+	if (!io_addr || !ctl_addr)
+		goto failed;
+
+	/* Success. Disable the IRQ nIEN line, do quirks */
+	iowrite8(0x02, ctl_addr);
+	if (is_kme)
+		iowrite8(0x81, ctl_addr + 0x01);
+
+	/* FIXME: Could be more ports at base + 0x10 but we only deal with
+	   one right now */
+	if (pdev->io.NumPorts1 >= 0x20)
+		printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
+
+	/*
+ 	 *	Having done the PCMCIA plumbing the ATA side is relatively
+ 	 *	sane.
+	 */
+	ret = -ENOMEM;
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto failed;
+	ap = host->ports[0];
+
+	ap->ops = &pcmcia_port_ops;
+	ap->pio_mask = 1;		/* ISA so PIO 0 cycles */
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+	ap->ioaddr.cmd_addr = io_addr;
+	ap->ioaddr.altstatus_addr = ctl_addr;
+	ap->ioaddr.ctl_addr = ctl_addr;
+	ata_std_ports(&ap->ioaddr);
+
+	/* activate */
+	ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
+				IRQF_SHARED, &pcmcia_sht);
+	if (ret)
+		goto failed;
+
+	info->ndev = 1;
+	kfree(stk);
+	return 0;
+
+cs_failed:
+	cs_error(pdev, last_fn, last_ret);
+failed:
+	kfree(stk);
+	info->ndev = 0;
+	pcmcia_disable_device(pdev);
+out1:
+	kfree(info);
+	return ret;
+}
+
+/**
+ *	pcmcia_remove_one	-	unplug a PCMCIA interface
+ *	@pdev: pcmcia device
+ *
+ *	A PCMCIA ATA device has been unplugged. Perform the needed
+ *	cleanup. Also called on module unload for any active devices.
+ */
+
+static void pcmcia_remove_one(struct pcmcia_device *pdev)
+{
+	struct ata_pcmcia_info *info = pdev->priv;
+	struct device *dev = &pdev->dev;
+
+	if (info != NULL) {
+		/* If we have attached the device to the ATA layer, detach it */
+		if (info->ndev) {
+			struct ata_host *host = dev_get_drvdata(dev);
+			ata_host_detach(host);
+		}
+		info->ndev = 0;
+		pdev->priv = NULL;
+	}
+	pcmcia_disable_device(pdev);
+	kfree(info);
+}
+
+static struct pcmcia_device_id pcmcia_devices[] = {
+	PCMCIA_DEVICE_FUNC_ID(4),
+	PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000),	/* Hitachi */
+	PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000),	/* I-O Data CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001),	/* Mitsubishi CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
+	PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
+	PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),	/* SanDisk CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),	/* Toshiba */
+	PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
+	PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),	/* Samsung */
+ 	PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000),	/* Hitachi */
+	PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
+	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100),	/* Viking CFA */
+	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200),	/* Lexar, Viking CFA */
+	PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
+	PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
+	PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
+	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
+	PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
+	PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
+	PCMCIA_DEVICE_PROD_ID12("EXP   ", "CD-ROM", 0x0a5c52fd, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("EXP   ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
+	PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
+	PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
+	PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
+	PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+	PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
+	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
+	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
+	PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2      ", 0xe37be2b5, 0x8671043b),
+	PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
+	PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
+	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
+	PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
+	PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
+	PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
+	PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
+	PCMCIA_DEVICE_PROD_ID1("TRANSCEND    512M   ", 0xd0909443),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+	PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+	PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
+	PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
+	PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
+	PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
+	PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
+
+static struct pcmcia_driver pcmcia_driver = {
+	.owner		= THIS_MODULE,
+	.drv = {
+		.name		= DRV_NAME,
+	},
+	.id_table	= pcmcia_devices,
+	.probe		= pcmcia_init_one,
+	.remove		= pcmcia_remove_one,
+};
+
+static int __init pcmcia_init(void)
+{
+	return pcmcia_register_driver(&pcmcia_driver);
+}
+
+static void __exit pcmcia_exit(void)
+{
+	pcmcia_unregister_driver(&pcmcia_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pcmcia_init);
+module_exit(pcmcia_exit);
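The CIS scan in pcmcia_init_one() above is easier to follow in isolation. Below is a
minimal standalone sketch, not part of the patch, of the two-pass selection it performs:
the first pass only accepts CFTABLE entries whose nominal Vcc matches the socket, the
second pass drops that requirement and takes any entry with a usable I/O window (two
windows, or a single window of at least 16 ports). The struct and function names here
are invented for illustration.

/* Standalone sketch of the two-pass CIS entry selection (illustrative names). */
#include <stdio.h>

struct cis_entry {
	int has_vcc;		/* entry specifies a nominal Vcc */
	int vcc_mv;		/* nominal Vcc in millivolts */
	int nwin;		/* number of I/O windows (0, 1 or 2) */
	unsigned int base;	/* base of window 0 */
	unsigned int len;	/* length of window 0 */
};

/* Returns the index of the first usable entry, or -1 if none fits. */
static int pick_cis_entry(const struct cis_entry *e, int n, int socket_vcc_mv)
{
	int pass, i;

	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < n; i++) {
			/* Pass 0: insist on a matching Vcc if one is given */
			if (pass == 0 && e[i].has_vcc &&
			    e[i].vcc_mv != socket_vcc_mv)
				continue;
			/* Two windows, or one window of at least 16 ports */
			if (e[i].nwin == 2 ||
			    (e[i].nwin == 1 && e[i].len >= 16))
				return i;
		}
	}
	return -1;
}

int main(void)
{
	const struct cis_entry entries[] = {
		{ .has_vcc = 1, .vcc_mv = 3300, .nwin = 1, .base = 0x100, .len = 16 },
		{ .has_vcc = 1, .vcc_mv = 5000, .nwin = 2, .base = 0x1f0, .len = 8 },
	};
	int idx = pick_cis_entry(entries, 2, 5000);

	printf("selected entry %d\n", idx);	/* picks the 5V entry */
	return 0;
}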
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_pdc2027x.c linux-2.6.18.x86_64.p4/drivers/ata/pata_pdc2027x.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_pdc2027x.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_pdc2027x.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,855 @@
+/*
+ *  Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  Ported to libata by:
+ *  Albert Lee <albertcc@tw.ibm.com> IBM Corporation
+ *
+ *  Copyright (C) 1998-2002		Andre Hedrick <andre@linux-ide.org>
+ *  Portions Copyright (C) 1999 Promise Technology, Inc.
+ *
+ *  Author: Frank Tiernan (frankt@promise.com)
+ *  Released under terms of General Public License
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *  Hardware information only available under NDA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_pdc2027x"
+#define DRV_VERSION	"0.9"
+#undef PDC_DEBUG
+
+#ifdef PDC_DEBUG
+#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
+#else
+#define PDPRINTK(fmt, args...)
+#endif
+
+enum {
+	PDC_MMIO_BAR		= 5,
+
+	PDC_UDMA_100		= 0,
+	PDC_UDMA_133		= 1,
+
+	PDC_100_MHZ		= 100000000,
+	PDC_133_MHZ		= 133333333,
+
+	PDC_SYS_CTL		= 0x1100,
+	PDC_ATA_CTL		= 0x1104,
+	PDC_GLOBAL_CTL		= 0x1108,
+	PDC_CTCR0		= 0x110C,
+	PDC_CTCR1		= 0x1110,
+	PDC_BYTE_COUNT		= 0x1120,
+	PDC_PLL_CTL		= 0x1202,
+};
+
+static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void pdc2027x_error_handler(struct ata_port *ap);
+static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask);
+static int pdc2027x_cable_detect(struct ata_port *ap);
+static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed);
+
+/*
+ * ATA timing tables based on a 133MHz controller clock.
+ * These tables are used only when the controller runs at 133MHz.
+ * If the controller runs at 100MHz, the ASIC hardware sets the timing
+ * registers automatically when a "set features" command is issued to
+ * the device; when the controller clock is 133MHz, the following
+ * tables must be used instead.
+ */
+static struct pdc2027x_pio_timing {
+	u8 value0, value1, value2;
+} pdc2027x_pio_timing_tbl [] = {
+	{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
+	{ 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
+	{ 0x23, 0x26, 0x64 }, /* PIO mode 2 */
+	{ 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
+	{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
+};
+
+static struct pdc2027x_mdma_timing {
+	u8 value0, value1;
+} pdc2027x_mdma_timing_tbl [] = {
+	{ 0xdf, 0x5f }, /* MDMA mode 0 */
+	{ 0x6b, 0x27 }, /* MDMA mode 1 */
+	{ 0x69, 0x25 }, /* MDMA mode 2 */
+};
+
+static struct pdc2027x_udma_timing {
+	u8 value0, value1, value2;
+} pdc2027x_udma_timing_tbl [] = {
+	{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
+	{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
+	{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
+	{ 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
+	{ 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
+	{ 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
+	{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
+};
+
+static const struct pci_device_id pdc2027x_pci_tbl[] = {
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pdc2027x_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pdc2027x_pci_tbl,
+	.probe			= pdc2027x_init_one,
+	.remove			= ata_pci_remove_one,
+};
+
+static struct scsi_host_template pdc2027x_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations pdc2027x_pata100_ops = {
+	.port_disable		= ata_port_disable,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.check_atapi_dma	= pdc2027x_check_atapi_dma,
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= pdc2027x_error_handler,
+	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= pdc2027x_cable_detect,
+
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static struct ata_port_operations pdc2027x_pata133_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= pdc2027x_set_piomode,
+	.set_dmamode		= pdc2027x_set_dmamode,
+	.set_mode		= pdc2027x_set_mode,
+	.mode_filter		= pdc2027x_mode_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.check_atapi_dma	= pdc2027x_check_atapi_dma,
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= pdc2027x_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= pdc2027x_cable_detect,
+
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static struct ata_port_info pdc2027x_port_info[] = {
+	/* PDC_UDMA_100 */
+	{
+		.flags		= ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
+		                  ATA_FLAG_MMIO,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= ATA_UDMA5, /* udma0-5 */
+		.port_ops	= &pdc2027x_pata100_ops,
+	},
+	/* PDC_UDMA_133 */
+	{
+		.flags		= ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
+                        	  ATA_FLAG_MMIO,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= ATA_UDMA6, /* udma0-6 */
+		.port_ops	= &pdc2027x_pata133_ops,
+	},
+};
+
+MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
+MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
+
+/**
+ *	port_mmio - Get the MMIO address of PDC2027x extended registers
+ *	@ap: Port
+ *	@offset: offset from mmio base
+ */
+static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset)
+{
+	return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset;
+}
+
+/**
+ *	dev_mmio - Get the MMIO address of PDC2027x extended registers
+ *	@ap: Port
+ *	@adev: device
+ *	@offset: offset from mmio base
+ */
+static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
+{
+	u8 adj = (adev->devno) ? 0x08 : 0x00;
+	return port_mmio(ap, offset) + adj;
+}
+
+/**
+ *	pdc2027x_cable_detect - Probe host controller cable detect info
+ *	@ap: Port for which cable detect info is desired
+ *
+ *	Read 80c cable indicator from Promise extended register.
+ *      This register is latched when the system is reset.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static int pdc2027x_cable_detect(struct ata_port *ap)
+{
+	u32 cgcr;
+
+	/* check cable detect results */
+	cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL));
+	if (cgcr & (1 << 26))
+		goto cbl40;
+
+	PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
+
+	return ATA_CBL_PATA80;
+cbl40:
+	printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
+	return ATA_CBL_PATA40;
+}
+
+/**
+ * pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
+ * @ap: Port to check
+ */
+static inline int pdc2027x_port_enabled(struct ata_port *ap)
+{
+	return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
+}
+
+/**
+ *	pdc2027x_prereset - prereset for PATA host controller
+ *	@ap: Target port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Probeinit including cable detection.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static int pdc2027x_prereset(struct ata_port *ap, unsigned long deadline)
+{
+	/* Check whether port enabled */
+	if (!pdc2027x_port_enabled(ap))
+		return -ENOENT;
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	pdc2027x_error_handler - Perform reset on PATA port and classify
+ *	@ap: Port to reset
+ *
+ *	Reset PATA phy and classify attached devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void pdc2027x_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	pdc2027x_mode_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: list of modes proposed
+ *
+ *	Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
+		return ata_pci_default_filter(adev, mask);
+
+	/* Check for slave of a Maxtor at UDMA6 */
+	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
+			  ATA_ID_PROD_LEN + 1);
+	/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
+	if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
+		mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
+
+	return ata_pci_default_filter(adev, mask);
+}
+
+/**
+ *	pdc2027x_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port to configure
+ *	@adev: Device whose PIO timings (modes 0-4) are being set
+ *
+ *	Set PIO mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio = adev->pio_mode - XFER_PIO_0;
+	u32 ctcr0, ctcr1;
+
+	PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
+
+	/* Sanity check */
+	if (pio > 4) {
+		printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
+		return;
+
+	}
+
+	/* Set the PIO timing registers using value table for 133MHz */
+	PDPRINTK("Set pio regs... \n");
+
+	ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
+	ctcr0 &= 0xffff0000;
+	ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
+		(pdc2027x_pio_timing_tbl[pio].value1 << 8);
+	writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+
+	ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
+	ctcr1 &= 0x00ffffff;
+	ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
+	writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+
+	PDPRINTK("Set pio regs done\n");
+
+	PDPRINTK("Set to pio mode[%u] \n", pio);
+}
+
+/**
+ *	pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
+ *	@ap: Port to configure
+ *	@adev: Device whose DMA timings (MWDMA or UDMA) are being set
+ *
+ *	Set UDMA mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int dma_mode = adev->dma_mode;
+	u32 ctcr0, ctcr1;
+
+	if ((dma_mode >= XFER_UDMA_0) &&
+	   (dma_mode <= XFER_UDMA_6)) {
+		/* Set the UDMA timing registers with value table for 133MHz */
+		unsigned int udma_mode = dma_mode & 0x07;
+
+		if (dma_mode == XFER_UDMA_2) {
+			/*
+			 * Turn off tHOLD.
+			 * If tHOLD is '1', the hardware will add half clock for data hold time.
+			 * This code segment seems to have no effect, since tHOLD is overwritten below.
+			 */
+			ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
+			writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
+		}
+
+		PDPRINTK("Set udma regs... \n");
+
+		ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
+		ctcr1 &= 0xff000000;
+		ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
+			(pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
+			(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
+		writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+
+		PDPRINTK("Set udma regs done\n");
+
+		PDPRINTK("Set to udma mode[%u] \n", udma_mode);
+
+	} else  if ((dma_mode >= XFER_MW_DMA_0) &&
+		   (dma_mode <= XFER_MW_DMA_2)) {
+		/* Set the MDMA timing registers with value table for 133MHz */
+		unsigned int mdma_mode = dma_mode & 0x07;
+
+		PDPRINTK("Set mdma regs... \n");
+		ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
+
+		ctcr0 &= 0x0000ffff;
+		ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
+			(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
+
+		writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+		PDPRINTK("Set mdma regs done\n");
+
+		PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
+	} else {
+		printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
+	}
+}
+
+/**
+ *	pdc2027x_set_mode - Set the timing registers back to correct values.
+ *	@ap: Port to configure
+ *	@r_failed: Returned device for failure
+ *
+ *	The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
+ *	automatically. Under a 133MHz PLL the values it sets may be incorrect, so this
+ *	function overwrites them with the correct values.
+ */
+static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed)
+{
+	int i;
+
+	i = ata_do_set_mode(ap, r_failed);
+	if (i < 0)
+		return i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+
+		if (ata_dev_enabled(dev)) {
+
+			pdc2027x_set_piomode(ap, dev);
+
+			/*
+			 * Enable prefetch if the device supports PIO only.
+			 */
+			if (dev->xfer_shift == ATA_SHIFT_PIO) {
+				u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1));
+				ctcr1 |= (1 << 25);
+				writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
+
+				PDPRINTK("Turn on prefetch\n");
+			} else {
+				pdc2027x_set_dmamode(ap, dev);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ *	pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
+ *	@qc: Metadata associated with taskfile to check
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ *
+ *	RETURNS: 0 when ATAPI DMA can be used
+ *		 1 otherwise
+ */
+static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *cmd = qc->scsicmd;
+	u8 *scsicmd = cmd->cmnd;
+	int rc = 1; /* atapi dma off by default */
+
+	/*
+	 * This workaround is from Promise's GPL driver.
+	 * If ATAPI DMA is used for commands not in the following
+	 * whitelist (e.g. MODE_SENSE and REQUEST_SENSE), the pdc2027x
+	 * may hit a lost-interrupt problem.
+	 */
+	switch (scsicmd[0]) {
+	case READ_10:
+	case WRITE_10:
+	case READ_12:
+	case WRITE_12:
+	case READ_6:
+	case WRITE_6:
+	case 0xad: /* READ_DVD_STRUCTURE */
+	case 0xbe: /* READ_CD */
+		/* ATAPI DMA is ok */
+		rc = 0;
+		break;
+	default:
+		;
+	}
+
+	return rc;
+}
+
+/**
+ * pdc_read_counter - Read the 30-bit decrementing counter
+ * @host: target ATA host
+ */
+
+static long pdc_read_counter(struct ata_host *host)
+{
+	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+	long counter;
+	int retry = 1;
+	u32 bccrl, bccrh, bccrlv, bccrhv;
+
+retry:
+	bccrl = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
+	bccrh = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
+	rmb();
+
+	/* Read the counter values again for verification */
+	bccrlv = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
+	bccrhv = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
+	rmb();
+
+	counter = (bccrh << 15) | bccrl;
+
+	PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh,  bccrl);
+	PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
+
+	/*
+	 * The 30-bit decrementing counter is read in two pieces.
+	 * An incorrect value may be read while both bccrh and bccrl are changing,
+	 * e.g. when 7900 decrements to 78FF, the wrong value 7800 might be read.
+	 */
+	if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
+		retry--;
+		PDPRINTK("rereading counter\n");
+		goto retry;
+	}
+
+	return counter;
+}
+
+/**
+ * pdc_adjust_pll - Program the PLL for the required output clock
+ * @host: target ATA host
+ * @pll_clock: PLL input clock in Hz
+ * @board_idx: board identifier (selects the 100MHz or 133MHz output)
+ */
+static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int board_idx)
+{
+	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+	u16 pll_ctl;
+	long pll_clock_khz = pll_clock / 1000;
+	long pout_required = board_idx? PDC_133_MHZ:PDC_100_MHZ;
+	long ratio = pout_required / pll_clock_khz;
+	int F, R;
+
+	/* Sanity check */
+	if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
+		printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
+		return;
+	}
+
+#ifdef PDC_DEBUG
+	PDPRINTK("pout_required is %ld\n", pout_required);
+
+	/* Show the current clock value of PLL control register
+	 * (maybe already configured by the firmware)
+	 */
+	pll_ctl = readw(mmio_base + PDC_PLL_CTL);
+
+	PDPRINTK("pll_ctl[%X]\n", pll_ctl);
+#endif
+
+	/*
+	 * Calculate F, R and NO from the required ratio:
+	 * POUT = (F + 2) / ((R + 2) * NO)
+	 */
+	if (ratio < 8600L) { /* 8.6x */
+		/* Using NO = 0x01, R = 0x0D */
+		R = 0x0d;
+	} else if (ratio < 12900L) { /* 12.9x */
+		/* Using NO = 0x01, R = 0x08 */
+		R = 0x08;
+	} else if (ratio < 16100L) { /* 16.1x */
+		/* Using NO = 0x01, R = 0x06 */
+		R = 0x06;
+	} else if (ratio < 64000L) { /* 64x */
+		R = 0x00;
+	} else {
+		/* Invalid ratio */
+		printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
+		return;
+	}
+
+	F = (ratio * (R+2)) / 1000 - 2;
+
+	if (unlikely(F < 0 || F > 127)) {
+		/* Invalid F */
+		printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
+		return;
+	}
+
+	PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
+
+	pll_ctl = (R << 8) | F;
+
+	PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
+
+	writew(pll_ctl, mmio_base + PDC_PLL_CTL);
+	readw(mmio_base + PDC_PLL_CTL); /* flush */
+
+	/* Wait for the PLL circuit to stabilize */
+	mdelay(30);
+
+#ifdef PDC_DEBUG
+	/*
+	 * Show the PLL control register value after the update
+	 */
+	pll_ctl = readw(mmio_base + PDC_PLL_CTL);
+
+	PDPRINTK("pll_ctl[%X]\n", pll_ctl);
+#endif
+
+	return;
+}
+
+/**
+ * pdc_detect_pll_input_clock - Detect the PLL input clock in Hz
+ * @host: target ATA host
+ *
+ * E.g. 16949000 Hz on a 33MHz PCI bus for the pdc20275
+ * (half of the PCI clock).
+ */
+static long pdc_detect_pll_input_clock(struct ata_host *host)
+{
+	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+	u32 scr;
+	long start_count, end_count;
+	long pll_clock;
+
+	/* Read current counter value */
+	start_count = pdc_read_counter(host);
+
+	/* Start the test mode */
+	scr = readl(mmio_base + PDC_SYS_CTL);
+	PDPRINTK("scr[%X]\n", scr);
+	writel(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
+	readl(mmio_base + PDC_SYS_CTL); /* flush */
+
+	/* Let the counter run for 100 ms. */
+	mdelay(100);
+
+	/* Read the counter values again */
+	end_count = pdc_read_counter(host);
+
+	/* Stop the test mode */
+	scr = readl(mmio_base + PDC_SYS_CTL);
+	PDPRINTK("scr[%X]\n", scr);
+	writel(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
+	readl(mmio_base + PDC_SYS_CTL); /* flush */
+
+	/* calculate the input clock in Hz */
+	pll_clock = (start_count - end_count) * 10;
+
+	PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
+	PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
+
+	return pll_clock;
+}
+
+/**
+ * pdc_hardware_init - Initialize the hardware.
+ * @host: target ATA host
+ * @board_idx: board identifier
+ */
+static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+{
+	long pll_clock;
+
+	/*
+	 * Detect the PLL input clock rate.
+	 * On some systems the PCI bus runs at a non-standard clock rate,
+	 * e.g. 25MHz or 40MHz, and the cycle time must be adjusted.
+	 * The pdc20275 controller employs a PLL circuit to help set the
+	 * timing registers correctly.
+	 */
+	pll_clock = pdc_detect_pll_input_clock(host);
+
+	if (pll_clock < 0) /* counter overflow? Try again. */
+		pll_clock = pdc_detect_pll_input_clock(host);
+
+	dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
+
+	/* Adjust PLL control register */
+	pdc_adjust_pll(host, pll_clock, board_idx);
+
+	return 0;
+}
+
+/**
+ * pdc_ata_setup_port - setup the mmio address
+ * @port: ata ioports to setup
+ * @base: base address
+ */
+static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+	port->cmd_addr		=
+	port->data_addr		= base;
+	port->feature_addr	=
+	port->error_addr	= base + 0x05;
+	port->nsect_addr	= base + 0x0a;
+	port->lbal_addr		= base + 0x0f;
+	port->lbam_addr		= base + 0x10;
+	port->lbah_addr		= base + 0x15;
+	port->device_addr	= base + 0x1a;
+	port->command_addr	=
+	port->status_addr	= base + 0x1f;
+	port->altstatus_addr	=
+	port->ctl_addr		= base + 0x81a;
+}
+
+/**
+ * pdc2027x_init_one - PCI probe function
+ * @pdev: instance of pci_dev found
+ * @ent:  matching entry in the id_tbl[]
+ *
+ * Called when an instance of the PCI adapter is inserted.
+ * This function checks whether the hardware is supported,
+ * initializes the hardware and registers an instance of ata_host
+ * with libata.  (Implements struct pci_driver.probe().)
+ */
+static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] =
+		{ &pdc2027x_port_info[board_idx], NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int rc;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	mmio_base = host->iomap[PDC_MMIO_BAR];
+
+	pdc_ata_setup_port(&host->ports[0]->ioaddr, mmio_base + 0x17c0);
+	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x1000;
+	pdc_ata_setup_port(&host->ports[1]->ioaddr, mmio_base + 0x15c0);
+	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x1008;
+
+	//pci_enable_intx(pdev);
+
+	/* initialize adapter */
+	if (pdc_hardware_init(host, board_idx) != 0)
+		return -EIO;
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &pdc2027x_sht);
+}
+
+/**
+ * pdc2027x_init - Called after this module is loaded into the kernel.
+ */
+static int __init pdc2027x_init(void)
+{
+	return pci_register_driver(&pdc2027x_pci_driver);
+}
+
+/**
+ * pdc2027x_exit - Called before this module is unloaded from the kernel
+ */
+static void __exit pdc2027x_exit(void)
+{
+	pci_unregister_driver(&pdc2027x_pci_driver);
+}
+
+module_init(pdc2027x_init);
+module_exit(pdc2027x_exit);
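The PLL programming in pdc_adjust_pll() above reduces to a little integer arithmetic:
pick R from the required output/input ratio, derive F from POUT = (F + 2) / ((R + 2) * NO)
with NO = 1, and pack (R << 8) | F into PDC_PLL_CTL. Here is a standalone sketch, not part
of the patch, that reproduces that arithmetic; for the 16949000 Hz example quoted in the
comments (half of a 33MHz PCI clock) and a 133.33MHz target it yields 0x0d73.

/* Standalone sketch of the pdc_adjust_pll() integer arithmetic. */
#include <stdio.h>

static int pll_ctl_for(long pll_clock_hz, long pout_required_hz)
{
	long pll_clock_khz = pll_clock_hz / 1000;
	long ratio;
	int R, F;

	/* Same sanity range as the driver */
	if (pll_clock_khz < 5000L || pll_clock_khz > 70000L)
		return -1;

	ratio = pout_required_hz / pll_clock_khz;	/* ratio x 1000 */

	if (ratio < 8600L)
		R = 0x0d;
	else if (ratio < 12900L)
		R = 0x08;
	else if (ratio < 16100L)
		R = 0x06;
	else if (ratio < 64000L)
		R = 0x00;
	else
		return -1;

	F = (int)((ratio * (R + 2)) / 1000 - 2);
	if (F < 0 || F > 127)
		return -1;

	return (R << 8) | F;	/* value written to PDC_PLL_CTL */
}

int main(void)
{
	/* 16949000 Hz input, 133.33MHz target output */
	printf("pll_ctl = 0x%04x\n", pll_ctl_for(16949000L, 133333333L));
	return 0;
}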
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_pdc202xx_old.c linux-2.6.18.x86_64.p4/drivers/ata/pata_pdc202xx_old.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_pdc202xx_old.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_pdc202xx_old.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,400 @@
+/*
+ * pata_pdc202xx_old.c 	- Promise PDC202xx PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *			  (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
+ *
+ * First cut with LBA48/ATAPI
+ *
+ * TODO:
+ *	Channel interlock/reset on both required
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_pdc202xx_old"
+#define DRV_VERSION "0.4.2"
+
+static int pdc2026x_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 cis;
+
+	pci_read_config_word(pdev, 0x50, &cis);
+	if (cis & (1 << (10 + ap->port_no)))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	pdc202xx_configure_piomode	-	set chip PIO timing
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@pio: PIO mode
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	so a configure_dmamode call will undo any work we do here and vice
+ *	versa
+ */
+
+static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
+	static u16 pio_timing[5] = {
+		0x0913, 0x050C , 0x0308, 0x0206, 0x0104
+	};
+	u8 r_ap, r_bp;
+
+	pci_read_config_byte(pdev, port, &r_ap);
+	pci_read_config_byte(pdev, port + 1, &r_bp);
+	r_ap &= ~0x3F;	/* Preserve ERRDY_EN, SYNC_IN */
+	r_bp &= ~0x1F;
+	r_ap |= (pio_timing[pio] >> 8);
+	r_bp |= (pio_timing[pio] & 0xFF);
+
+	if (ata_pio_need_iordy(adev))
+		r_ap |= 0x20;	/* IORDY enable */
+	if (adev->class == ATA_DEV_ATA)
+		r_ap |= 0x10;	/* FIFO enable */
+	pci_write_config_byte(pdev, port, r_ap);
+	pci_write_config_byte(pdev, port + 1, r_bp);
+}
+
+/**
+ *	pdc202xx_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	but we want to set the PIO timing by default.
+ */
+
+static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ *	pdc202xx_set_dmamode	-	set DMA mode in chip
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Load DMA cycle times into the chip ready for a DMA transfer
+ *	to occur.
+ */
+
+static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
+	static u8 udma_timing[6][2] = {
+		{ 0x60, 0x03 },	/* 33 MHz Clock */
+		{ 0x40, 0x02 },
+		{ 0x20, 0x01 },
+		{ 0x40, 0x02 },	/* 66 MHz Clock */
+		{ 0x20, 0x01 },
+		{ 0x20, 0x01 }
+	};
+	static u8 mdma_timing[3][2] = {
+		{ 0x60, 0x03 },
+		{ 0x60, 0x04 },
+		{ 0xe0, 0x0f },
+	};
+	u8 r_bp, r_cp;
+
+	pci_read_config_byte(pdev, port + 1, &r_bp);
+	pci_read_config_byte(pdev, port + 2, &r_cp);
+
+	r_bp &= ~0xE0;
+	r_cp &= ~0x0F;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		int speed = adev->dma_mode - XFER_UDMA_0;
+		r_bp |= udma_timing[speed][0];
+		r_cp |= udma_timing[speed][1];
+
+	} else {
+		int speed = adev->dma_mode - XFER_MW_DMA_0;
+		r_bp |= mdma_timing[speed][0];
+		r_cp |= mdma_timing[speed][1];
+	}
+	pci_write_config_byte(pdev, port + 1, r_bp);
+	pci_write_config_byte(pdev, port + 2, r_cp);
+
+}
+
+/**
+ *	pdc2026x_bmdma_start		-	DMA engine begin
+ *	@qc: ATA command
+ *
+ *	In UDMA3 or higher we have to clock switch for the duration of the
+ *	DMA transfer sequence.
+ */
+
+static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_taskfile *tf = &qc->tf;
+	int sel66 = ap->port_no ? 0x08: 0x02;
+
+	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
+	void __iomem *clock = master + 0x11;
+	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+
+	u32 len;
+
+	/* Check we keep host level locking here */
+	if (adev->dma_mode >= XFER_UDMA_2)
+		iowrite8(ioread8(clock) | sel66, clock);
+	else
+		iowrite8(ioread8(clock) & ~sel66, clock);
+
+	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
+	   and move to qc_issue ? */
+	pdc202xx_set_dmamode(ap, qc->dev);
+
+	/* Cases the state machine will not complete correctly without help */
+	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATA_PROT_ATAPI_DMA)
+	{
+		len = qc->nbytes / 2;
+
+		if (tf->flags & ATA_TFLAG_WRITE)
+			len |= 0x06000000;
+		else
+			len |= 0x05000000;
+
+		iowrite32(len, atapi_reg);
+	}
+
+	/* Activate DMA */
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	pdc2026x_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	After a DMA completes we need to put the clock back to 33MHz for
+ *	PIO timings.
+ */
+
+static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_taskfile *tf = &qc->tf;
+
+	int sel66 = ap->port_no ? 0x08: 0x02;
+	/* The clock bits are in the same register for both channels */
+	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
+	void __iomem *clock = master + 0x11;
+	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+
+	/* Cases the state machine will not complete correctly */
+	if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
+		iowrite32(0, atapi_reg);
+		iowrite8(ioread8(clock) & ~sel66, clock);
+	}
+	/* Check we keep host level locking here */
+	/* Flip back to 33MHz for PIO */
+	if (adev->dma_mode >= XFER_UDMA_2)
+		iowrite8(ioread8(clock) & ~sel66, clock);
+
+	ata_bmdma_stop(qc);
+}
+
+/**
+ *	pdc2026x_dev_config	-	device setup hook
+ *	@adev: newly found device
+ *
+ *	Perform chip specific early setup. We need to lock transfers to the
+ *	8-bit sector count limit (256 sectors) to avoid making the state
+ *	engine on the 2026x cards barf.
+ */
+
+static void pdc2026x_dev_config(struct ata_device *adev)
+{
+	adev->max_sectors = 256;
+}
+
+static struct scsi_host_template pdc202xx_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations pdc2024x_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= pdc202xx_set_piomode,
+	.set_dmamode	= pdc202xx_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations pdc2026x_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= pdc202xx_set_piomode,
+	.set_dmamode	= pdc202xx_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+	.dev_config	= pdc2026x_dev_config,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= pdc2026x_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= pdc2026x_bmdma_start,
+	.bmdma_stop	= pdc2026x_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info[3] = {
+		{
+			.sht = &pdc202xx_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = ATA_UDMA2,
+			.port_ops = &pdc2024x_port_ops
+		},
+		{
+			.sht = &pdc202xx_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = ATA_UDMA4,
+			.port_ops = &pdc2026x_port_ops
+		},
+		{
+			.sht = &pdc202xx_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = ATA_UDMA5,
+			.port_ops = &pdc2026x_port_ops
+		}
+
+	};
+	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+
+	if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
+		struct pci_dev *bridge = dev->bus->self;
+		/* Don't grab anything behind a Promise I2O RAID */
+		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
+			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
+				return -ENODEV;
+			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
+				return -ENODEV;
+		}
+	}
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id pdc202xx[] = {
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
+
+	{ },
+};
+
+static struct pci_driver pdc202xx_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= pdc202xx,
+	.probe 		= pdc202xx_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init pdc202xx_init(void)
+{
+	return pci_register_driver(&pdc202xx_pci_driver);
+}
+
+static void __exit pdc202xx_exit(void)
+{
+	pci_unregister_driver(&pdc202xx_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc202xx);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pdc202xx_init);
+module_exit(pdc202xx_exit);
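The PIO setup in pdc202xx_configure_piomode() above is a straightforward read-modify-write
of two per-drive timing registers: the 16-bit word from pio_timing[] is split across
registers A and B while the chip's own status bits are preserved, then the IORDY and FIFO
enables are ORed in. A standalone sketch of that packing, not part of the patch, with
made-up initial register contents:

/* Standalone sketch of the PDC202xx PIO timing register packing. */
#include <stdio.h>

static const unsigned short pio_timing[5] = {
	0x0913, 0x050C, 0x0308, 0x0206, 0x0104
};

static void pack_pio(unsigned char *r_ap, unsigned char *r_bp, int pio,
		     int want_iordy, int is_ata_disk)
{
	*r_ap &= ~0x3F;			/* preserve ERRDY_EN, SYNC_IN */
	*r_bp &= ~0x1F;
	*r_ap |= pio_timing[pio] >> 8;
	*r_bp |= pio_timing[pio] & 0xFF;
	if (want_iordy)
		*r_ap |= 0x20;		/* IORDY enable */
	if (is_ata_disk)
		*r_ap |= 0x10;		/* FIFO enable */
}

int main(void)
{
	unsigned char r_ap = 0xC0, r_bp = 0xE0;	/* pretend register contents */

	pack_pio(&r_ap, &r_bp, 4, 1, 1);	/* PIO 4, IORDY, ATA disk */
	printf("r_ap=0x%02X r_bp=0x%02X\n", r_ap, r_bp);
	return 0;
}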
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_platform.c linux-2.6.18.x86_64.p4/drivers/ata/pata_platform.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_platform.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_platform.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,261 @@
+/*
+ * Generic platform device PATA driver
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * Based on pata_pcmcia:
+ *
+ *   Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/pata_platform.h>
+
+#define DRV_NAME "pata_platform"
+#define DRV_VERSION "1.0"
+
+static int pio_mask = 1;
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured..
+ */
+static int pata_platform_set_mode(struct ata_port *ap, struct ata_device **unused)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+
+		if (ata_dev_enabled(dev)) {
+			/* We don't really care */
+			dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+		}
+	}
+	return 0;
+}
+
+static int ata_dummy_ret0(struct ata_port *ap)	{ return 0; }
+
+static struct scsi_host_template pata_platform_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations pata_platform_port_ops = {
+	.set_mode		= pata_platform_set_mode,
+
+	.port_disable		= ata_port_disable,
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_unknown,
+
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.data_xfer		= ata_data_xfer_noirq,
+
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_dummy_ret0,
+};
+
+static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+				     struct pata_platform_info *info)
+{
+	unsigned int shift = 0;
+
+	/* Fixup the port shift for platforms that need it */
+	if (info && info->ioport_shift)
+		shift = info->ioport_shift;
+
+	ioaddr->data_addr	= ioaddr->cmd_addr + (ATA_REG_DATA    << shift);
+	ioaddr->error_addr	= ioaddr->cmd_addr + (ATA_REG_ERR     << shift);
+	ioaddr->feature_addr	= ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
+	ioaddr->nsect_addr	= ioaddr->cmd_addr + (ATA_REG_NSECT   << shift);
+	ioaddr->lbal_addr	= ioaddr->cmd_addr + (ATA_REG_LBAL    << shift);
+	ioaddr->lbam_addr	= ioaddr->cmd_addr + (ATA_REG_LBAM    << shift);
+	ioaddr->lbah_addr	= ioaddr->cmd_addr + (ATA_REG_LBAH    << shift);
+	ioaddr->device_addr	= ioaddr->cmd_addr + (ATA_REG_DEVICE  << shift);
+	ioaddr->status_addr	= ioaddr->cmd_addr + (ATA_REG_STATUS  << shift);
+	ioaddr->command_addr	= ioaddr->cmd_addr + (ATA_REG_CMD     << shift);
+}
+
+/**
+ *	pata_platform_probe		-	attach a platform interface
+ *	@pdev: platform device
+ *
+ *	Register a platform bus IDE interface. Such interfaces are PIO and we
+ *	assume they do not support IRQ sharing.
+ *
+ *	Platform devices are expected to contain 3 resources per port:
+ *
+ *		- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *		- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *		- IRQ	   (IORESOURCE_IRQ)
+ *
+ *	If the base resources are both mem types, the ioremap() is handled
+ *	here. For IORESOURCE_IO, it's assumed that there's no remapping
+ *	necessary.
+ */
+static int __devinit pata_platform_probe(struct platform_device *pdev)
+{
+	struct resource *io_res, *ctl_res;
+	struct ata_host *host;
+	struct ata_port *ap;
+	unsigned int mmio;
+
+	/*
+	 * Simple resource validation ..
+	 */
+	if (unlikely(pdev->num_resources != 3)) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the I/O base first
+	 */
+	io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (io_res == NULL) {
+		io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (unlikely(io_res == NULL))
+			return -EINVAL;
+	}
+
+	/*
+	 * Then the CTL base
+	 */
+	ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+	if (ctl_res == NULL) {
+		ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (unlikely(ctl_res == NULL))
+			return -EINVAL;
+	}
+
+	/*
+	 * Check for MMIO
+	 */
+	mmio = (( io_res->flags == IORESOURCE_MEM) &&
+		(ctl_res->flags == IORESOURCE_MEM));
+
+	/*
+	 * Now that that's out of the way, wire up the port..
+	 */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		return -ENOMEM;
+	ap = host->ports[0];
+
+	ap->ops = &pata_platform_port_ops;
+	ap->pio_mask = pio_mask;
+	ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+	/*
+	 * Handle the MMIO case
+	 */
+	if (mmio) {
+		ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, io_res->start,
+				io_res->end - io_res->start + 1);
+		ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
+				ctl_res->end - ctl_res->start + 1);
+	} else {
+		ap->ioaddr.cmd_addr = devm_ioport_map(&pdev->dev, io_res->start,
+				io_res->end - io_res->start + 1);
+		ap->ioaddr.ctl_addr = devm_ioport_map(&pdev->dev, ctl_res->start,
+				ctl_res->end - ctl_res->start + 1);
+	}
+	if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
+		dev_err(&pdev->dev, "failed to map IO/CTL base\n");
+		return -ENOMEM;
+	}
+
+	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+	pata_platform_setup_port(&ap->ioaddr, pdev->dev.platform_data);
+
+	/* activate */
+	return ata_host_activate(host, platform_get_irq(pdev, 0), ata_interrupt,
+				 0, &pata_platform_sht);
+}
+
+/**
+ *	pata_platform_remove	-	unplug a platform interface
+ *	@pdev: platform device
+ *
+ *	A platform bus ATA device has been unplugged. Perform the needed
+ *	cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit pata_platform_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+
+	ata_host_detach(host);
+
+	return 0;
+}
+
+static struct platform_driver pata_platform_driver = {
+	.probe		= pata_platform_probe,
+	.remove		= __devexit_p(pata_platform_remove),
+	.driver = {
+		.name		= DRV_NAME,
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init pata_platform_init(void)
+{
+	return platform_driver_register(&pata_platform_driver);
+}
+
+static void __exit pata_platform_exit(void)
+{
+	platform_driver_unregister(&pata_platform_driver);
+}
+module_init(pata_platform_init);
+module_exit(pata_platform_exit);
+
+module_param(pio_mask, int, 0);
+
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("low-level driver for platform device ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
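pata_platform_setup_port() above is pure offset arithmetic: each classic taskfile register
lives at its standard ATA offset shifted left by the platform's ioport_shift. A standalone
sketch of the same arithmetic, not part of the patch; the ATA_REG_* values are the standard
taskfile offsets redeclared locally:

/* Standalone sketch of the ioport_shift address arithmetic. */
#include <stdio.h>

enum {
	ATA_REG_DATA = 0, ATA_REG_ERR = 1, ATA_REG_NSECT = 2,
	ATA_REG_LBAL = 3, ATA_REG_LBAM = 4, ATA_REG_LBAH = 5,
	ATA_REG_DEVICE = 6, ATA_REG_STATUS = 7,
};

int main(void)
{
	unsigned long cmd_base = 0x1f0;	/* example command block base */
	unsigned int shift = 2;		/* registers on 32-bit boundaries */
	int reg;

	for (reg = ATA_REG_DATA; reg <= ATA_REG_STATUS; reg++)
		printf("reg %d at 0x%lx\n", reg,
		       cmd_base + ((unsigned long)reg << shift));
	return 0;
}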
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_qdi.c linux-2.6.18.x86_64.p4/drivers/ata/pata_qdi.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_qdi.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_qdi.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,418 @@
+/*
+ *    pata_qdi.c - QDI VLB ATA controllers
+ *	(C) 2006 Red Hat <alan@redhat.com>
+ *
+ * This driver mostly exists as a proof of concept for non-PCI devices under
+ * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
+ * useful.
+ *
+ * Tuning code written from the documentation at
+ * http://www.ryston.cz/petr/vlb/qd6500.html
+ * http://www.ryston.cz/petr/vlb/qd6580.html
+ *
+ * Probe code based on drivers/ide/legacy/qd65xx.c
+ * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
+ * Samuel Thibault <samuel.thibault@fnac.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_qdi"
+#define DRV_VERSION "0.3.1"
+
+#define NR_HOST 4	/* Two 6580s */
+
+struct qdi_data {
+	unsigned long timing;
+	u8 clock[2];
+	u8 last;
+	int fast;
+	struct platform_device *platform_dev;
+
+};
+
+static struct ata_host *qdi_host[NR_HOST];
+static struct qdi_data qdi_data[NR_HOST];
+static int nr_qdi_host;
+
+#ifdef MODULE
+static int probe_qdi = 1;
+#else
+static int probe_qdi;
+#endif
+
+static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing t;
+	struct qdi_data *qdi = ap->host->private_data;
+	int active, recovery;
+	u8 timing;
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+	if (qdi->fast) {
+		active = 8 - FIT(t.active, 1, 8);
+		recovery = 18 - FIT(t.recover, 3, 18);
+	} else {
+		active = 9 - FIT(t.active, 2, 9);
+		recovery = 15 - FIT(t.recover, 0, 15);
+	}
+	timing = (recovery << 4) | active | 0x08;
+
+	qdi->clock[adev->devno] = timing;
+
+	outb(timing, qdi->timing);
+}
+
+static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing t;
+	struct qdi_data *qdi = ap->host->private_data;
+	int active, recovery;
+	u8 timing;
+
+	/* Get the timing data in cycles */
+	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+	if (qdi->fast) {
+		active = 8 - FIT(t.active, 1, 8);
+		recovery = 18 - FIT(t.recover, 3, 18);
+	} else {
+		active = 9 - FIT(t.active, 2, 9);
+		recovery = 15 - FIT(t.recover, 0, 15);
+	}
+	timing = (recovery << 4) | active | 0x08;
+
+	qdi->clock[adev->devno] = timing;
+
+	outb(timing, qdi->timing);
+
+	/* Clear the FIFO */
+	if (adev->class != ATA_DEV_ATA)
+		outb(0x5F, (qdi->timing & 0xFFF0) + 3);
+}
+
+/**
+ *	qdi_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings.
+ */
+
+static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct qdi_data *qdi = ap->host->private_data;
+
+	if (qdi->clock[adev->devno] != qdi->last) {
+		if (adev->pio_mode) {
+			qdi->last = qdi->clock[adev->devno];
+			outb(qdi->clock[adev->devno], qdi->timing);
+		}
+	}
+	return ata_qc_issue_prot(qc);
+}
+
+static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
+{
+	struct ata_port *ap = adev->ap;
+	int slop = buflen & 3;
+
+	if (ata_id_has_dword_io(adev->id)) {
+		if (write_data)
+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+		else
+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+		if (unlikely(slop)) {
+			u32 pad;
+			if (write_data) {
+				memcpy(&pad, buf + buflen - slop, slop);
+				pad = le32_to_cpu(pad);
+				iowrite32(pad, ap->ioaddr.data_addr);
+			} else {
+				pad = ioread32(ap->ioaddr.data_addr);
+				pad = cpu_to_le32(pad);
+				memcpy(buf + buflen - slop, &pad, slop);
+			}
+		}
+	} else
+		ata_data_xfer(adev, buf, buflen, write_data);
+}
+
+static struct scsi_host_template qdi_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations qdi6500_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= qdi6500_set_piomode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= qdi_qc_issue_prot,
+
+	.data_xfer	= qdi_data_xfer,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations qdi6580_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= qdi6580_set_piomode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= qdi_qc_issue_prot,
+
+	.data_xfer	= qdi_data_xfer,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	qdi_init_one		-	attach a qdi interface
+ *	@port: Timing register I/O port
+ *	@type: Controller type to display (6500 or 6580)
+ *	@io: I/O port start
+ *	@irq: interrupt line
+ *	@fast: True if on a > 33MHz VLB
+ *
+ *	Register an ISA bus IDE interface. Such interfaces are PIO and we
+ *	assume they do not support IRQ sharing.
+ */
+
+static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
+{
+	struct platform_device *pdev;
+	struct ata_host *host;
+	struct ata_port *ap;
+	void __iomem *io_addr, *ctl_addr;
+	int ret;
+
+	/*
+	 *	Fill in a probe structure first of all
+	 */
+
+	pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	ret = -ENOMEM;
+	io_addr = devm_ioport_map(&pdev->dev, io, 8);
+	ctl_addr = devm_ioport_map(&pdev->dev, io + 0x206, 1);
+	if (!io_addr || !ctl_addr)
+		goto fail;
+
+	ret = -ENOMEM;
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto fail;
+	ap = host->ports[0];
+
+	if (type == 6580) {
+		ap->ops = &qdi6580_port_ops;
+		ap->pio_mask = 0x1F;
+		ap->flags |= ATA_FLAG_SLAVE_POSS;
+	} else {
+		ap->ops = &qdi6500_port_ops;
+		ap->pio_mask = 0x07;	/* Actually PIO3 !IORDY is possible */
+		ap->flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+	}
+
+	ap->ioaddr.cmd_addr = io_addr;
+	ap->ioaddr.altstatus_addr = ctl_addr;
+	ap->ioaddr.ctl_addr = ctl_addr;
+	ata_std_ports(&ap->ioaddr);
+
+	/*
+	 *	Hook in a private data structure per channel
+	 */
+	ap->private_data = &qdi_data[nr_qdi_host];
+
+	qdi_data[nr_qdi_host].timing = port;
+	qdi_data[nr_qdi_host].fast = fast;
+	qdi_data[nr_qdi_host].platform_dev = pdev;
+
+	printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
+
+	/* activate */
+	ret = ata_host_activate(host, irq, ata_interrupt, 0, &qdi_sht);
+	if (ret)
+		goto fail;
+
+	qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
+	return 0;
+
+ fail:
+	platform_device_unregister(pdev);
+	return ret;
+}
+
+/**
+ *	qdi_init		-	attach qdi interfaces
+ *
+ *	Attach qdi IDE interfaces by scanning the ports they may occupy.
+ */
+
+static __init int qdi_init(void)
+{
+	unsigned long flags;
+	static const unsigned long qd_port[2] = { 0x30, 0xB0 };
+	static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
+	static const int ide_irq[2] = { 14, 15 };
+
+	int ct = 0;
+	int i;
+
+	if (probe_qdi == 0)
+		return -ENODEV;
+
+	/*
+ 	 *	Check each possible QD65xx base address
+	 */
+
+	for (i = 0; i < 2; i++) {
+		unsigned long port = qd_port[i];
+		u8 r, res;
+
+
+		if (request_region(port, 2, "pata_qdi")) {
+			/* Check for a card */
+			local_irq_save(flags);
+			r = inb_p(port);
+			outb_p(0x19, port);
+			res = inb_p(port);
+			outb_p(r, port);
+			local_irq_restore(flags);
+
+			/* Fail */
+			if (res == 0x19)
+			{
+				release_region(port, 2);
+				continue;
+			}
+
+			/* Passes the presence test */
+			r = inb_p(port + 1);	/* Check the config register agrees with the port we probed */
+			if ((r & 2) >> 1 != i) {
+				release_region(port, 2);
+				continue;
+			}
+
+			/* Check card type */
+			if ((r & 0xF0) == 0xC0) {
+				/* QD6500: single channel */
+				if (r & 8) {
+					/* Disabled ? */
+					release_region(port, 2);
+					continue;
+				}
+				if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
+					ct++;
+			}
+			if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
+				/* QD6580: dual channel */
+				if (!request_region(port + 2 , 2, "pata_qdi"))
+				{
+					release_region(port, 2);
+					continue;
+				}
+				res = inb(port + 3);
+				if (res & 1) {
+					/* Single channel mode */
+					if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
+						ct++;
+				} else {
+					/* Dual channel mode */
+					if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0)
+						ct++;
+					if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0)
+						ct++;
+				}
+			}
+		}
+	}
+	if (ct != 0)
+		return 0;
+	return -ENODEV;
+}
+
+static __exit void qdi_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_qdi_host; i++) {
+		ata_host_detach(qdi_host[i]);
+		/* Free the control resource. The 6580 dual channel has the resources
+		 * claimed as a pair of 2 byte resources so we need no special cases...
+		 */
+		release_region(qdi_data[i].timing, 2);
+		platform_device_unregister(qdi_data[i].platform_dev);
+	}
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for qdi ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(qdi_init);
+module_exit(qdi_exit);
+
+module_param(probe_qdi, int, 0);
+
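
For reference, the QD65xx presence handshake that qdi_init() performs above
boils down to the stand-alone sketch below.  This is illustration only, not
part of the patch: it assumes x86 user space with root, glibc's <sys/io.h>
inb_p()/outb_p()/ioperm(), and the 0x30/0xB0 bases used by the driver.

#include <stdio.h>
#include <sys/io.h>

/* Write a test pattern to the QD65xx configuration port and see whether
   the card latched something other than what we wrote back out. */
static int qd65xx_present(unsigned long port)
{
	unsigned char saved, probe;

	saved = inb_p(port);		/* remember the current value */
	outb_p(0x19, port);		/* write the test pattern */
	probe = inb_p(port);		/* a real card returns something else */
	outb_p(saved, port);		/* restore whatever was there */

	return probe != 0x19;		/* reading 0x19 back means no card */
}

int main(void)
{
	static const unsigned long bases[2] = { 0x30, 0xB0 };
	int i;

	for (i = 0; i < 2; i++) {
		if (ioperm(bases[i], 2, 1)) {
			perror("ioperm");
			return 1;
		}
		printf("QD65xx at 0x%lx: %s\n", bases[i],
		       qd65xx_present(bases[i]) ? "possible" : "absent");
	}
	return 0;
}
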
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_radisys.c linux-2.6.18.x86_64.p4/drivers/ata/pata_radisys.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_radisys.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_radisys.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,309 @@
+/*
+ *    pata_radisys.c - Radisys R82600 PATA controller
+ *
+ *	(C) 2006 Red Hat <alan@redhat.com>
+ *
+ *    Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ *    A PIIX relative, this device has a single ATA channel and no
+ *    slave timings, SITRE or PPE. In that sense it is a close relative
+ *    of the original PIIX. It does however support UDMA 33/66 per channel
+ *    although no other modes/timings. Also lacking is 32bit I/O on the ATA
+ *    port.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_radisys"
+#define DRV_VERSION	"0.4.4"
+
+/**
+ *	radisys_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: ATA port
+ *	@adev: Device whose timings we are configuring
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u16 idetm_data;
+	int control = 0;
+
+	/*
+	 *	See Intel Document 298600-004 for the timing programming rules
+	 *	for PIIX/ICH. Note that the early PIIX does not have the slave
+	 *	timing port at 0x44. The Radisys is a relative of the PIIX
+	 *	but not the same so be careful.
+	 */
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },	/* Check me */
+			    { 0, 0 },
+			    { 1, 1 },
+			    { 2, 2 },
+			    { 3, 3 }, };
+
+	if (pio > 0)
+		control |= 1;	/* TIME1 enable */
+	if (ata_pio_need_iordy(adev))
+		control |= 2;	/* IE IORDY */
+
+	pci_read_config_word(dev, 0x40, &idetm_data);
+
+	/* Enable IE and TIME as appropriate. Clear the other
+	   drive timing bits */
+	idetm_data &= 0xCCCC;
+	idetm_data |= (control << (4 * adev->devno));
+	idetm_data |= (timings[pio][0] << 12) |
+			(timings[pio][1] << 8);
+	pci_write_config_word(dev, 0x40, idetm_data);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	radisys_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set MWDMA/UDMA mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	u16 idetm_data;
+	u8 udma_enable;
+
+	static const	 /* ISP  RTC */
+	u8 timings[][2]	= { { 0, 0 },
+			    { 0, 0 },
+			    { 1, 1 },
+			    { 2, 2 },
+			    { 3, 3 }, };
+
+	/*
+	 * MWDMA is driven by the PIO timings. We must also enable
+	 * IORDY unconditionally.
+	 */
+
+	pci_read_config_word(dev, 0x40, &idetm_data);
+	pci_read_config_byte(dev, 0x48, &udma_enable);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		unsigned int mwdma	= adev->dma_mode - XFER_MW_DMA_0;
+		const unsigned int needed_pio[3] = {
+			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+		};
+		int pio = needed_pio[mwdma] - XFER_PIO_0;
+		int control = 3;	/* IORDY|TIME0 */
+
+		/* If the drive MWDMA is faster than it can do PIO then
+		   we must force PIO0 for PIO cycles. */
+
+		if (adev->pio_mode < needed_pio[mwdma])
+			control = 1;
+
+		/* Mask out the relevant control and timing bits we will load. Also
+		   clear the other drive TIME register as a precaution */
+
+		idetm_data &= 0xCCCC;
+		idetm_data |= control << (4 * adev->devno);
+		idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+
+		udma_enable &= ~(1 << adev->devno);
+	} else {
+		u8 udma_mode;
+
+		/* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
+
+		pci_read_config_byte(dev, 0x4A, &udma_mode);
+
+		if (adev->xfer_mode == XFER_UDMA_2)
+			udma_mode &= ~ (1 << adev->devno);
+		else /* UDMA 4 */
+			udma_mode |= (1 << adev->devno);
+
+		pci_write_config_byte(dev, 0x4A, udma_mode);
+
+		udma_enable |= (1 << adev->devno);
+	}
+	pci_write_config_word(dev, 0x40, idetm_data);
+	pci_write_config_byte(dev, 0x48, udma_enable);
+
+	/* Track which port is configured */
+	ap->private_data = adev;
+}
+
+/**
+ *	radisys_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	that, even if we get this wrong, cycles to the other device will
+ *	be made PIO0.
+ */
+
+static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+
+	if (adev != ap->private_data) {
+		/* UDMA timing is not shared */
+		if (adev->dma_mode < XFER_UDMA_0) {
+			if (adev->dma_mode)
+				radisys_set_dmamode(ap, adev);
+			else if (adev->pio_mode)
+				radisys_set_piomode(ap, adev);
+		}
+	}
+	return ata_qc_issue_prot(qc);
+}
+
+
+static struct scsi_host_template radisys_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations radisys_pata_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= radisys_set_piomode,
+	.set_dmamode		= radisys_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_unknown,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= radisys_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+
+/**
+ *	radisys_init_one - Register Radisys ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in radisys_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We set up the port info and then
+ *	hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht		= &radisys_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x14, /* UDMA33/66 only */
+		.port_ops	= &radisys_pata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id radisys_pci_tbl[] = {
+	{ PCI_VDEVICE(RADISYS, 0x8201), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver radisys_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= radisys_pci_tbl,
+	.probe			= radisys_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init radisys_init(void)
+{
+	return pci_register_driver(&radisys_pci_driver);
+}
+
+static void __exit radisys_exit(void)
+{
+	pci_unregister_driver(&radisys_pci_driver);
+}
+
+module_init(radisys_init);
+module_exit(radisys_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
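
The IDETIM packing in radisys_set_piomode() above is fairly dense, so here
is the same bit arithmetic spelled out as a plain C helper.  Illustration
only, not part of the patch; the field positions and the PIO -> ISP/RTC
table are taken from the code above, and the example values in main() are
made up.

#include <stdint.h>
#include <stdio.h>

/* Rebuild the 16-bit IDETIM word at PCI config offset 0x40: the TIME/IE
   control bits sit at bits 0-1 (drive 0) or 4-5 (drive 1), RTC at bits
   8-9, ISP at bits 12-13.  The 0xCCCC mask preserves everything else. */
static uint16_t radisys_pack_idetim(uint16_t old, int devno, int pio, int iordy)
{
	static const uint8_t isp[5] = { 0, 0, 1, 2, 3 };
	static const uint8_t rtc[5] = { 0, 0, 1, 2, 3 };
	unsigned int control = 0;

	if (pio > 0)
		control |= 1;		/* TIME1: enable fast timing */
	if (iordy)
		control |= 2;		/* IE: honour IORDY */

	old &= 0xCCCC;			/* clear both control fields plus ISP/RTC */
	old |= control << (4 * devno);
	old |= (isp[pio] << 12) | (rtc[pio] << 8);
	return old;
}

int main(void)
{
	/* drive 1, PIO4, IORDY usable, starting from a zeroed register */
	printf("IDETIM = 0x%04x\n", radisys_pack_idetim(0, 1, 4, 1));
	return 0;
}
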
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_rz1000.c linux-2.6.18.x86_64.p4/drivers/ata/pata_rz1000.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_rz1000.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_rz1000.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,200 @@
+/*
+ *  RZ1000/1001 driver based upon
+ *
+ *  linux/drivers/ide/pci/rz1000.c	Version 0.06	January 12, 2003
+ *  Copyright (C) 1995-1998  Linus Torvalds & author (see below)
+ *  Principal Author:  mlord@pobox.com (Mark Lord)
+ *
+ *  See linux/MAINTAINERS for address of current maintainer.
+ *
+ *  This file provides support for disabling the buggy read-ahead
+ *  mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"pata_rz1000"
+#define DRV_VERSION	"0.2.4"
+
+
+/**
+ *	rz1000_set_mode		-	mode setting function
+ *	@ap: ATA interface
+ *	@unused: returned device on set_mode failure
+ *
+ *	Use a non-standard set_mode function. We don't want to be tuned. We
+ *	would prefer to be BIOS generic but for the fact our hardware is
+ *	whacked out.
+ */
+
+static int rz1000_set_mode(struct ata_port *ap, struct ata_device **unused)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_enabled(dev)) {
+			/* We don't really care */
+			dev->pio_mode = XFER_PIO_0;
+			dev->xfer_mode = XFER_PIO_0;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			dev->flags |= ATA_DFLAG_PIO;
+			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+		}
+	}
+	return 0;
+}
+
+
+static struct scsi_host_template rz1000_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations rz1000_port_ops = {
+	.set_mode	= rz1000_set_mode,
+
+	.port_disable	= ata_port_disable,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int rz1000_fifo_disable(struct pci_dev *pdev)
+{
+	u16 reg;
+	/* Be exceptionally paranoid as we must be sure to apply the fix */
+	if (pci_read_config_word(pdev, 0x40, &reg) != 0)
+		return -1;
+	reg &= 0xDFFF;
+	if (pci_write_config_word(pdev, 0x40, reg) != 0)
+		return -1;
+	printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
+	return 0;
+}
+
+/**
+ *	rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in rz1000_pci_tbl matching with @pdev
+ *
+ *	Configure an RZ1000 interface. This doesn't require much special
+ *	handling except that we *MUST* kill the chipset readahead or the
+ *	user may experience data corruption.
+ */
+
+static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht = &rz1000_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &rz1000_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+
+	if (!printed_version++)
+		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+	if (rz1000_fifo_disable(pdev) == 0)
+		return ata_pci_init_one(pdev, ppi);
+
+	printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
+	/* Not safe to use so skip */
+	return -ENODEV;
+}
+
+#ifdef CONFIG_PM
+static int rz1000_reinit_one(struct pci_dev *pdev)
+{
+	/* If this fails on resume (which is a "can't happen" case), we
+	   must stop as any progress risks data loss */
+	if (rz1000_fifo_disable(pdev))
+		panic("rz1000 fifo");
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id pata_rz1000[] = {
+	{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
+	{ PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
+
+	{ },
+};
+
+static struct pci_driver rz1000_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= pata_rz1000,
+	.probe 		= rz1000_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= rz1000_reinit_one,
+#endif
+};
+
+static int __init rz1000_init(void)
+{
+	return pci_register_driver(&rz1000_pci_driver);
+}
+
+static void __exit rz1000_exit(void)
+{
+	pci_unregister_driver(&rz1000_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pata_rz1000);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(rz1000_init);
+module_exit(rz1000_exit);
+
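
The whole RZ1000 fix above amounts to clearing bit 13 of the 16-bit config
word at offset 0x40.  A trivial stand-alone version of the bit arithmetic,
illustration only (the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

/* Clear bit 13 (0x2000), which the driver above treats as the read-ahead
   enable, from the value read at PCI config offset 0x40. */
static uint16_t rz1000_clear_readahead(uint16_t reg40)
{
	return reg40 & 0xDFFF;
}

int main(void)
{
	uint16_t before = 0x2011;	/* hypothetical value with read-ahead on */

	printf("0x%04x -> 0x%04x\n", before, rz1000_clear_readahead(before));
	return 0;
}
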
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_sc1200.c linux-2.6.18.x86_64.p4/drivers/ata/pata_sc1200.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_sc1200.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_sc1200.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,294 @@
+/*
+ * New ATA layer SC1200 driver		Alan Cox <alan@redhat.com>
+ *
+ * TODO: Mode selection filtering
+ * TODO: Can't enable second channel until the ATA core can serialize channels
+ * TODO: Needs custom DMA cleanup code
+ *
+ * Based very heavily on
+ *
+ * linux/drivers/ide/pci/sc1200.c		Version 0.91	28-Jan-2003
+ *
+ * Copyright (C) 2000-2002		Mark Lord <mlord@pobox.com>
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Development of this chipset driver was funded
+ * by the nice folks at National Semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME	"sc1200"
+#define DRV_VERSION	"0.2.5"
+
+#define SC1200_REV_A	0x00
+#define SC1200_REV_B1	0x01
+#define SC1200_REV_B3	0x02
+#define SC1200_REV_C1	0x03
+#define SC1200_REV_D1	0x04
+
+/**
+ *	sc1200_clock	-	PCI clock
+ *
+ *	Return the PCI bus clocking for the SC1200 chipset configuration
+ *	in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
+ */
+
+static int sc1200_clock(void)
+{
+	/* Magic registers that give us the chipset data */
+	u8 chip_id = inb(0x903C);
+	u8 silicon_rev = inb(0x903D);
+	u16 pci_clock;
+
+	if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
+		return 0;	/* 33 MHz mode */
+
+	/* Clock generator configuration at 0x901E: bits 8/9 are the PCI clocking.
+	   0/3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
+
+	pci_clock = inw(0x901E);
+	pci_clock >>= 8;
+	pci_clock &= 0x03;
+	if (pci_clock == 3)
+		pci_clock = 0;
+	return pci_clock;
+}
+
+/**
+ *	sc1200_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ *
+ *	Set our PIO requirements. This is fairly simple on the SC1200
+ */
+
+static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u32 pio_timings[4][5] = {
+		{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},	// format0, 33MHz
+		{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010},	// format1, 33MHz
+		{0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021},	// format1, 48MHz
+		{0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131}	// format1, 66MHz
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 format;
+	unsigned int reg = 0x40 + 0x10 * ap->port_no;
+	int mode = adev->pio_mode - XFER_PIO_0;
+
+	pci_read_config_dword(pdev, reg + 4, &format);
+	format >>= 31;
+	format += sc1200_clock();
+	pci_write_config_dword(pdev, reg + 8 * adev->devno,
+				pio_timings[format][mode]);
+}
+
+/**
+ *	sc1200_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ *	We cannot mix MWDMA and UDMA without reloading the timings on each
+ *	switch between master and slave.
+ */
+
+static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u32 udma_timing[3][3] = {
+		{ 0x00921250, 0x00911140, 0x00911030 },
+		{ 0x00932470, 0x00922260, 0x00922140 },
+		{ 0x009436A1, 0x00933481, 0x00923261 }
+	};
+
+	static const u32 mwdma_timing[3][3] = {
+		{ 0x00077771, 0x00012121, 0x00002020 },
+		{ 0x000BBBB2, 0x00024241, 0x00013131 },
+		{ 0x000FFFF3, 0x00035352, 0x00015151 }
+	};
+
+	int clock = sc1200_clock();
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int reg = 0x40 + 0x10 * ap->port_no;
+	int mode = adev->dma_mode;
+	u32 format;
+
+	if (mode >= XFER_UDMA_0)
+		format = udma_timing[clock][mode - XFER_UDMA_0];
+	else
+		format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
+
+	if (adev->devno == 0) {
+		u32 timings;
+
+		pci_read_config_dword(pdev, reg + 4, &timings);
+		timings &= 0x80000000UL;
+		timings |= format;
+		pci_write_config_dword(pdev, reg + 4, timings);
+	} else
+		pci_write_config_dword(pdev, reg + 12, format);
+}
+
+/**
+ *	sc1200_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.  Specifically, we have a problem in that there is only
+ *	one MWDMA/UDMA bit.
+ */
+
+static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct ata_device *prev = ap->private_data;
+
+	/* See if the DMA settings could be wrong */
+	if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
+		/* Maybe, but do the channels match MWDMA/UDMA ? */
+		if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
+		    (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
+		    	/* Switch the mode bits */
+		    	sc1200_set_dmamode(ap, adev);
+	}
+
+	return ata_qc_issue_prot(qc);
+}
+
+static struct scsi_host_template sc1200_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations sc1200_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= sc1200_set_piomode,
+	.set_dmamode	= sc1200_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= sc1200_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	sc1200_init_one		-	Initialise an SC1200
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ *	Just throw the needed data at the libata helper and it does all
+ *	our work.
+ */
+
+static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &sc1200_sht,
+		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x07,
+		.port_ops = &sc1200_port_ops
+	};
+	/* Can't enable port 2 yet, see top comments */
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id sc1200[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
+
+	{ },
+};
+
+static struct pci_driver sc1200_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= sc1200,
+	.probe 		= sc1200_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init sc1200_init(void)
+{
+	return pci_register_driver(&sc1200_pci_driver);
+}
+
+static void __exit sc1200_exit(void)
+{
+	pci_unregister_driver(&sc1200_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox, Mark Lord");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sc1200);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sc1200_init);
+module_exit(sc1200_exit);
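
sc1200_set_piomode() above folds two decisions into one array index; pulled
apart it looks like the sketch below.  Illustration only, not part of the
patch: "cfg" stands in for the dword read back at reg + 4 and "clk_bits"
for bits 8/9 of I/O port 0x901E.

#include <stdio.h>

/* Mirror of sc1200_clock(): 0 or 3 in the clock generator bits means a
   33MHz PCI bus, 1 means 48MHz, 2 means 66MHz. */
static int sc1200_clock_index(unsigned int clk_bits)
{
	clk_bits &= 3;
	return clk_bits == 3 ? 0 : clk_bits;
}

/* Bit 31 of the drive's current timing dword selects "format1"; the row
   used in pio_timings[] is that bit plus the clock index. */
static int sc1200_pio_row(unsigned int cfg, int clock)
{
	return (cfg >> 31) + clock;
}

int main(void)
{
	unsigned int cfg = 0x80000000u;		/* a format1 part, hypothetical */

	printf("48MHz bus -> pio_timings row %d\n",
	       sc1200_pio_row(cfg, sc1200_clock_index(1)));
	return 0;
}
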
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_scc.c linux-2.6.18.x86_64.p4/drivers/ata/pata_scc.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_scc.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_scc.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,1216 @@
+/*
+ * Support for IDE interfaces on Celleb platform
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This code is based on drivers/ata/ata_piix.c:
+ *  Copyright 2003-2005 Red Hat Inc
+ *  Copyright 2003-2005 Jeff Garzik
+ *  Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ *  Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ *  Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
+ *
+ * and drivers/ata/ahci.c:
+ *  Copyright 2004-2005 Red Hat, Inc.
+ *
+ * and drivers/ata/libata-core.c:
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME		"pata_scc"
+#define DRV_VERSION		"0.2"
+
+#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA		0x01b4
+
+/* PCI BARs */
+#define SCC_CTRL_BAR		0
+#define SCC_BMID_BAR		1
+
+/* offset of CTRL registers */
+#define SCC_CTL_PIOSHT		0x000
+#define SCC_CTL_PIOCT		0x004
+#define SCC_CTL_MDMACT		0x008
+#define SCC_CTL_MCRCST		0x00C
+#define SCC_CTL_SDMACT		0x010
+#define SCC_CTL_SCRCST		0x014
+#define SCC_CTL_UDENVT		0x018
+#define SCC_CTL_TDVHSEL 	0x020
+#define SCC_CTL_MODEREG 	0x024
+#define SCC_CTL_ECMODE		0xF00
+#define SCC_CTL_MAEA0		0xF50
+#define SCC_CTL_MAEC0		0xF54
+#define SCC_CTL_CCKCTRL 	0xFF0
+
+/* offset of BMID registers */
+#define SCC_DMA_CMD		0x000
+#define SCC_DMA_STATUS		0x004
+#define SCC_DMA_TABLE_OFS	0x008
+#define SCC_DMA_INTMASK 	0x010
+#define SCC_DMA_INTST		0x014
+#define SCC_DMA_PTERADD 	0x018
+#define SCC_REG_CMD_ADDR	0x020
+#define SCC_REG_DATA		0x000
+#define SCC_REG_ERR		0x004
+#define SCC_REG_FEATURE 	0x004
+#define SCC_REG_NSECT		0x008
+#define SCC_REG_LBAL		0x00C
+#define SCC_REG_LBAM		0x010
+#define SCC_REG_LBAH		0x014
+#define SCC_REG_DEVICE		0x018
+#define SCC_REG_STATUS		0x01C
+#define SCC_REG_CMD		0x01C
+#define SCC_REG_ALTSTATUS	0x020
+
+/* register value */
+#define TDVHSEL_MASTER		0x00000001
+#define TDVHSEL_SLAVE		0x00000004
+
+#define MODE_JCUSFEN		0x00000080
+
+#define ECMODE_VALUE		0x01
+
+#define CCKCTRL_ATARESET	0x00040000
+#define CCKCTRL_BUFCNT		0x00020000
+#define CCKCTRL_CRST		0x00010000
+#define CCKCTRL_OCLKEN		0x00000100
+#define CCKCTRL_ATACLKOEN	0x00000002
+#define CCKCTRL_LCLKEN		0x00000001
+
+#define QCHCD_IOS_SS		0x00000001
+
+#define QCHSD_STPDIAG		0x00020000
+
+#define INTMASK_MSK		0xD1000012
+#define INTSTS_SERROR		0x80000000
+#define INTSTS_PRERR		0x40000000
+#define INTSTS_RERR		0x10000000
+#define INTSTS_ICERR		0x01000000
+#define INTSTS_BMSINT		0x00000010
+#define INTSTS_BMHE		0x00000008
+#define INTSTS_IOIRQS		0x00000004
+#define INTSTS_INTRQ		0x00000002
+#define INTSTS_ACTEINT		0x00000001
+
+
+/* PIO transfer mode table */
+/* JCHST */
+static const unsigned long JCHSTtbl[2][7] = {
+	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
+	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
+};
+
+/* JCHHT */
+static const unsigned long JCHHTtbl[2][7] = {
+	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
+	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
+};
+
+/* JCHCT */
+static const unsigned long JCHCTtbl[2][7] = {
+	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},	/* 100MHz */
+	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}	/* 133MHz */
+};
+
+/* DMA transfer mode  table */
+/* JCHDCTM/JCHDCTS */
+static const unsigned long JCHDCTxtbl[2][7] = {
+	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},	/* 100MHz */
+	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}	/* 133MHz */
+};
+
+/* JCSTWTM/JCSTWTS  */
+static const unsigned long JCSTWTxtbl[2][7] = {
+	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},	/* 100MHz */
+	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
+};
+
+/* JCTSS */
+static const unsigned long JCTSStbl[2][7] = {
+	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},	/* 100MHz */
+	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}	/* 133MHz */
+};
+
+/* JCENVT */
+static const unsigned long JCENVTtbl[2][7] = {
+	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},	/* 100MHz */
+	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
+};
+
+/* JCACTSELS/JCACTSELM */
+static const unsigned long JCACTSELtbl[2][7] = {
+	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},	/* 100MHz */
+	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}	/* 133MHz */
+};
+
+static const struct pci_device_id scc_pci_tbl[] = {
+	{PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA,
+	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{ }	/* terminate list */
+};
+
+/**
+ *	scc_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set PIO mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio = adev->pio_mode - XFER_PIO_0;
+	void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+	void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+	void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
+	void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
+	unsigned long reg;
+	int offset;
+
+	reg = in_be32(cckctrl_port);
+	if (reg & CCKCTRL_ATACLKOEN)
+		offset = 1;	/* 133MHz */
+	else
+		offset = 0;	/* 100MHz */
+
+	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
+	out_be32(piosht_port, reg);
+	reg = JCHCTtbl[offset][pio];
+	out_be32(pioct_port, reg);
+}
+
+/**
+ *	scc_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int udma = adev->dma_mode;
+	unsigned int is_slave = (adev->devno != 0);
+	u8 speed = udma;
+	void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+	void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+	void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
+	void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
+	void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
+	void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
+	void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
+	void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
+	int offset, idx;
+
+	if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
+		offset = 1;	/* 133MHz */
+	else
+		offset = 0;	/* 100MHz */
+
+	if (speed >= XFER_UDMA_0)
+		idx = speed - XFER_UDMA_0;
+	else
+		return;
+
+	if (is_slave) {
+		out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
+		out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
+		out_be32(tdvhsel_port,
+			 (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
+	} else {
+		out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
+		out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
+		out_be32(tdvhsel_port,
+			 (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
+	}
+	out_be32(udenvt_port,
+		 JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
+}
+
+/**
+ *	scc_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Note: Original code is ata_tf_load().
+ */
+
+static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		out_be32(ioaddr->ctl_addr, tf->ctl);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		out_be32(ioaddr->feature_addr, tf->hob_feature);
+		out_be32(ioaddr->nsect_addr, tf->hob_nsect);
+		out_be32(ioaddr->lbal_addr, tf->hob_lbal);
+		out_be32(ioaddr->lbam_addr, tf->hob_lbam);
+		out_be32(ioaddr->lbah_addr, tf->hob_lbah);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		out_be32(ioaddr->feature_addr, tf->feature);
+		out_be32(ioaddr->nsect_addr, tf->nsect);
+		out_be32(ioaddr->lbal_addr, tf->lbal);
+		out_be32(ioaddr->lbam_addr, tf->lbam);
+		out_be32(ioaddr->lbah_addr, tf->lbah);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		out_be32(ioaddr->device_addr, tf->device);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+/**
+ *	scc_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Note: Original code is ata_check_status().
+ */
+
+static u8 scc_check_status (struct ata_port *ap)
+{
+	return in_be32(ap->ioaddr.status_addr);
+}
+
+/**
+ *	scc_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Note: Original code is ata_tf_read().
+ */
+
+static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = scc_check_status(ap);
+	tf->feature = in_be32(ioaddr->error_addr);
+	tf->nsect = in_be32(ioaddr->nsect_addr);
+	tf->lbal = in_be32(ioaddr->lbal_addr);
+	tf->lbam = in_be32(ioaddr->lbam_addr);
+	tf->lbah = in_be32(ioaddr->lbah_addr);
+	tf->device = in_be32(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
+		tf->hob_feature = in_be32(ioaddr->error_addr);
+		tf->hob_nsect = in_be32(ioaddr->nsect_addr);
+		tf->hob_lbal = in_be32(ioaddr->lbal_addr);
+		tf->hob_lbam = in_be32(ioaddr->lbam_addr);
+		tf->hob_lbah = in_be32(ioaddr->lbah_addr);
+	}
+}
+
+/**
+ *	scc_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Note: Original code is ata_exec_command().
+ */
+
+static void scc_exec_command (struct ata_port *ap,
+			      const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+	out_be32(ap->ioaddr.command_addr, tf->command);
+	ata_pause(ap);
+}
+
+/**
+ *	scc_check_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ */
+
+static u8 scc_check_altstatus (struct ata_port *ap)
+{
+	return in_be32(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	scc_std_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	Note: Original code is ata_std_dev_select().
+ */
+
+static void scc_std_dev_select (struct ata_port *ap, unsigned int device)
+{
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	out_be32(ap->ioaddr.device_addr, tmp);
+	ata_pause(ap);
+}
+
+/**
+ *	scc_bmdma_setup - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_setup().
+ */
+
+static void scc_bmdma_setup (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr */
+	out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = in_be32(mmio + SCC_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	out_be32(mmio + SCC_DMA_CMD, dmactl);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	scc_bmdma_start - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_start().
+ */
+
+static void scc_bmdma_start (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	/* start host DMA transaction */
+	dmactl = in_be32(mmio + SCC_DMA_CMD);
+	out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
+}
+
+/**
+ *	scc_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	Note: Original code is ata_devchk().
+ */
+
+static unsigned int scc_devchk (struct ata_port *ap,
+				unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->dev_select(ap, device);
+
+	out_be32(ioaddr->nsect_addr, 0x55);
+	out_be32(ioaddr->lbal_addr, 0xaa);
+
+	out_be32(ioaddr->nsect_addr, 0xaa);
+	out_be32(ioaddr->lbal_addr, 0x55);
+
+	out_be32(ioaddr->nsect_addr, 0x55);
+	out_be32(ioaddr->lbal_addr, 0xaa);
+
+	nsect = in_be32(ioaddr->nsect_addr);
+	lbal = in_be32(ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	scc_bus_post_reset - PATA device post reset
+ *
+ *	Note: Original code is ata_bus_post_reset().
+ */
+
+static int scc_bus_post_reset(struct ata_port *ap, unsigned int devmask,
+                              unsigned long deadline)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	int rc;
+
+	/* if device 0 was found in ata_devchk, wait for its
+	 * BSY bit to clear
+	 */
+	if (dev0) {
+		rc = ata_wait_ready(ap, deadline);
+		if (rc && rc != -ENODEV)
+			return rc;
+	}
+
+	/* if device 1 was found in ata_devchk, wait for
+	 * register access, then wait for BSY to clear
+	 */
+	while (dev1) {
+		u8 nsect, lbal;
+
+		ap->ops->dev_select(ap, 1);
+		nsect = in_be32(ioaddr->nsect_addr);
+		lbal = in_be32(ioaddr->lbal_addr);
+		if ((nsect == 1) && (lbal == 1))
+			break;
+		if (time_after(jiffies, deadline))
+			return -EBUSY;
+		msleep(50);	/* give drive a breather */
+	}
+	if (dev1) {
+		rc = ata_wait_ready(ap, deadline);
+		if (rc && rc != -ENODEV)
+			return rc;
+	}
+
+	/* is all this really necessary? */
+	ap->ops->dev_select(ap, 0);
+	if (dev1)
+		ap->ops->dev_select(ap, 1);
+	if (dev0)
+		ap->ops->dev_select(ap, 0);
+
+	return 0;
+}
+
+/**
+ *	scc_bus_softreset - PATA device software reset
+ *
+ *	Note: Original code is ata_bus_softreset().
+ */
+
+static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+                                      unsigned long deadline)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+	/* software reset.  causes dev0 to be selected */
+	out_be32(ioaddr->ctl_addr, ap->ctl);
+	udelay(20);
+	out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
+	udelay(20);
+	out_be32(ioaddr->ctl_addr, ap->ctl);
+
+	/* spec mandates ">= 2ms" before checking status.
+	 * We wait 150ms, because that was the magic delay used for
+	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
+	 * between when the ATA command register is written, and then
+	 * status is checked.  Because waiting for "a while" before
+	 * checking status is fine, post SRST, we perform this magic
+	 * delay here as well.
+	 *
+	 * Old drivers/ide uses the 2mS rule and then waits for ready
+	 */
+	msleep(150);
+
+	/* Before we perform post reset processing we want to see if
+	 * the bus shows 0xFF because the odd clown forgets the D7
+	 * pulldown resistor.
+	 */
+	if (scc_check_status(ap) == 0xFF)
+		return 0;
+
+	scc_bus_post_reset(ap, devmask, deadline);
+
+	return 0;
+}
+
+/**
+ *	scc_std_softreset - reset host port via ATA SRST
+ *	@ap: port to reset
+ *	@classes: resulting classes of attached devices
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Note: Original code is ata_std_softreset().
+ */
+
+static int scc_std_softreset (struct ata_port *ap, unsigned int *classes,
+                              unsigned long deadline)
+{
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	unsigned int devmask = 0, err_mask;
+	u8 err;
+
+	DPRINTK("ENTER\n");
+
+	if (ata_port_offline(ap)) {
+		classes[0] = ATA_DEV_NONE;
+		goto out;
+	}
+
+	/* determine if device 0/1 are present */
+	if (scc_devchk(ap, 0))
+		devmask |= (1 << 0);
+	if (slave_possible && scc_devchk(ap, 1))
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	ap->ops->dev_select(ap, 0);
+
+	/* issue bus reset */
+	DPRINTK("about to softreset, devmask=%x\n", devmask);
+	err_mask = scc_bus_softreset(ap, devmask, deadline);
+	if (err_mask) {
+		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+				err_mask);
+		return -EIO;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_dev_try_classify(ap, 0, &err);
+	if (slave_possible && err != 0x81)
+		classes[1] = ata_dev_try_classify(ap, 1, &err);
+
+ out:
+	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+	return 0;
+}
+
+/**
+ *	scc_bmdma_stop - Stop PCI IDE BMDMA transfer
+ *	@qc: Command we are ending DMA for
+ */
+
+static void scc_bmdma_stop (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+	void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
+	u32 reg;
+
+	while (1) {
+		reg = in_be32(bmid_base + SCC_DMA_INTST);
+
+		if (reg & INTSTS_SERROR) {
+			printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
+			out_be32(bmid_base + SCC_DMA_CMD,
+				 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+			continue;
+		}
+
+		if (reg & INTSTS_PRERR) {
+			u32 maea0, maec0;
+			maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
+			maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
+			printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
+			out_be32(bmid_base + SCC_DMA_CMD,
+				 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+			continue;
+		}
+
+		if (reg & INTSTS_RERR) {
+			printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
+			out_be32(bmid_base + SCC_DMA_CMD,
+				 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+			continue;
+		}
+
+		if (reg & INTSTS_ICERR) {
+			out_be32(bmid_base + SCC_DMA_CMD,
+				 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+			printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
+			continue;
+		}
+
+		if (reg & INTSTS_BMSINT) {
+			unsigned int classes;
+			unsigned long deadline = jiffies + ATA_TMOUT_BOOT;
+			printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
+			/* TBD: SW reset */
+			scc_std_softreset(ap, &classes, deadline);
+			continue;
+		}
+
+		if (reg & INTSTS_BMHE) {
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
+			continue;
+		}
+
+		if (reg & INTSTS_ACTEINT) {
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
+			continue;
+		}
+
+		if (reg & INTSTS_IOIRQS) {
+			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
+			continue;
+		}
+		break;
+	}
+
+	/* clear start/stop bit */
+	out_be32(bmid_base + SCC_DMA_CMD,
+		 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_altstatus(ap);	/* dummy read */
+}
+
+/**
+ *	scc_bmdma_status - Read PCI IDE BMDMA status
+ *	@ap: Port associated with this ATA transaction.
+ */
+
+static u8 scc_bmdma_status (struct ata_port *ap)
+{
+	u8 host_stat;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	host_stat = in_be32(mmio + SCC_DMA_STATUS);
+
+	/* Workaround for PTERADD: emulate DMA_INTR when
+	 * - IDE_STATUS[ERR] = 1
+	 * - INT_STATUS[INTRQ] = 1
+	 * - DMA_STATUS[IORACTA] = 1
+	 */
+	if (!(host_stat & ATA_DMA_INTR)) {
+		u32 int_status = in_be32(mmio + SCC_DMA_INTST);
+		if (ata_altstatus(ap) & ATA_ERR &&
+		    int_status & INTSTS_INTRQ &&
+		    host_stat & ATA_DMA_ACTIVE)
+			host_stat |= ATA_DMA_INTR;
+	}
+
+	return host_stat;
+}
+
+/**
+ *	scc_data_xfer - Transfer data by PIO
+ *	@adev: device for this I/O
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@write_data: read/write
+ *
+ *	Note: Original code is ata_data_xfer().
+ */
+
+static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
+			   unsigned int buflen, int write_data)
+{
+	struct ata_port *ap = adev->ap;
+	unsigned int words = buflen >> 1;
+	unsigned int i;
+	u16 *buf16 = (u16 *) buf;
+	void __iomem *mmio = ap->ioaddr.data_addr;
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data) {
+		for (i = 0; i < words; i++)
+			out_be32(mmio, cpu_to_le16(buf16[i]));
+	} else {
+		for (i = 0; i < words; i++)
+			buf16[i] = le16_to_cpu(in_be32(mmio));
+	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			out_be32(mmio, cpu_to_le16(align_buf[0]));
+		} else {
+			align_buf[0] = le16_to_cpu(in_be32(mmio));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+}
+
+/**
+ *	scc_irq_on - Enable interrupts on a port.
+ *	@ap: Port on which interrupts are enabled.
+ *
+ *	Note: Original code is ata_irq_on().
+ */
+
+static u8 scc_irq_on (struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 tmp;
+
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	out_be32(ioaddr->ctl_addr, ap->ctl);
+	tmp = ata_wait_idle(ap);
+
+	ap->ops->irq_clear(ap);
+
+	return tmp;
+}
+
+/**
+ *	scc_irq_ack - Acknowledge a device interrupt.
+ *	@ap: Port on which interrupts are enabled.
+ *
+ *	Note: Original code is ata_irq_ack().
+ */
+
+static u8 scc_irq_ack (struct ata_port *ap, unsigned int chk_drq)
+{
+	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
+	u8 host_stat, post_stat, status;
+
+	status = ata_busy_wait(ap, bits, 1000);
+	if (status & bits)
+		if (ata_msg_err(ap))
+			printk(KERN_ERR "abnormal status 0x%X\n", status);
+
+	/* get controller status; clear intr, err bits */
+	host_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
+	out_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS,
+		 host_stat | ATA_DMA_INTR | ATA_DMA_ERR);
+
+	post_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
+
+	if (ata_msg_intr(ap))
+		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
+		       __FUNCTION__,
+		       host_stat, post_stat, status);
+
+	return status;
+}
+
+/**
+ *	scc_bmdma_freeze - Freeze BMDMA controller port
+ *	@ap: port to freeze
+ *
+ *	Note: Original code is ata_bmdma_freeze().
+ */
+
+static void scc_bmdma_freeze (struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	out_be32(ioaddr->ctl_addr, ap->ctl);
+
+	/* Under certain circumstances, some controllers raise IRQ on
+	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
+	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
+	 */
+	ata_chk_status(ap);
+
+	ap->ops->irq_clear(ap);
+}
+
+/**
+ *	scc_pata_prereset - prepare for reset
+ *	@ap: ATA port to be reset
+ *	@deadline: deadline jiffies for the operation
+ */
+
+static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline)
+{
+	ap->cbl = ATA_CBL_PATA80;
+	return ata_std_prereset(ap, deadline);
+}
+
+/**
+ *	scc_std_postreset - standard postreset callback
+ *	@ap: the target ata_port
+ *	@classes: classes of attached devices
+ *
+ *	Note: Original code is ata_std_postreset().
+ */
+
+static void scc_std_postreset (struct ata_port *ap, unsigned int *classes)
+{
+	DPRINTK("ENTER\n");
+
+	/* re-enable interrupts */
+	if (!ap->ops->error_handler)
+		ap->ops->irq_on(ap);
+
+	/* is double-select really necessary? */
+	if (classes[0] != ATA_DEV_NONE)
+		ap->ops->dev_select(ap, 1);
+	if (classes[1] != ATA_DEV_NONE)
+		ap->ops->dev_select(ap, 0);
+
+	/* bail out if no device is present */
+	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+		DPRINTK("EXIT, no device\n");
+		return;
+	}
+
+	/* set up device control */
+	if (ap->ioaddr.ctl_addr)
+		out_be32(ap->ioaddr.ctl_addr, ap->ctl);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	scc_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ */
+
+static void scc_error_handler (struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, scc_pata_prereset, scc_std_softreset, NULL,
+			   scc_std_postreset);
+}
+
+/**
+ *	scc_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_irq_clear().
+ */
+
+static void scc_bmdma_irq_clear (struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+
+	out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
+}
+
+/**
+ *	scc_port_start - Set port up for dma.
+ *	@ap: Port to initialize
+ *
+ *	Allocate space for PRD table using ata_port_start().
+ *	Set PRD table address for PTERADD. (PRD Transfer End Read)
+ */
+
+static int scc_port_start (struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+	int rc;
+
+	rc = ata_port_start(ap);
+	if (rc)
+		return rc;
+
+	out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
+	return 0;
+}
+
+/**
+ *	scc_port_stop - Undo scc_port_start()
+ *	@ap: Port to shut down
+ *
+ *	Reset PTERADD.
+ */
+
+static void scc_port_stop (struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	out_be32(mmio + SCC_DMA_PTERADD, 0);
+}
+
+static struct scsi_host_template scc_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations scc_pata_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= scc_set_piomode,
+	.set_dmamode		= scc_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= scc_tf_load,
+	.tf_read		= scc_tf_read,
+	.exec_command		= scc_exec_command,
+	.check_status		= scc_check_status,
+	.check_altstatus	= scc_check_altstatus,
+	.dev_select		= scc_std_dev_select,
+
+	.bmdma_setup		= scc_bmdma_setup,
+	.bmdma_start		= scc_bmdma_start,
+	.bmdma_stop		= scc_bmdma_stop,
+	.bmdma_status		= scc_bmdma_status,
+	.data_xfer		= scc_data_xfer,
+
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.freeze			= scc_bmdma_freeze,
+	.error_handler		= scc_error_handler,
+	.post_internal_cmd	= scc_bmdma_stop,
+
+	.irq_clear		= scc_bmdma_irq_clear,
+	.irq_on			= scc_irq_on,
+	.irq_ack		= scc_irq_ack,
+
+	.port_start		= scc_port_start,
+	.port_stop		= scc_port_stop,
+};
+
+static struct ata_port_info scc_port_info[] = {
+	{
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x00,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &scc_pata_ops,
+	},
+};
+
+/**
+ *	scc_reset_controller - initialize SCC PATA controller.
+ */
+
+static int scc_reset_controller(struct ata_host *host)
+{
+	void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
+	void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
+	void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+	void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
+	void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
+	void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
+	void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
+	u32 reg = 0;
+
+	out_be32(cckctrl_port, reg);
+	reg |= CCKCTRL_ATACLKOEN;
+	out_be32(cckctrl_port, reg);
+	reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
+	out_be32(cckctrl_port, reg);
+	reg |= CCKCTRL_CRST;
+	out_be32(cckctrl_port, reg);
+
+	for (;;) {
+		reg = in_be32(cckctrl_port);
+		if (reg & CCKCTRL_CRST)
+			break;
+		udelay(5000);
+	}
+
+	reg |= CCKCTRL_ATARESET;
+	out_be32(cckctrl_port, reg);
+	out_be32(ecmode_port, ECMODE_VALUE);
+	out_be32(mode_port, MODE_JCUSFEN);
+	out_be32(intmask_port, INTMASK_MSK);
+
+	if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
+		printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ *	scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
+ *	@ioaddr: IO address structure to be initialized
+ *	@base: base address of BMID region
+ */
+
+static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
+{
+	ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
+	ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
+	ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
+	ioaddr->bmdma_addr = base;
+	ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
+	ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
+	ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
+	ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
+	ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
+	ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
+	ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
+	ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
+	ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
+	ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
+}
+
+static int scc_host_init(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	int rc;
+
+	rc = scc_reset_controller(host);
+	if (rc)
+		return rc;
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);
+
+	pci_set_master(pdev);
+
+	return 0;
+}
+
+/**
+ *	scc_init_one - Register SCC PATA device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in scc_pci_tbl matching with @pdev
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
+	struct ata_host *host;
+	int rc;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
+	if (!host)
+		return -ENOMEM;
+
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	rc = scc_host_init(host);
+	if (rc)
+		return rc;
+
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &scc_sht);
+}
+
+static struct pci_driver scc_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= scc_pci_tbl,
+	.probe			= scc_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init scc_init (void)
+{
+	int rc;
+
+	DPRINTK("pci_register_driver\n");
+	rc = pci_register_driver(&scc_pci_driver);
+	if (rc)
+		return rc;
+
+	DPRINTK("done\n");
+	return 0;
+}
+
+static void __exit scc_exit (void)
+{
+	pci_unregister_driver(&scc_pci_driver);
+}
+
+module_init(scc_init);
+module_exit(scc_exit);
+
+MODULE_AUTHOR("Toshiba corp");
+MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_serverworks.c linux-2.6.18.x86_64.p4/drivers/ata/pata_serverworks.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_serverworks.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_serverworks.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,608 @@
+/*
+ * pata_serverworks.c 	- Serverworks PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based upon
+ *
+ * serverworks.c
+ *
+ * Copyright (C) 1998-2000 Michel Aubry
+ * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Portions copyright (c) 2001 Sun Microsystems
+ *
+ *
+ * RCC/ServerWorks IDE driver for Linux
+ *
+ *   OSB4: `Open South Bridge' IDE Interface (fn 1)
+ *         supports UDMA mode 2 (33 MB/s)
+ *
+ *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
+ *         all revisions support UDMA mode 4 (66 MB/s)
+ *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
+ *
+ *         *** The CSB5 does not provide ANY register ***
+ *         *** to detect 80-conductor cable presence. ***
+ *
+ *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
+ *
+ * Documentation:
+ *	Available under NDA only. Errata info very hard to get.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_serverworks"
+#define DRV_VERSION "0.4.1"
+
+#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
+#define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
+
+/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
+ * can overrun their FIFOs when used with the CSB5 */
+
+static const char *csb_bad_ata100[] = {
+	"ST320011A",
+	"ST340016A",
+	"ST360021A",
+	"ST380021A",
+	NULL
+};
+
+/**
+ *	dell_cable	-	Dell serverworks cable detection
+ *	@ap: ATA port to do cable detect
+ *
+ *	Dell hides the 40/80 pin select for their interfaces in the top two
+ *	bits of the subsystem ID.
+ */
+
+static int dell_cable(struct ata_port *ap) {
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
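+
+/*
+ * Worked example: the check above tests bit (port_no + 14) of the 16-bit
+ * subsystem device ID, i.e. bit 14 for the primary channel and bit 15 for
+ * the secondary channel. A (hypothetical) subsystem ID of 0xC000 would
+ * therefore report an 80-wire cable on both channels.
+ */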
+
+/**
+ *	sun_cable	-	Sun Cobalt 'Alpine' cable detection
+ *	@ap: ATA port to do cable select
+ *
+ *	The Cobalt CSB5 IDE hides the 40/80 pin select in the top two bits of
+ *	the subsystem ID, the same way Dell does. We could share one function,
+ *	but we may need to extend the Dell version in the future.
+ */
+
+static int sun_cable(struct ata_port *ap) {
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	osb4_cable	-	OSB4 cable detect
+ *	@ap: ATA port to check
+ *
+ *	The OSB4 isn't UDMA66 capable so this is easy
+ */
+
+static int osb4_cable(struct ata_port *ap) {
+	return ATA_CBL_PATA40;
+}
+
+/**
+ *	csb_cable	-	CSB5/6 cable detect
+ *	@ap: ATA port to check
+ *
+ *	The Serverworks default arrangement is to rely on drive side cable
+ *	detection only.
+ */
+
+static int csb_cable(struct ata_port *ap) {
+	return ATA_CBL_PATA80;
+}
+
+struct sv_cable_table {
+	int device;
+	int subvendor;
+	int (*cable_detect)(struct ata_port *ap);
+};
+
+/*
+ *	Note that we don't copy the old serverworks code because the old
+ *	code contains obvious mistakes
+ */
+
+static struct sv_cable_table cable_detect[] = {
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN,  sun_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, osb4_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
+	{ PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
+	{ }
+};
+
+/**
+ *	serverworks_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection according to the device and subvendor
+ *	identification.
+ */
+
+static int serverworks_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct sv_cable_table *cb = cable_detect;
+
+	while(cb->device) {
+		if (cb->device == pdev->device &&
+		    (cb->subvendor == pdev->subsystem_vendor ||
+		      cb->subvendor == PCI_ANY_ID)) {
+			return cb->cable_detect(ap);
+		}
+		cb++;
+	}
+
+	BUG();
+	return -1;	/* kill compiler warning */
+}
+
+/**
+ *	serverworks_is_csb	-	Check for CSB or OSB
+ *	@pdev: PCI device to check
+ *
+ *	Returns true if the device being checked is known to be a CSB
+ *	series device.
+ */
+
+static u8 serverworks_is_csb(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+			return 1;
+		default:
+			break;
+	}
+	return 0;
+}
+
+/**
+ *	serverworks_osb4_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: Mask of proposed modes
+ *
+ *	Filter the offered modes for the device to apply controller
+ *	specific rules. OSB4 requires no UDMA for disks due to a FIFO
+ *	bug we hit.
+ */
+
+static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (adev->class == ATA_DEV_ATA)
+		mask &= ~ATA_MASK_UDMA;
+	return ata_pci_default_filter(adev, mask);
+}
+
+
+/**
+ *	serverworks_csb_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: Mask of proposed modes
+ *
+ *	Check the blacklist and disable UDMA5 if matched
+ */
+
+static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask)
+{
+	const char *p;
+	char model_num[ATA_ID_PROD_LEN + 1];
+	int i;
+
+	/* Disk, UDMA */
+	if (adev->class != ATA_DEV_ATA)
+		return ata_pci_default_filter(adev, mask);
+
+	/* Actually do need to check */
+	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
+		if (!strcmp(p, model_num))
+			mask &= ~(0x1F << ATA_SHIFT_UDMA);
+	}
+	return ata_pci_default_filter(adev, mask);
+}
+
+
+/**
+ *	serverworks_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the OSB4/CSB5 timing registers for PIO. The PIO register
+ *	load is done as a simple lookup.
+ */
+static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+	int offset = 1 + (2 * ap->port_no) - adev->devno;
+	int devbits = (2 * ap->port_no + adev->devno) * 4;
+	u16 csb5_pio;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int pio = adev->pio_mode - XFER_PIO_0;
+
+	pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
+
+	/* The OSB4 just requires the timing but the CSB series wants the
+	   mode number as well */
+	if (serverworks_is_csb(pdev)) {
+		pci_read_config_word(pdev, 0x4A, &csb5_pio);
+		csb5_pio &= ~(0x0F << devbits);
+		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
+	}
+}
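+
+/*
+ * Worked example of the addressing above: for the secondary master
+ * (port_no = 1, devno = 0), offset = 1 + 2 - 0 = 3, so the PIO timing
+ * byte lives at config offset 0x40 + 3 = 0x43, while
+ * devbits = (2 + 0) * 4 = 8 places the PIO mode number in bits 11:8 of
+ * the word register at 0x4A.
+ */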
+
+/**
+ *	serverworks_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
+ *	chipset. The MWDMA mode values are pulled from a lookup table
+ *	while the chipset uses mode number for UDMA.
+ */
+
+static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
+	int offset = 1 + 2 * ap->port_no - adev->devno;
+	int devbits = (2 * ap->port_no + adev->devno);
+	u8 ultra;
+	u8 ultra_cfg;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	pci_read_config_byte(pdev, 0x54, &ultra_cfg);
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		pci_write_config_byte(pdev, 0x44 + offset,  0x20);
+
+		pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
+		ultra &= ~(0x0F << (ap->port_no * 4));
+		ultra |= (adev->dma_mode - XFER_UDMA_0)
+					<< (ap->port_no * 4);
+		pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
+
+		ultra_cfg |=  (1 << devbits);
+	} else {
+		pci_write_config_byte(pdev, 0x44 + offset,
+			dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
+		ultra_cfg &= ~(1 << devbits);
+	}
+	pci_write_config_byte(pdev, 0x54, ultra_cfg);
+}
+
+static struct scsi_host_template serverworks_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations serverworks_osb4_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= serverworks_set_piomode,
+	.set_dmamode	= serverworks_set_dmamode,
+	.mode_filter	= serverworks_osb4_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= serverworks_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations serverworks_csb_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= serverworks_set_piomode,
+	.set_dmamode	= serverworks_set_dmamode,
+	.mode_filter	= serverworks_csb_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= serverworks_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int serverworks_fixup_osb4(struct pci_dev *pdev)
+{
+	u32 reg;
+	struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+		  PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
+	if (isa_dev) {
+		pci_read_config_dword(isa_dev, 0x64, &reg);
+		reg &= ~0x00002000; /* disable 600ns interrupt mask */
+		if (!(reg & 0x00004000))
+			printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
+		reg |=  0x00004000; /* enable UDMA/33 support */
+		pci_write_config_dword(isa_dev, 0x64, reg);
+		pci_dev_put(isa_dev);
+		return 0;
+	}
+	printk(KERN_WARNING "ata_serverworks: Unable to find bridge.\n");
+	return -ENODEV;
+}
+
+static int serverworks_fixup_csb(struct pci_dev *pdev)
+{
+	u8 rev;
+	u8 btr;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+
+	/* Third Channel Test */
+	if (!(PCI_FUNC(pdev->devfn) & 1)) {
+		struct pci_dev * findev = NULL;
+		u32 reg4c = 0;
+		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+			PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
+		if (findev) {
+			pci_read_config_dword(findev, 0x4C, &reg4c);
+			reg4c &= ~0x000007FF;
+			reg4c |=  0x00000040;
+			reg4c |=  0x00000020;
+			pci_write_config_dword(findev, 0x4C, reg4c);
+			pci_dev_put(findev);
+		}
+	} else {
+		struct pci_dev * findev = NULL;
+		u8 reg41 = 0;
+
+		findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+				PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
+		if (findev) {
+			pci_read_config_byte(findev, 0x41, &reg41);
+			reg41 &= ~0x40;
+			pci_write_config_byte(findev, 0x41, reg41);
+			pci_dev_put(findev);
+		}
+	}
+	/* setup the UDMA Control register
+	 *
+	 * 1. clear bit 6 to enable DMA
+	 * 2. enable DMA modes with bits 0-1
+	 * 	00 : legacy
+	 * 	01 : udma2
+	 * 	10 : udma2/udma4
+	 * 	11 : udma2/udma4/udma5
+	 */
+	pci_read_config_byte(pdev, 0x5A, &btr);
+	btr &= ~0x40;
+	if (!(PCI_FUNC(pdev->devfn) & 1))
+		btr |= 0x2;
+	else
+		btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
+	pci_write_config_byte(pdev, 0x5A, btr);
+
+	return btr;
+}
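+
+/*
+ * Example of the return value check in serverworks_init_one(): on an
+ * odd-numbered IDE function of a CSB5/CSB6 with revision >= 0x92 and the
+ * remaining bits of register 0x5A clear, the value written back (and
+ * returned) is 0x03, which selects the UDMA5 capable info block below.
+ */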
+
+static void serverworks_fixup_ht1000(struct pci_dev *pdev)
+{
+	u8 btr;
+	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
+	pci_read_config_byte(pdev, 0x5A, &btr);
+	btr &= ~0x40;
+	btr |= 0x3;
+	pci_write_config_byte(pdev, 0x5A, btr);
+}
+
+
+static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info[4] = {
+		{ /* OSB4 */
+			.sht = &serverworks_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x07,
+			.port_ops = &serverworks_osb4_port_ops
+		}, { /* OSB4 no UDMA */
+			.sht = &serverworks_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x00,
+			.port_ops = &serverworks_osb4_port_ops
+		}, { /* CSB5 */
+			.sht = &serverworks_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x1f,
+			.port_ops = &serverworks_csb_port_ops
+		}, { /* CSB5 - later revisions*/
+			.sht = &serverworks_sht,
+			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.pio_mask = 0x1f,
+			.mwdma_mask = 0x07,
+			.udma_mask = 0x3f,
+			.port_ops = &serverworks_csb_port_ops
+		}
+	};
+	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+
+	/* Force master latency timer to 64 PCI clocks */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+	/* OSB4 : South Bridge and IDE */
+	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
+		/* Select non UDMA capable OSB4 if we can't do fixups */
+		if ( serverworks_fixup_osb4(pdev) < 0)
+			ppi[0] = &info[1];
+	}
+	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+	else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
+		 (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
+
+		 /* If serverworks_fixup_csb() reports the UDMA5 capable
+		    setting (3) then select the faster info block */
+		 if (serverworks_fixup_csb(pdev) == 3)
+		 	ppi[0] = &info[3];
+
+		/* Is this the 3rd channel CSB6 IDE ? */
+		if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
+			ppi[1] = &ata_dummy_port_info;
+	}
+	/* setup HT1000E */
+	else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
+		serverworks_fixup_ht1000(pdev);
+
+	if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
+		ata_pci_clear_simplex(pdev);
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int serverworks_reinit_one(struct pci_dev *pdev)
+{
+	/* Force master latency timer to 64 PCI clocks */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+	switch (pdev->device)
+	{
+		case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
+			serverworks_fixup_osb4(pdev);
+			break;
+		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+			ata_pci_clear_simplex(pdev);
+			/* fall through */
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+			serverworks_fixup_csb(pdev);
+			break;
+		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+			serverworks_fixup_ht1000(pdev);
+			break;
+	}
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id serverworks[] = {
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
+	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
+
+	{ },
+};
+
+static struct pci_driver serverworks_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= serverworks,
+	.probe 		= serverworks_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= serverworks_reinit_one,
+#endif
+};
+
+static int __init serverworks_init(void)
+{
+	return pci_register_driver(&serverworks_pci_driver);
+}
+
+static void __exit serverworks_exit(void)
+{
+	pci_unregister_driver(&serverworks_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, serverworks);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(serverworks_init);
+module_exit(serverworks_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_sil680.c linux-2.6.18.x86_64.p4/drivers/ata/pata_sil680.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_sil680.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_sil680.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,419 @@
+/*
+ * pata_sil680.c 	- SIL680 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based upon
+ *
+ * linux/drivers/ide/pci/siimage.c		Version 1.07	Nov 30, 2003
+ *
+ * Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003		Red Hat <alan@redhat.com>
+ *
+ *  May be copied or modified under the terms of the GNU General Public License
+ *
+ *  Documentation publicly available.
+ *
+ *	If you have strange problems with nVidia chipset systems please
+ *	see the SI support documentation and update your system BIOS
+ *	if necessary.
+ *
+ * TODO
+ *	If we know all our devices are LBA28 (or LBA28 sized)  we could use
+ *	the command fifo mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_sil680"
+#define DRV_VERSION "0.4.6"
+
+/**
+ *	sil680_selreg		-	return register base
+ *	@ap: ATA port
+ *	@r: config offset
+ *
+ *	Turn a config register offset into the right address in either
+ *	PCI space or MMIO space to access the control register in question.
+ *	Thankfully this is a configuration operation, so it isn't performance
+ *	critical.
+ */
+
+static unsigned long sil680_selreg(struct ata_port *ap, int r)
+{
+	unsigned long base = 0xA0 + r;
+	base += (ap->port_no << 4);
+	return base;
+}
+
+/**
+ *	sil680_seldev		-	return register base
+ *	@ap: ATA port
+ *	@adev: ATA device
+ *	@r: config offset
+ *
+ *	Turn a config register offset into the right address in either
+ *	PCI space or MMIO space to access the control register in question,
+ *	including accounting for the unit shift.
+ */
+
+static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
+{
+	unsigned long base = 0xA0 + r;
+	base += (ap->port_no << 4);
+	base |= adev->devno ? 2 : 0;
+	return base;
+}
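+
+/*
+ * Worked examples of the addressing above: sil680_selreg(port 1, 0x02)
+ * yields 0xA0 + 0x02 + 0x10 = 0xB2, and sil680_seldev(port 0, device 1,
+ * 0x0C) yields (0xA0 + 0x0C) | 2 = 0xAE.
+ */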
+
+
+/**
+ *	sil680_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection. The SIL680 stores this in PCI config
+ *	space for us.
+ */
+
+static int sil680_cable_detect(struct ata_port *ap) {
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long addr = sil680_selreg(ap, 0);
+	u8 ata66;
+	pci_read_config_byte(pdev, addr, &ata66);
+	if (ata66 & 1)
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	sil680_bus_reset	-	reset the SIL680 bus
+ *	@ap: ATA port to reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the SIL680 housekeeping when doing an ATA bus reset
+ */
+
+static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes,
+			    unsigned long deadline)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long addr = sil680_selreg(ap, 0);
+	u8 reset;
+
+	pci_read_config_byte(pdev, addr, &reset);
+	pci_write_config_byte(pdev, addr, reset | 0x03);
+	udelay(25);
+	pci_write_config_byte(pdev, addr, reset);
+	return ata_std_softreset(ap, classes, deadline);
+}
+
+static void sil680_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, ata_std_prereset, sil680_bus_reset, NULL, ata_std_postreset);
+}
+
+/**
+ *	sil680_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the SIL680 registers for PIO mode. Note that the task speed
+ *	registers are shared between the devices so we must pick the lowest
+ *	mode for command work.
+ */
+
+static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
+	static u16 speed_t[5] = { 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1 };
+
+	unsigned long tfaddr = sil680_selreg(ap, 0x02);
+	unsigned long addr = sil680_seldev(ap, adev, 0x04);
+	unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int pio = adev->pio_mode - XFER_PIO_0;
+	int lowest_pio = pio;
+	int port_shift = 4 * adev->devno;
+	u16 reg;
+	u8 mode;
+
+	struct ata_device *pair = ata_dev_pair(adev);
+
+	if (pair != NULL && adev->pio_mode > pair->pio_mode)
+		lowest_pio = pair->pio_mode - XFER_PIO_0;
+
+	pci_write_config_word(pdev, addr, speed_p[pio]);
+	pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
+
+	pci_read_config_word(pdev, tfaddr-2, &reg);
+	pci_read_config_byte(pdev, addr_mask, &mode);
+
+	reg &= ~0x0200;			/* Clear IORDY */
+	mode &= ~(3 << port_shift);	/* Clear IORDY and DMA bits */
+
+	if (ata_pio_need_iordy(adev)) {
+		reg |= 0x0200;		/* Enable IORDY */
+		mode |= 1 << port_shift;
+	}
+	pci_write_config_word(pdev, tfaddr-2, reg);
+	pci_write_config_byte(pdev, addr_mask, mode);
+}
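+
+/*
+ * Note on the registers touched above: addr_mask is 0x80 for the primary
+ * and 0x84 for the secondary channel, with the IORDY/DMA enable bits in
+ * bits 1:0 for the master and bits 5:4 for the slave. The shared task
+ * file timing word (0xA2/0xB2) is the same register that
+ * sil680_init_chip() programs to 0x328A (the PIO0 entry) at probe time.
+ */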
+
+/**
+ *	sil680_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Program the MWDMA/UDMA modes for the SIL680 chipset. Both the
+ *	MWDMA and UDMA timing values are pulled from lookup tables.
+ */
+
+static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u8 ultra_table[2][7] = {
+		{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF },	/* 100MHz */
+		{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 },	/* 133MHz */
+	};
+	static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned long ma = sil680_seldev(ap, adev, 0x08);
+	unsigned long ua = sil680_seldev(ap, adev, 0x0C);
+	unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+	int port_shift = adev->devno * 4;
+	u8 scsc, mode;
+	u16 multi, ultra;
+
+	pci_read_config_byte(pdev, 0x8A, &scsc);
+	pci_read_config_byte(pdev, addr_mask, &mode);
+	pci_read_config_word(pdev, ma, &multi);
+	pci_read_config_word(pdev, ua, &ultra);
+
+	/* Mask timing bits */
+	ultra &= ~0x3F;
+	mode &= ~(0x03 << port_shift);
+
+	/* Extract scsc */
+	scsc = (scsc & 0x30) ? 1: 0;
+
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		multi = 0x10C1;
+		ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
+		mode |= (0x03 << port_shift);
+	} else {
+		multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
+		mode |= (0x02 << port_shift);
+	}
+	pci_write_config_byte(pdev, addr_mask, mode);
+	pci_write_config_word(pdev, ma, multi);
+	pci_write_config_word(pdev, ua, ultra);
+}
+
+static struct scsi_host_template sil680_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations sil680_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= sil680_set_piomode,
+	.set_dmamode	= sil680_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= sil680_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= sil680_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	sil680_init_chip		-	chip setup
+ *	@pdev: PCI device
+ *
+ *	Perform all the chip setup which must be done both when the device
+ *	is powered up on boot and when we resume in case we resumed from RAM.
+ *	Returns the final clock settings.
+ */
+
+static u8 sil680_init_chip(struct pci_dev *pdev)
+{
+	u32 class_rev	= 0;
+	u8 tmpbyte	= 0;
+
+	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+	class_rev &= 0xff;
+	/* FIXME: double check */
+	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
+
+	pci_write_config_byte(pdev, 0x80, 0x00);
+	pci_write_config_byte(pdev, 0x84, 0x00);
+
+	pci_read_config_byte(pdev, 0x8A, &tmpbyte);
+
+	printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
+			tmpbyte & 1, tmpbyte & 0x30);
+
+	switch(tmpbyte & 0x30) {
+		case 0x00:
+			/* 133 clock attempt to force it on */
+			pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
+			break;
+		case 0x30:
+			/* if clocking is disabled */
+			/* 133 clock attempt to force it on */
+			pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
+			break;
+		case 0x10:
+			/* 133 already */
+			break;
+		case 0x20:
+			/* BIOS set PCI x2 clocking */
+			break;
+	}
+
+	pci_read_config_byte(pdev,   0x8A, &tmpbyte);
+	printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
+			tmpbyte & 1, tmpbyte & 0x30);
+
+	pci_write_config_byte(pdev,  0xA1, 0x72);
+	pci_write_config_word(pdev,  0xA2, 0x328A);
+	pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
+	pci_write_config_dword(pdev, 0xA8, 0x43924392);
+	pci_write_config_dword(pdev, 0xAC, 0x40094009);
+	pci_write_config_byte(pdev,  0xB1, 0x72);
+	pci_write_config_word(pdev,  0xB2, 0x328A);
+	pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
+	pci_write_config_dword(pdev, 0xB8, 0x43924392);
+	pci_write_config_dword(pdev, 0xBC, 0x40094009);
+
+	switch(tmpbyte & 0x30) {
+		case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break;
+		case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break;
+		case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
+		/* This last case is _NOT_ ok */
+		case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
+	}
+	return tmpbyte & 0x30;
+}
+
+static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &sil680_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,
+		.port_ops = &sil680_port_ops
+	};
+	static const struct ata_port_info info_slow = {
+		.sht = &sil680_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &sil680_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	switch(sil680_init_chip(pdev))
+	{
+		case 0:
+			ppi[0] = &info_slow;
+			break;
+		case 0x30:
+			return -ENODEV;
+	}
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+static int sil680_reinit_one(struct pci_dev *pdev)
+{
+	sil680_init_chip(pdev);
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id sil680[] = {
+	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
+
+	{ },
+};
+
+static struct pci_driver sil680_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= sil680,
+	.probe 		= sil680_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= sil680_reinit_one,
+#endif
+};
+
+static int __init sil680_init(void)
+{
+	return pci_register_driver(&sil680_pci_driver);
+}
+
+static void __exit sil680_exit(void)
+{
+	pci_unregister_driver(&sil680_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for SI680 PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil680);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sil680_init);
+module_exit(sil680_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_sis.c linux-2.6.18.x86_64.p4/drivers/ata/pata_sis.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_sis.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_sis.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,1011 @@
+/*
+ *    pata_sis.c - SiS ATA driver
+ *
+ *	(C) 2005 Red Hat <alan@redhat.com>
+ *
+ *    Based upon linux/drivers/ide/pci/sis5513.c
+ * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2002		Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
+ * Copyright (C) 2003		Vojtech Pavlik <vojtech@suse.cz>
+ * SiS Taiwan		: for direct support and hardware.
+ * Daniela Engert	: for initial ATA100 advices and numerous others.
+ * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt	:
+ *			  for checking code correctness, providing patches.
+ * Original tests and design on the SiS620 chipset.
+ * ATA100 tests and design on the SiS735 chipset.
+ * ATA16/33 support from specs
+ * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
+ *
+ *
+ *	TODO
+ *	Check MWDMA on drives that don't support MWDMA speed pio cycles ?
+ *	More Testing
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+#include "sis.h"
+
+#define DRV_NAME	"pata_sis"
+#define DRV_VERSION	"0.5.1"
+
+struct sis_chipset {
+	u16 device;				/* PCI host ID */
+	const struct ata_port_info *info;	/* Info block */
+	/* Probably add family, cable detect type etc here to clean
+	   up code later */
+};
+
+struct sis_laptop {
+	u16 device;
+	u16 subvendor;
+	u16 subdevice;
+};
+
+static const struct sis_laptop sis_laptop[] = {
+	/* devid, subvendor, subdev */
+	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
+	/* end marker */
+	{ 0, }
+};
+
+static int sis_short_ata40(struct pci_dev *dev)
+{
+	const struct sis_laptop *lap = &sis_laptop[0];
+
+	while (lap->device) {
+		if (lap->device == dev->device &&
+		    lap->subvendor == dev->subsystem_vendor &&
+		    lap->subdevice == dev->subsystem_device)
+			return 1;
+		lap++;
+	}
+
+	return 0;
+}
+
+/**
+ *	sis_old_port_base		-	return PCI configuration base for dev
+ *	@adev: device
+ *
+ *	Returns the base of the PCI configuration registers for this port
+ *	number.
+ */
+
+static int sis_old_port_base(struct ata_device *adev)
+{
+	return  0x40 + (4 * adev->ap->port_no) +  (2 * adev->devno);
+}
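+
+/*
+ * Worked example: the bases computed above are 0x40, 0x42, 0x44 and 0x46
+ * for primary master, primary slave, secondary master and secondary
+ * slave respectively.
+ */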
+
+/**
+ *	sis_133_cable_detect	-	check for 40/80 pin
+ *	@ap: Port
+ *	@deadline: deadline jiffies for the operation
+ *	Perform cable detection for the later UDMA133 capable
+ *	SiS chipset.
+ */
+
+static int sis_133_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 tmp;
+
+	/* The top bit of this register is the cable detect bit */
+	pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
+	if ((tmp & 0x8000) && !sis_short_ata40(pdev))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+/**
+ *	sis_66_cable_detect	-	check for 40/80 pin
+ *	@ap: Port
+ *
+ *	Perform cable detection on the UDMA66, UDMA100 and early UDMA133
+ *	SiS IDE controllers.
+ */
+
+static int sis_66_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	/* Older chips keep cable detect in bits 4/5 of reg 0x48 */
+	pci_read_config_byte(pdev, 0x48, &tmp);
+	tmp >>= ap->port_no;
+	if ((tmp & 0x10) && !sis_short_ata40(pdev))
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+
+/**
+ *	sis_pre_reset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int sis_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits sis_enable_bits[] = {
+		{ 0x4aU, 1U, 0x02UL, 0x02UL },	/* port 0 */
+		{ 0x4aU, 1U, 0x04UL, 0x04UL },	/* port 1 */
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+
+/**
+ *	sis_error_handler - Perform PATA error handling
+ *	@ap: Port to handle
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, sis_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	sis_set_fifo	-	Set RWP fifo bits for this device
+ *	@ap: Port
+ *	@adev: Device
+ *
+ *	SIS chipsets implement prefetch/postwrite bits for each device
+ *	on both channels. This functionality is not ATAPI compatible and
+ *	must be configured according to the class of device present
+ */
+
+static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 fifoctrl;
+	u8 mask = 0x11;
+
+	mask <<= (2 * ap->port_no);
+	mask <<= adev->devno;
+
+	/* This holds various bits including the FIFO control */
+	pci_read_config_byte(pdev, 0x4B, &fifoctrl);
+	fifoctrl &= ~mask;
+
+	/* Enable for ATA (disk) only */
+	if (adev->class == ATA_DEV_ATA)
+		fifoctrl |= mask;
+	pci_write_config_byte(pdev, 0x4B, fifoctrl);
+}
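+
+/*
+ * Worked example: the mask above is 0x11 << (2 * port_no) << devno, i.e.
+ * 0x11, 0x22, 0x44 and 0x88 for primary master, primary slave, secondary
+ * master and secondary slave respectively.
+ */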
+
+/**
+ *	sis_old_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring for.
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	function handles PIO set up for all chips that are pre ATA100 and
+ *	also early ATA100 devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int port = sis_old_port_base(adev);
+	u8 t1, t2;
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	const u8 active[]   = { 0x00, 0x07, 0x04, 0x03, 0x01 };
+	const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
+
+	sis_set_fifo(ap, adev);
+
+	pci_read_config_byte(pdev, port, &t1);
+	pci_read_config_byte(pdev, port + 1, &t2);
+
+	t1 &= ~0x0F;	/* Clear active/recovery timings */
+	t2 &= ~0x07;
+
+	t1 |= active[speed];
+	t2 |= recovery[speed];
+
+	pci_write_config_byte(pdev, port, t1);
+	pci_write_config_byte(pdev, port + 1, t2);
+}
+
+/**
+ *	sis_100_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring for.
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	function handles PIO set up for ATA100 devices and early ATA133.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int port = sis_old_port_base(adev);
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
+
+	sis_set_fifo(ap, adev);
+
+	pci_write_config_byte(pdev, port, actrec[speed]);
+}
+
+/**
+ *	sis_133_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device we are configuring for.
+ *
+ *	Set PIO mode for device, in host controller PCI config space. This
+ *	function handles PIO set up for the later ATA133 devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int port = 0x40;
+	u32 t1;
+	u32 reg54;
+	int speed = adev->pio_mode - XFER_PIO_0;
+
+	const u32 timing133[] = {
+		0x28269000,	/* Recovery << 24 | Act << 16 | Ini << 12 */
+		0x0C266000,
+		0x04263000,
+		0x0C0A3000,
+		0x05093000
+	};
+	const u32 timing100[] = {
+		0x1E1C6000,	/* Recovery << 24 | Act << 16 | Ini << 12 */
+		0x091C4000,
+		0x031C2000,
+		0x09072000,
+		0x04062000
+	};
+
+	sis_set_fifo(ap, adev);
+
+	/* If bit 30 of the dword at 0x54 is set then the timing registers
+	   are mapped at 0x70 not 0x40 */
+	pci_read_config_dword(pdev, 0x54, &reg54);
+	if (reg54 & 0x40000000)
+		port = 0x70;
+	port += 8 * ap->port_no +  4 * adev->devno;
+
+	pci_read_config_dword(pdev, port, &t1);
+	t1 &= 0xC0C00FFF;	/* Mask out timing */
+
+	if (t1 & 0x08)		/* 100 or 133 ? */
+		t1 |= timing133[speed];
+	else
+		t1 |= timing100[speed];
+	pci_write_config_dword(pdev, port, t1);
+}
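+
+/*
+ * Worked example: with bit 30 of config dword 0x54 clear, the secondary
+ * master (port 1, device 0) timing dword sits at 0x40 + 8 = 0x48; with
+ * that bit set it moves to 0x70 + 8 = 0x78.
+ */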
+
+/**
+ *	sis_old_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles pre UDMA and UDMA33 devices. Supports MWDMA as well unlike
+ *	the old ide/pci driver.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u16 timing;
+
+	const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+	const u16 udma_bits[]  = { 0xE000, 0xC000, 0xA000 };
+
+	pci_read_config_word(pdev, drive_pci, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* bits 3-0 hold recovery timing, bits 8-10 active timing and
+		   the higher bits are dependent on the device */
+		timing &= ~0x870F;
+		timing |= mwdma_bits[speed];
+		pci_write_config_word(pdev, drive_pci, timing);
+	} else {
+		/* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0x6000;
+		timing |= udma_bits[speed];
+		pci_write_config_word(pdev, drive_pci, timing);
+	}
+}
+
+/**
+ *	sis_66_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles UDMA66 and early UDMA100 devices. Supports MWDMA as well unlike
+ *	the old ide/pci driver.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u16 timing;
+
+	const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
+
+	pci_read_config_word(pdev, drive_pci, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* bits 3-0 hold recovery timing, bits 8-10 active timing and
+		   the higher bits are dependent on the device, bit 15 udma */
+		timing &= ~0x870F;
+		timing |= mwdma_bits[speed];
+	} else {
+		/* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0xF000;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_word(pdev, drive_pci, timing);
+}
+
+/**
+ *	sis_100_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *	Handles UDMA66 and early UDMA100 devices.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u8 timing;
+
+	const u8 udma_bits[]  = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
+
+	pci_read_config_byte(pdev, drive_pci + 1, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
+	} else {
+		/* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0x8F;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_byte(pdev, drive_pci + 1, timing);
+}
+
+/**
+ *	sis_133_early_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA mode for device, in host controller PCI config space.
+ *	Handles early SiS 961 bridges. MWDMA setup is not yet implemented
+ *	(see the note in the code below).
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int drive_pci = sis_old_port_base(adev);
+	u8 timing;
+	/* Low 4 bits are timing */
+	static const u8 udma_bits[]  = { 0x8F, 0x8A, 0x87, 0x85, 0x83, 0x82, 0x81};
+
+	pci_read_config_byte(pdev, drive_pci + 1, &timing);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		/* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
+	} else {
+		/* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
+		speed = adev->dma_mode - XFER_UDMA_0;
+		timing &= ~0x8F;
+		timing |= udma_bits[speed];
+	}
+	pci_write_config_byte(pdev, drive_pci + 1, timing);
+}
+
+/**
+ *	sis_133_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set UDMA mode for device, in host controller PCI config space.
+ *	Handles the later, fully UDMA133 capable, SiS bridges. MWDMA setup
+ *	is not yet implemented (see the FIXME in the code below).
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
+	int speed = adev->dma_mode - XFER_MW_DMA_0;
+	int port = 0x40;
+	u32 t1;
+	u32 reg54;
+
+	/* bits 4- cycle time 8 - cvs time */
+	static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
+	static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
+
+	/* If bit 30 of the dword at 0x54 is set then the timing registers
+	   are mapped at 0x70 not 0x40 */
+	pci_read_config_dword(pdev, 0x54, &reg54);
+	if (reg54 & 0x40000000)
+		port = 0x70;
+	port += (8 * ap->port_no) +  (4 * adev->devno);
+
+	pci_read_config_dword(pdev, port, &t1);
+
+	if (adev->dma_mode < XFER_UDMA_0) {
+		t1 &= ~0x00000004;
+		/* FIXME: need data sheet to add MWDMA here. Also lacking on
+		   ide/pci driver */
+	} else {
+		speed = adev->dma_mode - XFER_UDMA_0;
+		/* if & 8 no UDMA133 - need info for ... */
+		t1 &= ~0x00000FF0;
+		t1 |= 0x00000004;
+		if (t1 & 0x08)
+			t1 |= timing_u133[speed];
+		else
+			t1 |= timing_u100[speed];
+	}
+	pci_write_config_dword(pdev, port, t1);
+}
+
+static struct scsi_host_template sis_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations sis_133_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= sis_133_set_piomode,
+	.set_dmamode		= sis_133_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= sis_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= sis_133_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations sis_133_early_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= sis_100_set_piomode,
+	.set_dmamode		= sis_133_early_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= sis_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= sis_66_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations sis_100_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= sis_100_set_piomode,
+	.set_dmamode		= sis_100_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= sis_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= sis_66_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations sis_66_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= sis_old_set_piomode,
+	.set_dmamode		= sis_66_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+	.cable_detect		= sis_66_cable_detect,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= sis_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations sis_old_ops = {
+	.port_disable		= ata_port_disable,
+	.set_piomode		= sis_old_set_piomode,
+	.set_dmamode		= sis_old_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= sis_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_info sis_info = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.mwdma_mask	= 0x07,
+	.udma_mask	= 0,
+	.port_ops	= &sis_old_ops,
+};
+static const struct ata_port_info sis_info33 = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.mwdma_mask	= 0x07,
+	.udma_mask	= ATA_UDMA2,	/* UDMA 33 */
+	.port_ops	= &sis_old_ops,
+};
+static const struct ata_port_info sis_info66 = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.udma_mask	= ATA_UDMA4,	/* UDMA 66 */
+	.port_ops	= &sis_66_ops,
+};
+static const struct ata_port_info sis_info100 = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.udma_mask	= ATA_UDMA5,
+	.port_ops	= &sis_100_ops,
+};
+static const struct ata_port_info sis_info100_early = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.udma_mask	= ATA_UDMA5,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.port_ops	= &sis_66_ops,
+};
+const struct ata_port_info sis_info133 = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &sis_133_ops,
+};
+static const struct ata_port_info sis_info133_early = {
+	.sht		= &sis_sht,
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.pio_mask	= 0x1f,	/* pio0-4 */
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &sis_133_early_ops,
+};
+
+/* Privately shared with the SiS180 SATA driver, not for use elsewhere */
+EXPORT_SYMBOL_GPL(sis_info133);
+
+static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
+{
+	u16 regw;
+	u8 reg;
+
+	if (sis->info == &sis_info133) {
+		pci_read_config_word(pdev, 0x50, &regw);
+		if (regw & 0x08)
+			pci_write_config_word(pdev, 0x50, regw & ~0x08);
+		pci_read_config_word(pdev, 0x52, &regw);
+		if (regw & 0x08)
+			pci_write_config_word(pdev, 0x52, regw & ~0x08);
+		return;
+	}
+
+	if (sis->info == &sis_info133_early || sis->info == &sis_info100) {
+		/* Fix up latency */
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+		/* Set compatibility bit */
+		pci_read_config_byte(pdev, 0x49, &reg);
+		if (!(reg & 0x01))
+			pci_write_config_byte(pdev, 0x49, reg | 0x01);
+		return;
+	}
+
+	if (sis->info == &sis_info66 || sis->info == &sis_info100_early) {
+		/* Fix up latency */
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+		/* Set compatibility bit */
+		pci_read_config_byte(pdev, 0x52, &reg);
+		if (!(reg & 0x04))
+			pci_write_config_byte(pdev, 0x52, reg | 0x04);
+		return;
+	}
+
+	if (sis->info == &sis_info33) {
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &reg);
+		if (( reg & 0x0F ) != 0x00)
+			pci_write_config_byte(pdev, PCI_CLASS_PROG, reg & 0xF0);
+		/* Fall through to ATA16 fixup below */
+	}
+
+	if (sis->info == &sis_info || sis->info == &sis_info33) {
+		/* force per drive recovery and active timings
+		   needed on ATA_33 and below chips */
+		pci_read_config_byte(pdev, 0x52, &reg);
+		if (!(reg & 0x08))
+			pci_write_config_byte(pdev, 0x52, reg|0x08);
+		return;
+	}
+
+	BUG();
+}
+
+/**
+ *	sis_init_one - Register SiS ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in sis_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	struct ata_port_info port;
+	const struct ata_port_info *ppi[] = { &port, NULL };
+	struct pci_dev *host = NULL;
+	struct sis_chipset *chipset = NULL;
+	struct sis_chipset *sets;
+
+	static struct sis_chipset sis_chipsets[] = {
+
+		{ 0x0968, &sis_info133 },
+		{ 0x0966, &sis_info133 },
+		{ 0x0965, &sis_info133 },
+		{ 0x0745, &sis_info100 },
+		{ 0x0735, &sis_info100 },
+		{ 0x0733, &sis_info100 },
+		{ 0x0635, &sis_info100 },
+		{ 0x0633, &sis_info100 },
+
+		{ 0x0730, &sis_info100_early },	/* 100 with ATA 66 layout */
+		{ 0x0550, &sis_info100_early },	/* 100 with ATA 66 layout */
+
+		{ 0x0640, &sis_info66 },
+		{ 0x0630, &sis_info66 },
+		{ 0x0620, &sis_info66 },
+		{ 0x0540, &sis_info66 },
+		{ 0x0530, &sis_info66 },
+
+		{ 0x5600, &sis_info33 },
+		{ 0x5598, &sis_info33 },
+		{ 0x5597, &sis_info33 },
+		{ 0x5591, &sis_info33 },
+		{ 0x5582, &sis_info33 },
+		{ 0x5581, &sis_info33 },
+
+		{ 0x5596, &sis_info },
+		{ 0x5571, &sis_info },
+		{ 0x5517, &sis_info },
+		{ 0x5511, &sis_info },
+
+		{0}
+	};
+	static struct sis_chipset sis133_early = {
+		0x0, &sis_info133_early
+	};
+	static struct sis_chipset sis133 = {
+		0x0, &sis_info133
+	};
+	static struct sis_chipset sis100_early = {
+		0x0, &sis_info100_early
+	};
+	static struct sis_chipset sis100 = {
+		0x0, &sis_info100
+	};
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+
+	/* We have to find the bridge first */
+
+	for (sets = &sis_chipsets[0]; sets->device; sets++) {
+		host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
+		if (host != NULL) {
+			chipset = sets;			/* Match found */
+			if (sets->device == 0x630) {	/* SIS630 */
+				u8 host_rev;
+				pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
+				if (host_rev >= 0x30)	/* 630 ET */
+					chipset = &sis100_early;
+			}
+			break;
+		}
+	}
+
+	/* Look for concealed bridges */
+	if (chipset == NULL) {
+		/* Second check */
+		u32 idemisc;
+		u16 trueid;
+
+		/* Disable ID masking and register remapping then
+		   see what the real ID is */
+
+		pci_read_config_dword(pdev, 0x54, &idemisc);
+		pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
+		pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
+		pci_write_config_dword(pdev, 0x54, idemisc);
+
+		switch(trueid) {
+		case 0x5518:	/* SIS 962/963 */
+			chipset = &sis133;
+			if ((idemisc & 0x40000000) == 0) {
+				pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
+				printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n");
+			}
+			break;
+		case 0x0180:	/* SIS 965/965L */
+			chipset =  &sis133;
+			break;
+		case 0x1180:	/* SIS 966/966L */
+			chipset =  &sis133;
+			break;
+		}
+	}
+
+	/* Further check */
+	if (chipset == NULL) {
+		struct pci_dev *lpc_bridge;
+		u16 trueid;
+		u8 prefctl;
+		u8 idecfg;
+		u8 sbrev;
+
+		/* Try the second unmasking technique */
+		pci_read_config_byte(pdev, 0x4a, &idecfg);
+		pci_write_config_byte(pdev, 0x4a, idecfg | 0x10);
+		pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
+		pci_write_config_byte(pdev, 0x4a, idecfg);
+
+		switch(trueid) {
+		case 0x5517:
+			lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
+			if (lpc_bridge == NULL)
+				break;
+			pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev);
+			pci_read_config_byte(pdev, 0x49, &prefctl);
+			pci_dev_put(lpc_bridge);
+
+			if (sbrev == 0x10 && (prefctl & 0x80)) {
+				chipset = &sis133_early;
+				break;
+			}
+			chipset = &sis100;
+			break;
+		}
+	}
+	pci_dev_put(host);
+
+	/* No chipset info, no support */
+	if (chipset == NULL)
+		return -ENODEV;
+
+	port = *chipset->info;
+	port.private_data = chipset;
+
+	sis_fixup(pdev, chipset);
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id sis_pci_tbl[] = {
+	{ PCI_VDEVICE(SI, 0x5513), },	/* SiS 5513 */
+	{ PCI_VDEVICE(SI, 0x5518), },	/* SiS 5518 */
+
+	{ }
+};
+
+static struct pci_driver sis_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sis_pci_tbl,
+	.probe			= sis_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init sis_init(void)
+{
+	return pci_register_driver(&sis_pci_driver);
+}
+
+static void __exit sis_exit(void)
+{
+	pci_unregister_driver(&sis_pci_driver);
+}
+
+module_init(sis_init);
+module_exit(sis_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
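A note for review: the concealed-bridge handling in sis_init_one() above works by clearing bit 31 of config register 0x54, which turns off the ID masking so the true device ID can be read, and then restoring the register. A minimal sketch of that read-modify-restore sequence pulled out as a standalone helper (sis_read_true_id() is a hypothetical name, not part of the patch):

/* Sketch only: read the true PCI device ID of a SiS bridge that masks
   its ID, mirroring the sequence used inline in sis_init_one(). */
static u16 sis_read_true_id(struct pci_dev *pdev)
{
	u32 idemisc;
	u16 trueid;

	/* Disable ID masking and register remapping, read the real ID,
	   then put the original value back. */
	pci_read_config_dword(pdev, 0x54, &idemisc);
	pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
	pci_write_config_dword(pdev, 0x54, idemisc);
	return trueid;
}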
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_sl82c105.c linux-2.6.18.x86_64.p4/drivers/ata/pata_sl82c105.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_sl82c105.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_sl82c105.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,369 @@
+/*
+ * pata_sl82c105.c 	- SL82C105 PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * Based in part on linux/drivers/ide/pci/sl82c105.c
+ * 		SL82C105/Winbond 553 IDE driver
+ *
+ * and in part on the documentation and errata sheet
+ *
+ *
+ * Note: The controller, like many controllers, has shared timings for
+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
+ * in the dma_stop function. Thus we actually don't need a set_dmamode
+ * method as the PIO method is always called and will set the right PIO
+ * timing parameters.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_sl82c105"
+#define DRV_VERSION "0.3.1"
+
+enum {
+	/*
+	 * SL82C105 PCI config register 0x40 bits.
+	 */
+	CTRL_IDE_IRQB	=	(1 << 30),
+	CTRL_IDE_IRQA   =	(1 << 28),
+	CTRL_LEGIRQ     =	(1 << 11),
+	CTRL_P1F16      =	(1 << 5),
+	CTRL_P1EN       =	(1 << 4),
+	CTRL_P0F16      =	(1 << 1),
+	CTRL_P0EN       =	(1 << 0)
+};
+
+/**
+ *	sl82c105_pre_reset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int sl82c105_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits sl82c105_enable_bits[] = {
+		{ 0x40, 1, 0x01, 0x01 },
+		{ 0x40, 1, 0x10, 0x10 }
+	};
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
+		return -ENOENT;
+	return ata_std_prereset(ap, deadline);
+}
+
+
+static void sl82c105_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+
+/**
+ *	sl82c105_configure_piomode	-	set chip PIO timing
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@pio: PIO mode
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	so a configure_dmamode call will undo any work we do here and vice
+ *	versa
+ */
+
+static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static u16 pio_timing[5] = {
+		0x50D, 0x407, 0x304, 0x242, 0x240
+	};
+	u16 dummy;
+	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
+
+	pci_write_config_word(pdev, timing, pio_timing[pio]);
+	/* Can we lose this oddity of the old driver */
+	pci_read_config_word(pdev, timing, &dummy);
+}
+
+/**
+ *	sl82c105_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Called to do the PIO mode setup. Our timing registers are shared
+ *	but we want to set the PIO timing by default.
+ */
+
+static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ *	sl82c105_configure_dmamode	-	set DMA mode in chip
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Load DMA cycle times into the chip ready for a DMA transfer
+ *	to occur.
+ */
+
+static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static u16 dma_timing[3] = {
+		0x707, 0x201, 0x200
+	};
+	u16 dummy;
+	int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
+	int dma = adev->dma_mode - XFER_MW_DMA_0;
+
+	pci_write_config_word(pdev, timing, dma_timing[dma]);
+	/* Can we lose this oddity of the old driver */
+	pci_read_config_word(pdev, timing, &dummy);
+}
+
+/**
+ *	sl82c105_reset_engine	-	Reset the DMA engine
+ *	@ap: ATA interface
+ *
+ *	The sl82c105 has some serious problems with the DMA engine
+ *	when transfers don't run as expected or ATAPI is used. The
+ *	recommended fix is to reset the engine on each use, using a chip
+ *	test register.
+ */
+
+static void sl82c105_reset_engine(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u16 val;
+
+	pci_read_config_word(pdev, 0x7E, &val);
+	pci_write_config_word(pdev, 0x7E, val | 4);
+	pci_write_config_word(pdev, 0x7E, val & ~4);
+}
+
+/**
+ *	sl82c105_bmdma_start		-	DMA engine begin
+ *	@qc: ATA command
+ *
+ *	Reset the DMA engine on each use, as recommended by the errata
+ *	document.
+ *
+ *	FIXME: if we switch clock at BMDMA start/end we might get better
+ *	PIO performance on DMA capable devices.
+ */
+
+static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	udelay(100);
+	sl82c105_reset_engine(ap);
+	udelay(100);
+
+	/* Set the clocks for DMA */
+	sl82c105_configure_dmamode(ap, qc->dev);
+	/* Activate DMA */
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	sl82c105_bmdma_stop		-	DMA engine stop
+ *	@qc: ATA command
+ *
+ *	Reset the DMA engine on each use, as recommended by the errata
+ *	document.
+ *
+ *	This function is also called to turn off DMA when a timeout occurs
+ *	during DMA operation. In both cases we need to reset the engine,
+ *	so no actual eng_timeout handler is required.
+ *
+ *	We assume bmdma_stop is always called if bmdma_start was called. If
+ *	not then we may need to wrap qc_issue.
+ */
+
+static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	ata_bmdma_stop(qc);
+	sl82c105_reset_engine(ap);
+	udelay(100);
+
+	/* This will redo the initial setup of the DMA device to matching
+	   PIO timings */
+	sl82c105_set_piomode(ap, qc->dev);
+}
+
+static struct scsi_host_template sl82c105_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations sl82c105_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= sl82c105_set_piomode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= sl82c105_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= sl82c105_bmdma_start,
+	.bmdma_stop	= sl82c105_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	sl82c105_bridge_revision	-	find bridge version
+ *	@pdev: PCI device for the ATA function
+ *
+ *	Locates the PCI bridge associated with the ATA function and,
+ *	provided it is a Winbond 553, reports the revision. If it cannot
+ *	find a revision or the right device it returns -1.
+ */
+
+static int sl82c105_bridge_revision(struct pci_dev *pdev)
+{
+	struct pci_dev *bridge;
+	u8 rev;
+
+	/*
+	 * The bridge should be part of the same device, but function 0.
+	 */
+	bridge = pci_get_slot(pdev->bus,
+			       PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+	if (!bridge)
+		return -1;
+
+	/*
+	 * Make sure it is a Winbond 553 and is an ISA bridge.
+	 */
+	if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
+	    bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
+	    bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
+	    	pci_dev_put(bridge);
+		return -1;
+	}
+	/*
+	 * We need to find function 0's revision, not function 1
+	 */
+	pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
+
+	pci_dev_put(bridge);
+	return rev;
+}
+
+
+static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info_dma = {
+		.sht = &sl82c105_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &sl82c105_port_ops
+	};
+	static const struct ata_port_info info_early = {
+		.sht = &sl82c105_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.port_ops = &sl82c105_port_ops
+	};
+	/* for now use only the first port */
+	const struct ata_port_info *ppi[] = { &info_early,
+					       &ata_dummy_port_info };
+	u32 val;
+	int rev;
+
+	rev = sl82c105_bridge_revision(dev);
+
+	if (rev == -1)
+		dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
+	else if (rev <= 5)
+		dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
+	else
+		ppi[0] = &info_dma;
+
+	pci_read_config_dword(dev, 0x40, &val);
+	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+	pci_write_config_dword(dev, 0x40, val);
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id sl82c105[] = {
+	{ PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
+
+	{ },
+};
+
+static struct pci_driver sl82c105_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= sl82c105,
+	.probe 		= sl82c105_init_one,
+	.remove		= ata_pci_remove_one
+};
+
+static int __init sl82c105_init(void)
+{
+	return pci_register_driver(&sl82c105_pci_driver);
+}
+
+static void __exit sl82c105_exit(void)
+{
+	pci_unregister_driver(&sl82c105_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Sl82c105");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sl82c105);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sl82c105_init);
+module_exit(sl82c105_exit);
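A note for review: the shared PIO/DMA timing registers used above are one 16-bit word per drive, at config offset 0x44 + 8 * port + 4 * devno (0x44/0x48 for the primary channel, 0x4C/0x50 for the secondary). A minimal sketch of that address calculation as a standalone helper (sl82c105_timing_reg() is a hypothetical name, not part of the patch):

/* Sketch only: config-space offset of the shared PIO/DMA timing word for
   a drive, as computed inline in sl82c105_configure_piomode()/_dmamode(). */
static int sl82c105_timing_reg(int port_no, int devno)
{
	return 0x44 + (8 * port_no) + (4 * devno);
}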
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_triflex.c linux-2.6.18.x86_64.p4/drivers/ata/pata_triflex.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_triflex.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_triflex.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,286 @@
+/*
+ * pata_triflex.c 	- Compaq PATA for new ATA layer
+ *			  (C) 2005 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ * based upon
+ *
+ * triflex.c
+ *
+ * IDE Chipset driver for the Compaq TriFlex IDE controller.
+ *
+ * Known to work with the Compaq Workstation 5x00 series.
+ *
+ * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
+ * Author: Torben Mathiasen <torben.mathiasen@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ *	Not publicly available.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_triflex"
+#define DRV_VERSION "0.2.8"
+
+/**
+ *	triflex_prereset		-	probe begin
+ *	@ap: ATA port
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Set up cable type and use generic probe init
+ */
+
+static int triflex_prereset(struct ata_port *ap, unsigned long deadline)
+{
+	static const struct pci_bits triflex_enable_bits[] = {
+		{ 0x80, 1, 0x01, 0x01 },
+		{ 0x80, 1, 0x02, 0x02 }
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+	if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
+		return -ENOENT;
+
+	return ata_std_prereset(ap, deadline);
+}
+
+
+
+static void triflex_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, triflex_prereset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	triflex_load_timing		-	timing configuration
+ *	@ap: ATA interface
+ *	@adev: Device on the bus
+ *	@speed: speed to configure
+ *
+ *	The Triflex has one set of timings per device per channel. This
+ *	means we must do some switching. As the PIO and DMA timings don't
+ *	match we have to do some reloading unlike PIIX devices where tuning
+ *	tricks can avoid it.
+ */
+
+static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 timing = 0;
+	u32 triflex_timing, old_triflex_timing;
+	int channel_offset = ap->port_no ? 0x74: 0x70;
+	unsigned int is_slave	= (adev->devno != 0);
+
+
+	pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
+	triflex_timing = old_triflex_timing;
+
+	switch(speed)
+	{
+		case XFER_MW_DMA_2:
+			timing = 0x0103;break;
+		case XFER_MW_DMA_1:
+			timing = 0x0203;break;
+		case XFER_MW_DMA_0:
+			timing = 0x0808;break;
+		case XFER_SW_DMA_2:
+		case XFER_SW_DMA_1:
+		case XFER_SW_DMA_0:
+			timing = 0x0F0F;break;
+		case XFER_PIO_4:
+			timing = 0x0202;break;
+		case XFER_PIO_3:
+			timing = 0x0204;break;
+		case XFER_PIO_2:
+			timing = 0x0404;break;
+		case XFER_PIO_1:
+			timing = 0x0508;break;
+		case XFER_PIO_0:
+			timing = 0x0808;break;
+		default:
+			BUG();
+	}
+	triflex_timing &= ~ (0xFFFF << (16 * is_slave));
+	triflex_timing |= (timing << (16 * is_slave));
+
+	if (triflex_timing != old_triflex_timing)
+		pci_write_config_dword(pdev, channel_offset, triflex_timing);
+}
+
+/**
+ *	triflex_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Use the timing loader to set up the PIO mode. We have to do this
+ *	because DMA start/stop will only be called once DMA occurs. If there
+ *	has been no DMA then the PIO timings are still needed.
+ */
+static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	triflex_load_timing(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	triflex_bmdma_start	-	DMA start callback
+ *	@qc: Command in progress
+ *
+ *	Usually drivers set the DMA timing at the point the set_dmamode call
+ *	is made. Triflex, however, requires we load new timings on the
+ *	transition or keep matching PIO/DMA pairs (i.e. MWDMA2/PIO4 etc).
+ *	We load the DMA timings just before starting DMA and then restore
+ *	the PIO timing when the DMA is finished.
+ */
+
+static void triflex_bmdma_start(struct ata_queued_cmd *qc)
+{
+	triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	triflex_bmdma_stop	-	DMA stop callback
+ *	@qc: Command that has completed DMA
+ *
+ *	We loaded new timings in bmdma_start; as a result we need to
+ *	restore the PIO timings in bmdma_stop so that the next command
+ *	issued gets the right clock values. The port and device are
+ *	taken from @qc.
+ */
+
+static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+	triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+static struct scsi_host_template triflex_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations triflex_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= triflex_set_piomode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= triflex_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= triflex_bmdma_start,
+	.bmdma_stop	= triflex_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &triflex_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &triflex_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	static int printed_version;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id triflex[] = {
+	{ PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), },
+
+	{ },
+};
+
+static struct pci_driver triflex_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= triflex,
+	.probe 		= triflex_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init triflex_init(void)
+{
+	return pci_register_driver(&triflex_pci_driver);
+}
+
+static void __exit triflex_exit(void)
+{
+	pci_unregister_driver(&triflex_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, triflex);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(triflex_init);
+module_exit(triflex_exit);
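A note for review: triflex_load_timing() above keeps one 32-bit timing register per channel (0x70 primary, 0x74 secondary), with the master timing in the low 16 bits and the slave timing in the high 16 bits. A minimal sketch of the merge step only (triflex_merge_timing() is a hypothetical name, not part of the patch):

/* Sketch only: fold a new 16-bit drive timing into the per-channel dword,
   leaving the other drive's half untouched, as triflex_load_timing() does. */
static u32 triflex_merge_timing(u32 old, u16 timing, int is_slave)
{
	u32 val = old & ~(0xFFFFu << (16 * is_slave));

	return val | ((u32)timing << (16 * is_slave));
}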
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_via.c linux-2.6.18.x86_64.p4/drivers/ata/pata_via.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_via.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_via.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,661 @@
+/*
+ * pata_via.c 	- VIA PATA for new ATA layer
+ *			  (C) 2005-2006 Red Hat Inc
+ *			  Alan Cox <alan@redhat.com>
+ *
+ *  Documentation
+ *	Most chipset documentation available under NDA only
+ *
+ *  VIA version guide
+ *	VIA VT82C561	-	early design, uses ata_generic currently
+ *	VIA VT82C576	-	MWDMA, 33MHz
+ *	VIA VT82C586	-	MWDMA, 33MHz
+ *	VIA VT82C586a	-	Added UDMA to 33MHz
+ *	VIA VT82C586b	-	UDMA33
+ *	VIA VT82C596a	-	Nonfunctional UDMA66
+ *	VIA VT82C596b	-	Working UDMA66
+ *	VIA VT82C686	-	Nonfunctional UDMA66
+ *	VIA VT82C686a	-	Working UDMA66
+ *	VIA VT82C686b	-	Updated to UDMA100
+ *	VIA VT8231	-	UDMA100
+ *	VIA VT8233	-	UDMA100
+ *	VIA VT8233a	-	UDMA133
+ *	VIA VT8233c	-	UDMA100
+ *	VIA VT8235	-	UDMA133
+ *	VIA VT8237	-	UDMA133
+ *	VIA VT8237S	-	UDMA133
+ *	VIA VT8251	-	UDMA133
+ *
+ *	Most registers remain compatible across chips. Others start reserved
+ *	and acquire sensible semantics if set to 1 (eg cable detect). A few
+ *	exceptions exist, notably around the FIFO settings.
+ *
+ *	One additional quirk of the VIA design is that, like ALi, they use few
+ *	PCI IDs for a lot of chips.
+ *
+ *	Based heavily on:
+ *
+ * Version 3.38
+ *
+ * VIA IDE driver for Linux. Supported southbridges:
+ *
+ *   vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
+ *   vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
+ *   vt8235, vt8237
+ *
+ * Copyright (c) 2000-2002 Vojtech Pavlik
+ *
+ * Based on the work of:
+ *	Michel Aubry
+ *	Jeff Garzik
+ *	Andre Hedrick
+
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_via"
+#define DRV_VERSION "0.3.1"
+
+/*
+ *	The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
+ *	driver.
+ */
+
+enum {
+	VIA_UDMA	= 0x007,
+	VIA_UDMA_NONE	= 0x000,
+	VIA_UDMA_33	= 0x001,
+	VIA_UDMA_66	= 0x002,
+	VIA_UDMA_100	= 0x003,
+	VIA_UDMA_133	= 0x004,
+	VIA_BAD_PREQ	= 0x010, /* Crashes if PREQ# till DDACK# set */
+	VIA_BAD_CLK66	= 0x020, /* 66 MHz clock doesn't work correctly */
+	VIA_SET_FIFO	= 0x040, /* Needs to have FIFO split set */
+	VIA_NO_UNMASK	= 0x080, /* Doesn't work with IRQ unmasking on */
+	VIA_BAD_ID	= 0x100, /* Has wrong vendor ID (0x1107) */
+	VIA_BAD_AST	= 0x200, /* Don't touch Address Setup Timing */
+	VIA_NO_ENABLES	= 0x400, /* Has no enablebits */
+};
+
+/*
+ * VIA SouthBridge chips.
+ */
+
+static const struct via_isa_bridge {
+	const char *name;
+	u16 id;
+	u8 rev_min;
+	u8 rev_max;
+	u16 flags;
+} via_isa_bridges[] = {
+	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "vt8251",	PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "vt6410",	PCI_DEVICE_ID_VIA_6410,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
+	{ "vt8237a",	PCI_DEVICE_ID_VIA_8237A,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "vt8237",	PCI_DEVICE_ID_VIA_8237,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "vt8235",	PCI_DEVICE_ID_VIA_8235,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "vt8233a",	PCI_DEVICE_ID_VIA_8233A,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+	{ "vt8233c",	PCI_DEVICE_ID_VIA_8233C_0,  0x00, 0x2f, VIA_UDMA_100 },
+	{ "vt8233",	PCI_DEVICE_ID_VIA_8233_0,   0x00, 0x2f, VIA_UDMA_100 },
+	{ "vt8231",	PCI_DEVICE_ID_VIA_8231,     0x00, 0x2f, VIA_UDMA_100 },
+	{ "vt82c686b",	PCI_DEVICE_ID_VIA_82C686,   0x40, 0x4f, VIA_UDMA_100 },
+	{ "vt82c686a",	PCI_DEVICE_ID_VIA_82C686,   0x10, 0x2f, VIA_UDMA_66 },
+	{ "vt82c686",	PCI_DEVICE_ID_VIA_82C686,   0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
+	{ "vt82c596b",	PCI_DEVICE_ID_VIA_82C596,   0x10, 0x2f, VIA_UDMA_66 },
+	{ "vt82c596a",	PCI_DEVICE_ID_VIA_82C596,   0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ },
+	{ "vt82c586b",	PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO },
+	{ "vt82c586a",	PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO },
+	{ "vt82c586",	PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
+	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
+	{ "vt82c576",	PCI_DEVICE_ID_VIA_82C576,   0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+	{ NULL }
+};
+
+
+/*
+ *	Cable special cases
+ */
+
+static struct dmi_system_id cable_dmi_table[] = {
+	{
+		.ident = "Acer Ferrari 3400",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
+			DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
+		},
+	},
+	{ }
+};
+
+static int via_cable_override(struct pci_dev *pdev)
+{
+	/* Systems by DMI */
+	if (dmi_check_system(cable_dmi_table))
+		return 1;
+	return 0;
+}
+
+
+/**
+ *	via_cable_detect	-	cable detection
+ *	@ap: ATA port
+ *
+ *	Perform cable detection. Actually for the VIA case the BIOS
+ *	already did this for us. We read the values provided by the
+ *	BIOS. If you are using an 8235 in a non-PC configuration you
+ *	may need to update this code.
+ *
+ *	Hotplug also impacts on this.
+ */
+
+static int via_cable_detect(struct ata_port *ap) {
+	const struct via_isa_bridge *config = ap->host->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 ata66;
+
+	if (via_cable_override(pdev))
+		return ATA_CBL_PATA40_SHORT;
+
+	/* Early chips are 40 wire */
+	if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
+		return ATA_CBL_PATA40;
+	/* UDMA 66 chips have only drive side logic */
+	else if((config->flags & VIA_UDMA) < VIA_UDMA_100)
+		return ATA_CBL_PATA_UNK;
+	/* UDMA 100 or later */
+	pci_read_config_dword(pdev, 0x50, &ata66);
+	/* Check both the drive cable reporting bits, we might not have
+	   two drives */
+	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
+		return ATA_CBL_PATA80;
+	return ATA_CBL_PATA40;
+}
+
+static int via_pre_reset(struct ata_port *ap, unsigned long deadline)
+{
+	const struct via_isa_bridge *config = ap->host->private_data;
+
+	if (!(config->flags & VIA_NO_ENABLES)) {
+		static const struct pci_bits via_enable_bits[] = {
+			{ 0x40, 1, 0x02, 0x02 },
+			{ 0x40, 1, 0x01, 0x01 }
+		};
+		struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+		if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no]))
+			return -ENOENT;
+	}
+
+	return ata_std_prereset(ap, deadline);
+}
+
+
+/**
+ *	via_error_handler		-	reset for VIA chips
+ *	@ap: ATA port
+ *
+ *	Handle the reset callback for the later chips with cable detect
+ */
+
+static void via_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
+}
+
+/**
+ *	via_do_set_mode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *	@mode: ATA mode being programmed
+ *	@tdiv: Clocks per PCI clock
+ *	@set_ast: Set to program address setup
+ *	@udma_type: UDMA mode/format of registers
+ *
+ *	Program the VIA registers for DMA and PIO modes. Uses the ata timing
+ *	support in order to compute modes.
+ *
+ *	FIXME: Hotplug will require we serialize multiple mode changes
+ *	on the two channels.
+ */
+
+static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mode, int tdiv, int set_ast, int udma_type)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *peer = ata_dev_pair(adev);
+	struct ata_timing t, p;
+	static int via_clock = 33333;	/* Bus clock in kHz - ought to be tunable one day */
+	unsigned long T =  1000000000 / via_clock;
+	unsigned long UT = T/tdiv;
+	int ut;
+	int offset = 3 - (2*ap->port_no) - adev->devno;
+
+
+	/* Calculate the timing values we require */
+	ata_timing_compute(adev, mode, &t, T, UT);
+
+	/* We share 8bit timing so we must merge the constraints */
+	if (peer) {
+		if (peer->pio_mode) {
+			ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
+			ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
+		}
+	}
+
+	/* Address setup is programmable but breaks on UDMA133 setups */
+	if (set_ast) {
+		u8 setup;	/* 2 bits per drive */
+		int shift = 2 * offset;
+
+		pci_read_config_byte(pdev, 0x4C, &setup);
+		setup &= ~(3 << shift);
+		setup |= FIT(t.setup, 1, 4) << shift;	/* 1,4 or 1,4 - 1  FIXME */
+		pci_write_config_byte(pdev, 0x4C, setup);
+	}
+
+	/* Load the PIO mode bits */
+	pci_write_config_byte(pdev, 0x4F - ap->port_no,
+		((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1));
+	pci_write_config_byte(pdev, 0x48 + offset,
+		((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1));
+
+	/* Load the UDMA bits according to type */
+	switch(udma_type) {
+		default:
+			/* BUG() ? */
+			/* fall through */
+		case 33:
+			ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03;
+			break;
+		case 66:
+			ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f;
+			break;
+		case 100:
+			ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
+			break;
+		case 133:
+			ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
+			break;
+	}
+	/* Set UDMA unless device is not UDMA capable */
+	if (udma_type)
+		pci_write_config_byte(pdev, 0x50 + offset, ut);
+}
+
+static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	const struct via_isa_bridge *config = ap->host->private_data;
+	int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
+	int mode = config->flags & VIA_UDMA;
+	static u8 tclock[5] = { 1, 1, 2, 3, 4 };
+	static u8 udma[5] = { 0, 33, 66, 100, 133 };
+
+	via_do_set_mode(ap, adev, adev->pio_mode, tclock[mode], set_ast, udma[mode]);
+}
+
+static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	const struct via_isa_bridge *config = ap->host->private_data;
+	int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
+	int mode = config->flags & VIA_UDMA;
+	static u8 tclock[5] = { 1, 1, 2, 3, 4 };
+	static u8 udma[5] = { 0, 33, 66, 100, 133 };
+
+	via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]);
+}
+
+static struct scsi_host_template via_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations via_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= via_set_piomode,
+	.set_dmamode	= via_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= via_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= via_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+static struct ata_port_operations via_port_ops_noirq = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= via_set_piomode,
+	.set_dmamode	= via_set_dmamode,
+	.mode_filter	= ata_pci_default_filter,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= via_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= via_cable_detect,
+
+	.bmdma_setup 	= ata_bmdma_setup,
+	.bmdma_start 	= ata_bmdma_start,
+	.bmdma_stop	= ata_bmdma_stop,
+	.bmdma_status 	= ata_bmdma_status,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= ata_data_xfer_noirq,
+
+	.irq_handler	= ata_interrupt,
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	via_config_fifo		-	set up the FIFO
+ *	@pdev: PCI device
+ *	@flags: configuration flags
+ *
+ *	Set the FIFO properties for this device if necessary. Used both on
+ *	set up and on the resume path.
+ */
+
+static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
+{
+	u8 enable;
+
+	/* 0x40 low bits indicate enabled channels */
+	pci_read_config_byte(pdev, 0x40 , &enable);
+	enable &= 3;
+
+	if (flags & VIA_SET_FIFO) {
+		static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
+		u8 fifo;
+
+		pci_read_config_byte(pdev, 0x43, &fifo);
+
+		/* Clear PREQ# until DDACK# for errata */
+		if (flags & VIA_BAD_PREQ)
+			fifo &= 0x7F;
+		else
+			fifo &= 0x9f;
+		/* Turn on FIFO for enabled channels */
+		fifo |= fifo_setting[enable];
+		pci_write_config_byte(pdev, 0x43, fifo);
+	}
+}
+
+/**
+ *	via_init_one		-	discovery callback
+ *	@pdev: PCI device
+ *	@id: PCI table info
+ *
+ *	A VIA IDE interface has been discovered. Figure out what revision
+ *	and perform configuration work before handing it to the ATA layer
+ */
+
+static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	/* Early VIA without UDMA support */
+	static const struct ata_port_info via_mwdma_info = {
+		.sht = &via_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &via_port_ops
+	};
+	/* Ditto with IRQ masking required */
+	static const struct ata_port_info via_mwdma_info_borked = {
+		.sht = &via_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.port_ops = &via_port_ops_noirq,
+	};
+	/* VIA UDMA 33 devices (and borked 66) */
+	static const struct ata_port_info via_udma33_info = {
+		.sht = &via_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7,
+		.port_ops = &via_port_ops
+	};
+	/* VIA UDMA 66 devices */
+	static const struct ata_port_info via_udma66_info = {
+		.sht = &via_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x1f,
+		.port_ops = &via_port_ops
+	};
+	/* VIA UDMA 100 devices */
+	static const struct ata_port_info via_udma100_info = {
+		.sht = &via_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x3f,
+		.port_ops = &via_port_ops
+	};
+	/* UDMA133 with bad AST (All current 133) */
+	static const struct ata_port_info via_udma133_info = {
+		.sht = &via_sht,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = 0x7f,	/* FIXME: should check north bridge */
+		.port_ops = &via_port_ops
+	};
+	struct ata_port_info type;
+	const struct ata_port_info *ppi[] = { &type, NULL };
+	struct pci_dev *isa = NULL;
+	const struct via_isa_bridge *config;
+	static int printed_version;
+	u8 t;
+	u8 enable;
+	u32 timing;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	/* To find out how the IDE will behave and what features we
+	   actually have, we must look at the bridge, not the IDE controller */
+	for (config = via_isa_bridges; config->id; config++)
+		if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
+			!!(config->flags & VIA_BAD_ID),
+			config->id, NULL))) {
+
+			pci_read_config_byte(isa, PCI_REVISION_ID, &t);
+			if (t >= config->rev_min &&
+			    t <= config->rev_max)
+				break;
+			pci_dev_put(isa);
+		}
+
+	if (!config->id) {
+		printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n");
+		return -ENODEV;
+	}
+	pci_dev_put(isa);
+
+	/* 0x40 low bits indicate enabled channels */
+	pci_read_config_byte(pdev, 0x40 , &enable);
+	enable &= 3;
+	if (enable == 0) {
+		return -ENODEV;
+	}
+
+	/* Initialise the FIFO for the enabled channels. */
+	via_config_fifo(pdev, config->flags);
+
+	/* Clock set up */
+	switch(config->flags & VIA_UDMA) {
+		case VIA_UDMA_NONE:
+			if (config->flags & VIA_NO_UNMASK)
+				type = via_mwdma_info_borked;
+			else
+				type = via_mwdma_info;
+			break;
+		case VIA_UDMA_33:
+			type = via_udma33_info;
+			break;
+		case VIA_UDMA_66:
+			type = via_udma66_info;
+			/* The 66 MHz devices require we enable the clock */
+			pci_read_config_dword(pdev, 0x50, &timing);
+			timing |= 0x80008;
+			pci_write_config_dword(pdev, 0x50, timing);
+			break;
+		case VIA_UDMA_100:
+			type = via_udma100_info;
+			break;
+		case VIA_UDMA_133:
+			type = via_udma133_info;
+			break;
+		default:
+			WARN_ON(1);
+			return -ENODEV;
+	}
+
+	if (config->flags & VIA_BAD_CLK66) {
+		/* Disable the 66MHz clock on problem devices */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing &= ~0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+
+	/* We have established the device type, now fire it up */
+	type.private_data = (void *)config;
+
+	return ata_pci_init_one(pdev, ppi);
+}
+
+#ifdef CONFIG_PM
+/**
+ *	via_reinit_one		-	reinit after resume
+ *	@pdev: PCI device
+ *
+ *	Called when the VIA PATA device is resumed. We must then
+ *	reconfigure the FIFO and any other setup we may have altered. In
+ *	addition, the kernel needs to support the resume methods of the
+ *	relevant PCI quirks.
+ */
+
+static int via_reinit_one(struct pci_dev *pdev)
+{
+	u32 timing;
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	const struct via_isa_bridge *config = host->private_data;
+
+	via_config_fifo(pdev, config->flags);
+
+	if ((config->flags & VIA_UDMA) == VIA_UDMA_66) {
+		/* The 66 MHz devices require we enable the clock */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing |= 0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+	if (config->flags & VIA_BAD_CLK66) {
+		/* Disable the 66MHz clock on problem devices */
+		pci_read_config_dword(pdev, 0x50, &timing);
+		timing &= ~0x80008;
+		pci_write_config_dword(pdev, 0x50, timing);
+	}
+	return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id via[] = {
+	{ PCI_VDEVICE(VIA, 0x0571), },
+	{ PCI_VDEVICE(VIA, 0x0581), },
+	{ PCI_VDEVICE(VIA, 0x1571), },
+	{ PCI_VDEVICE(VIA, 0x3164), },
+	{ PCI_VDEVICE(VIA, 0x5324), },
+
+	{ },
+};
+
+static struct pci_driver via_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= via,
+	.probe 		= via_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= via_reinit_one,
+#endif
+};
+
+static int __init via_init(void)
+{
+	return pci_register_driver(&via_pci_driver);
+}
+
+static void __exit via_exit(void)
+{
+	pci_unregister_driver(&via_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for VIA PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, via);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(via_init);
+module_exit(via_exit);
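A quick worked example of the register layout programmed by via_do_set_mode() above: with offset = 3 - (2 * port_no) - devno, the primary master uses offset 3 and the secondary slave offset 0, so the per-drive PIO timings land in config bytes 0x48-0x4B and the UDMA bytes in 0x50-0x53, while the per-port 8-bit command timing goes in 0x4F/0x4E. A minimal sketch of the offset calculation (via_drive_offset() is a hypothetical name, not part of the patch):

/* Sketch only: drive to config-space offset mapping used by via_do_set_mode():
   3 = primary master, 2 = primary slave,
   1 = secondary master, 0 = secondary slave. */
static int via_drive_offset(int port_no, int devno)
{
	return 3 - (2 * port_no) - devno;
}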
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pata_winbond.c linux-2.6.18.x86_64.p4/drivers/ata/pata_winbond.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pata_winbond.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/pata_winbond.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,312 @@
+/*
+ *    pata_winbond.c - Winbond VLB ATA controllers
+ *	(C) 2006 Red Hat <alan@redhat.com>
+ *
+ *    Support for the Winbond 83759A when operating in advanced mode.
+ *    Multichip mode is not currently supported.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_winbond"
+#define DRV_VERSION "0.0.3"
+
+#define NR_HOST 4	/* Two winbond controllers, two channels each */
+
+struct winbond_data {
+	unsigned long config;
+	struct platform_device *platform_dev;
+};
+
+static struct ata_host *winbond_host[NR_HOST];
+static struct winbond_data winbond_data[NR_HOST];
+static int nr_winbond_host;
+
+#ifdef MODULE
+static int probe_winbond = 1;
+#else
+static int probe_winbond;
+#endif
+
+static DEFINE_SPINLOCK(winbond_lock);
+
+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&winbond_lock, flags);
+	outb(reg, port + 0x01);
+	outb(val, port + 0x02);
+	spin_unlock_irqrestore(&winbond_lock, flags);
+}
+
+static u8 winbond_readcfg(unsigned long port, u8 reg)
+{
+	u8 val;
+
+	unsigned long flags;
+	spin_lock_irqsave(&winbond_lock, flags);
+	outb(reg, port + 0x01);
+	val = inb(port + 0x02);
+	spin_unlock_irqrestore(&winbond_lock, flags);
+
+	return val;
+}
+
+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing t;
+	struct winbond_data *winbond = ap->host->private_data;
+	int active, recovery;
+	u8 reg;
+	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
+
+	reg = winbond_readcfg(winbond->config, 0x81);
+
+	/* Get the timing data in cycles */
+	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
+		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+	else
+		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+	active = (FIT(t.active, 3, 17) - 1) & 0x0F;
+	recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
+	timing = (active << 4) | recovery;
+	winbond_writecfg(winbond->config, timing, reg);
+
+	/* Load the setup timing */
+
+	reg = 0x35;
+	if (adev->class != ATA_DEV_ATA)
+		reg |= 0x08;	/* FIFO off */
+	if (!ata_pio_need_iordy(adev))
+		reg |= 0x02;	/* IORDY off */
+	reg |= (FIT(t.setup, 0, 3) << 6);
+	winbond_writecfg(winbond->config, timing + 1, reg);
+}
+
+
+static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
+{
+	struct ata_port *ap = adev->ap;
+	int slop = buflen & 3;
+
+	if (ata_id_has_dword_io(adev->id)) {
+		if (write_data)
+			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+		else
+			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+		if (unlikely(slop)) {
+			u32 pad;
+			if (write_data) {
+				memcpy(&pad, buf + buflen - slop, slop);
+				pad = le32_to_cpu(pad);
+				iowrite32(pad, ap->ioaddr.data_addr);
+			} else {
+				pad = ioread32(ap->ioaddr.data_addr);
+				pad = cpu_to_le32(pad);
+				memcpy(buf + buflen - slop, &pad, slop);
+			}
+		}
+	} else
+		ata_data_xfer(adev, buf, buflen, write_data);
+}
+
+static struct scsi_host_template winbond_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations winbond_port_ops = {
+	.port_disable	= ata_port_disable,
+	.set_piomode	= winbond_set_piomode,
+
+	.tf_load	= ata_tf_load,
+	.tf_read	= ata_tf_read,
+	.check_status 	= ata_check_status,
+	.exec_command	= ata_exec_command,
+	.dev_select 	= ata_std_dev_select,
+
+	.freeze		= ata_bmdma_freeze,
+	.thaw		= ata_bmdma_thaw,
+	.error_handler	= ata_bmdma_error_handler,
+	.post_internal_cmd = ata_bmdma_post_internal_cmd,
+	.cable_detect	= ata_cable_40wire,
+
+	.qc_prep 	= ata_qc_prep,
+	.qc_issue	= ata_qc_issue_prot,
+
+	.data_xfer	= winbond_data_xfer,
+
+	.irq_clear	= ata_bmdma_irq_clear,
+	.irq_on		= ata_irq_on,
+	.irq_ack	= ata_irq_ack,
+
+	.port_start	= ata_port_start,
+};
+
+/**
+ *	winbond_init_one		-	attach a winbond interface
+ *	@port: I/O port of the chip configuration registers
+ *
+ *	Register a VLB bus IDE interface found at @port. Such interfaces
+ *	are PIO only and we assume they do not support IRQ sharing. Each
+ *	channel enabled in the configuration registers is set up as its
+ *	own single-port ATA host on a platform device. Returns zero or a
+ *	negative error code.
+ */
+
+static __init int winbond_init_one(unsigned long port)
+{
+	struct platform_device *pdev;
+	u8 reg;
+	int i, rc;
+
+	reg = winbond_readcfg(port, 0x81);
+	reg |= 0x80;	/* jumpered mode off */
+	winbond_writecfg(port, 0x81, reg);
+	reg = winbond_readcfg(port, 0x83);
+	reg |= 0xF0;	/* local control */
+	winbond_writecfg(port, 0x83, reg);
+	reg = winbond_readcfg(port, 0x85);
+	reg |= 0xF0;	/* programmable timing */
+	winbond_writecfg(port, 0x85, reg);
+
+	reg = winbond_readcfg(port, 0x81);
+
+	if (!(reg & 0x03))		/* Disabled */
+		return 0;
+
+	for (i = 0; i < 2 ; i ++) {
+		unsigned long cmd_port = 0x1F0 - (0x80 * i);
+		struct ata_host *host;
+		struct ata_port *ap;
+		void __iomem *cmd_addr, *ctl_addr;
+
+		if (!(reg & (1 << i)))
+			continue;
+
+		pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+
+		rc = -ENOMEM;
+		host = ata_host_alloc(&pdev->dev, 1);
+		if (!host)
+			goto err_unregister;
+
+		rc = -ENOMEM;
+		cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
+		ctl_addr = devm_ioport_map(&pdev->dev, cmd_port + 0x0206, 1);
+		if (!cmd_addr || !ctl_addr)
+			goto err_unregister;
+
+		ap = host->ports[0];
+		ap->ops = &winbond_port_ops;
+		ap->pio_mask = 0x1F;
+		ap->flags |= ATA_FLAG_SLAVE_POSS;
+		ap->ioaddr.cmd_addr = cmd_addr;
+		ap->ioaddr.altstatus_addr = ctl_addr;
+		ap->ioaddr.ctl_addr = ctl_addr;
+		ata_std_ports(&ap->ioaddr);
+
+		/* hook in a private data structure per channel */
+		host->private_data = &winbond_data[nr_winbond_host];
+		winbond_data[nr_winbond_host].config = port;
+		winbond_data[nr_winbond_host].platform_dev = pdev;
+
+		/* activate */
+		rc = ata_host_activate(host, 14 + i, ata_interrupt, 0,
+				       &winbond_sht);
+		if (rc)
+			goto err_unregister;
+
+		winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
+	}
+
+	return 0;
+
+ err_unregister:
+	platform_device_unregister(pdev);
+	return rc;
+}
+
+/**
+ *	winbond_init		-	attach winbond interfaces
+ *
+ *	Attach winbond IDE interfaces by scanning the ports they may occupy.
+ */
+
+static __init int winbond_init(void)
+{
+	static const unsigned long config[2] = { 0x130, 0x1B0 };
+
+	int ct = 0;
+	int i;
+
+	if (probe_winbond == 0)
+		return -ENODEV;
+
+	/*
+ 	 *	Check both base addresses
+	 */
+
+	for (i = 0; i < 2; i++) {
+		if (probe_winbond & (1<<i)) {
+			int ret = 0;
+			unsigned long port = config[i];
+
+			if (request_region(port, 2, "pata_winbond")) {
+				ret = winbond_init_one(port);
+				if (ret <= 0)
+					release_region(port, 2);
+				else ct += ret;
+			}
+		}
+	}
+	if (ct != 0)
+		return 0;
+	return -ENODEV;
+}
+
+static __exit void winbond_exit(void)
+{
+	int i;
+
+	for (i = 0; i < nr_winbond_host; i++) {
+		ata_host_detach(winbond_host[i]);
+		release_region(winbond_data[i].config, 2);
+		platform_device_unregister(winbond_data[i].platform_dev);
+	}
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(winbond_init);
+module_exit(winbond_exit);
+
+module_param(probe_winbond, int, 0);
+
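A note for review: winbond_set_piomode() above addresses a per-drive register pair whose base index is computed as 0x88 + 4 * port + 2 * devno (active/recovery byte, with the setup/control byte at the following index), and the probe_winbond module parameter is a bitmask selecting which of the two possible configuration ports (0x130 and 0x1B0) are scanned, so probe_winbond=3 checks both. A minimal sketch of the index calculation only (winbond_timing_reg() is a hypothetical name, not part of the patch):

/* Sketch only: base index of the timing register pair for a drive,
   matching the computation at the top of winbond_set_piomode(). */
static int winbond_timing_reg(int port_no, int devno)
{
	return 0x88 + (port_no * 4) + (devno * 2);
}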
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/pdc_adma.c linux-2.6.18.x86_64.p4/drivers/ata/pdc_adma.c
--- linux-2.6.18.x86_64.p3/drivers/ata/pdc_adma.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/pdc_adma.c	2007-06-06 10:08:00.000000000 -0400
@@ -39,22 +39,26 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
-#include <asm/io.h>
 #include <linux/libata.h>
 
 #define DRV_NAME	"pdc_adma"
-#define DRV_VERSION	"0.04"
+#define DRV_VERSION	"0.06"
 
 /* macro to calculate base address for ATA regs */
 #define ADMA_ATA_REGS(base,port_no)	((base) + ((port_no) * 0x40))
 
 /* macro to calculate base address for ADMA regs */
-#define ADMA_REGS(base,port_no)	((base) + 0x80 + ((port_no) * 0x20))
+#define ADMA_REGS(base,port_no)		((base) + 0x80 + ((port_no) * 0x20))
+
+/* macro to obtain addresses from ata_port */
+#define ADMA_PORT_REGS(ap) \
+	ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)
 
 enum {
+	ADMA_MMIO_BAR		= 4,
+
 	ADMA_PORTS		= 2,
 	ADMA_CPB_BYTES		= 40,
 	ADMA_PRD_BYTES		= LIBATA_MAX_PRD * 16,
@@ -124,10 +128,8 @@
 
 static int adma_ata_init_one (struct pci_dev *pdev,
 				const struct pci_device_id *ent);
-static irqreturn_t adma_intr (int irq, void *dev_instance,
-				struct pt_regs *regs);
 static int adma_port_start(struct ata_port *ap);
-static void adma_host_stop(struct ata_host_set *host_set);
+static void adma_host_stop(struct ata_host *host);
 static void adma_port_stop(struct ata_port *ap);
 static void adma_phy_reset(struct ata_port *ap);
 static void adma_qc_prep(struct ata_queued_cmd *qc);
@@ -168,9 +170,10 @@
 	.qc_prep		= adma_qc_prep,
 	.qc_issue		= adma_qc_issue,
 	.eng_timeout		= adma_eng_timeout,
-	.data_xfer		= ata_mmio_data_xfer,
-	.irq_handler		= adma_intr,
+	.data_xfer		= ata_data_xfer,
 	.irq_clear		= adma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.port_start		= adma_port_start,
 	.port_stop		= adma_port_stop,
 	.host_stop		= adma_host_stop,
@@ -181,8 +184,7 @@
 static struct ata_port_info adma_port_info[] = {
 	/* board_1841_idx */
 	{
-		.sht		= &adma_ata_sht,
-		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
 				  ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
 				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x10, /* pio4 */
@@ -192,8 +194,7 @@
 };
 
 static const struct pci_device_id adma_ata_pci_tbl[] = {
-	{ PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_1841_idx },
+	{ PCI_VDEVICE(PDC, 0x1841), board_1841_idx },
 
 	{ }	/* terminate list */
 };
@@ -225,8 +226,10 @@
 	/* nothing */
 }
 
-static void adma_reset_engine(void __iomem *chan)
+static void adma_reset_engine(struct ata_port *ap)
 {
+	void __iomem *chan = ADMA_PORT_REGS(ap);
+
 	/* reset ADMA to idle state */
 	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
 	udelay(2);
@@ -237,15 +240,14 @@
 static void adma_reinit_engine(struct ata_port *ap)
 {
 	struct adma_port_priv *pp = ap->private_data;
-	void __iomem *mmio_base = ap->host_set->mmio_base;
-	void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
+	void __iomem *chan = ADMA_PORT_REGS(ap);
 
 	/* mask/clear ATA interrupts */
-	writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
+	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
 	ata_check_status(ap);
 
 	/* reset the ADMA engine */
-	adma_reset_engine(chan);
+	adma_reset_engine(ap);
 
 	/* set in-FIFO threshold to 0x100 */
 	writew(0x100, chan + ADMA_FIFO_IN);
@@ -265,7 +267,7 @@
 
 static inline void adma_enter_reg_mode(struct ata_port *ap)
 {
-	void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
+	void __iomem *chan = ADMA_PORT_REGS(ap);
 
 	writew(aPIOMD4, chan + ADMA_CONTROL);
 	readb(chan + ADMA_STATUS);	/* flush */
@@ -412,7 +414,7 @@
 static inline void adma_packet_start(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
+	void __iomem *chan = ADMA_PORT_REGS(ap);
 
 	VPRINTK("ENTER, ap %p\n", ap);
 
@@ -442,16 +444,15 @@
 	return ata_qc_issue_prot(qc);
 }
 
-static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
+static inline unsigned int adma_intr_pkt(struct ata_host *host)
 {
 	unsigned int handled = 0, port_no;
-	u8 __iomem *mmio_base = host_set->mmio_base;
 
-	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
-		struct ata_port *ap = host_set->ports[port_no];
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
 		struct adma_port_priv *pp;
 		struct ata_queued_cmd *qc;
-		void __iomem *chan = ADMA_REGS(mmio_base, port_no);
+		void __iomem *chan = ADMA_PORT_REGS(ap);
 		u8 status = readb(chan + ADMA_STATUS);
 
 		if (status == 0)
@@ -476,13 +477,13 @@
 	return handled;
 }
 
-static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
+static inline unsigned int adma_intr_mmio(struct ata_host *host)
 {
 	unsigned int handled = 0, port_no;
 
-	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
 		struct ata_port *ap;
-		ap = host_set->ports[port_no];
+		ap = host->ports[port_no];
 		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
@@ -496,8 +497,8 @@
 				if ((status & ATA_BUSY))
 					continue;
 				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-					ap->id, qc->tf.protocol, status);
-		
+					ap->print_id, qc->tf.protocol, status);
+
 				/* complete taskfile transaction */
 				pp->state = adma_state_idle;
 				qc->err_mask |= ac_err_mask(status);
@@ -509,23 +510,23 @@
 	return handled;
 }
 
-static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int handled = 0;
 
 	VPRINTK("ENTER\n");
 
-	spin_lock(&host_set->lock);
-	handled  = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
-	spin_unlock(&host_set->lock);
+	spin_lock(&host->lock);
+	handled  = adma_intr_pkt(host) | adma_intr_mmio(host);
+	spin_unlock(&host->lock);
 
 	VPRINTK("EXIT\n");
 
 	return IRQ_RETVAL(handled);
 }
 
-static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
+static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
 {
 	port->cmd_addr		=
 	port->data_addr		= base + 0x000;
@@ -544,7 +545,7 @@
 
 static int adma_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct adma_port_priv *pp;
 	int rc;
 
@@ -552,72 +553,48 @@
 	if (rc)
 		return rc;
 	adma_enter_reg_mode(ap);
-	rc = -ENOMEM;
-	pp = kcalloc(1, sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
-		goto err_out;
-	pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
-								GFP_KERNEL);
+		return -ENOMEM;
+	pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
+				      GFP_KERNEL);
 	if (!pp->pkt)
-		goto err_out_kfree;
+		return -ENOMEM;
 	/* paranoia? */
 	if ((pp->pkt_dma & 7) != 0) {
 		printk("bad alignment for pp->pkt_dma: %08x\n",
 						(u32)pp->pkt_dma);
-		dma_free_coherent(dev, ADMA_PKT_BYTES,
-						pp->pkt, pp->pkt_dma);
-		goto err_out_kfree;
+		return -ENOMEM;
 	}
 	memset(pp->pkt, 0, ADMA_PKT_BYTES);
 	ap->private_data = pp;
 	adma_reinit_engine(ap);
 	return 0;
-
-err_out_kfree:
-	kfree(pp);
-err_out:
-	ata_port_stop(ap);
-	return rc;
 }
 
 static void adma_port_stop(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct adma_port_priv *pp = ap->private_data;
-
-	adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
-	if (pp != NULL) {
-		ap->private_data = NULL;
-		if (pp->pkt != NULL)
-			dma_free_coherent(dev, ADMA_PKT_BYTES,
-					pp->pkt, pp->pkt_dma);
-		kfree(pp);
-	}
-	ata_port_stop(ap);
+	adma_reset_engine(ap);
 }
 
-static void adma_host_stop(struct ata_host_set *host_set)
+static void adma_host_stop(struct ata_host *host)
 {
 	unsigned int port_no;
 
 	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
-		adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
-
-	ata_pci_host_stop(host_set);
+		adma_reset_engine(host->ports[port_no]);
 }
 
-static void adma_host_init(unsigned int chip_id,
-				struct ata_probe_ent *probe_ent)
+static void adma_host_init(struct ata_host *host, unsigned int chip_id)
 {
 	unsigned int port_no;
-	void __iomem *mmio_base = probe_ent->mmio_base;
 
 	/* enable/lock aGO operation */
-	writeb(7, mmio_base + ADMA_MODE_LOCK);
+	writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);
 
 	/* reset the ADMA logic */
 	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
-		adma_reset_engine(ADMA_REGS(mmio_base, port_no));
+		adma_reset_engine(host->ports[port_no]);
 }
 
 static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
@@ -640,89 +617,56 @@
 }
 
 static int adma_ata_init_one(struct pci_dev *pdev,
-				const struct pci_device_id *ent)
+			     const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	void __iomem *mmio_base;
 	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
+	struct ata_host *host;
+	void __iomem *mmio_base;
 	int rc, port_no;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
+	if (!host)
+		return -ENOMEM;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
 	if (rc)
-		goto err_out;
+		return rc;
 
-	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
-		rc = -ENODEV;
-		goto err_out_regions;
-	}
+	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
+		return -ENODEV;
 
-	mmio_base = pci_iomap(pdev, 4, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
+	rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	mmio_base = host->iomap[ADMA_MMIO_BAR];
 
 	rc = adma_set_dma_masks(pdev, mmio_base);
 	if (rc)
-		goto err_out_iounmap;
-
-	probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	probe_ent->sht		= adma_port_info[board_idx].sht;
-	probe_ent->host_flags	= adma_port_info[board_idx].host_flags;
-	probe_ent->pio_mask	= adma_port_info[board_idx].pio_mask;
-	probe_ent->mwdma_mask	= adma_port_info[board_idx].mwdma_mask;
-	probe_ent->udma_mask	= adma_port_info[board_idx].udma_mask;
-	probe_ent->port_ops	= adma_port_info[board_idx].port_ops;
-
-	probe_ent->irq		= pdev->irq;
-	probe_ent->irq_flags	= IRQF_SHARED;
-	probe_ent->mmio_base	= mmio_base;
-	probe_ent->n_ports	= ADMA_PORTS;
-
-	for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
-		adma_ata_setup_port(&probe_ent->port[port_no],
-			ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
-	}
+		return rc;
 
-	pci_set_master(pdev);
+	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
+		adma_ata_setup_port(&host->ports[port_no]->ioaddr,
+				    ADMA_ATA_REGS(mmio_base, port_no));
 
 	/* initialize adapter */
-	adma_host_init(board_idx, probe_ent);
+	adma_host_init(host, board_idx);
 
-	rc = ata_device_add(probe_ent);
-	kfree(probe_ent);
-	if (rc != ADMA_PORTS)
-		goto err_out_iounmap;
-	return 0;
-
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	pci_disable_device(pdev);
-	return rc;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
+				 &adma_ata_sht);
 }
 
 static int __init adma_ata_init(void)
 {
-	return pci_module_init(&adma_ata_pci_driver);
+	return pci_register_driver(&adma_ata_pci_driver);
 }
 
 static void __exit adma_ata_exit(void)
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_inic162x.c linux-2.6.18.x86_64.p4/drivers/ata/sata_inic162x.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_inic162x.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_inic162x.c	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,763 @@
+/*
+ * sata_inic162x.c - Driver for Initio 162x SATA controllers
+ *
+ * Copyright 2006  SUSE Linux Products GmbH
+ * Copyright 2006  Tejun Heo <teheo@novell.com>
+ *
+ * This file is released under GPL v2.
+ *
+ * This controller is eccentric and easily locks up if something isn't
+ * right.  Documentation is available at initio's website but it only
+ * documents registers (not programming model).
+ *
+ * - ATA disks work.
+ * - Hotplug works.
+ * - ATAPI read works but burning doesn't.  This thing is really
+ *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
+ *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
+ *   my guest.
+ * - Both STR and STD work.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_device.h>
+
+#define DRV_NAME	"sata_inic162x"
+#define DRV_VERSION	"0.2"
+
+enum {
+	MMIO_BAR		= 5,
+
+	NR_PORTS		= 2,
+
+	HOST_CTL		= 0x7c,
+	HOST_STAT		= 0x7e,
+	HOST_IRQ_STAT		= 0xbc,
+	HOST_IRQ_MASK		= 0xbe,
+
+	PORT_SIZE		= 0x40,
+
+	/* registers for ATA TF operation */
+	PORT_TF			= 0x00,
+	PORT_ALT_STAT		= 0x08,
+	PORT_IRQ_STAT		= 0x09,
+	PORT_IRQ_MASK		= 0x0a,
+	PORT_PRD_CTL		= 0x0b,
+	PORT_PRD_ADDR		= 0x0c,
+	PORT_PRD_XFERLEN	= 0x10,
+
+	/* IDMA register */
+	PORT_IDMA_CTL		= 0x14,
+
+	PORT_SCR		= 0x20,
+
+	/* HOST_CTL bits */
+	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
+	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
+	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
+	HCTL_RPGSEL		= (1 << 15), /* register page select */
+
+	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
+				  HCTL_RPGSEL,
+
+	/* HOST_IRQ_(STAT|MASK) bits */
+	HIRQ_PORT0		= (1 << 0),
+	HIRQ_PORT1		= (1 << 1),
+	HIRQ_SOFT		= (1 << 14),
+	HIRQ_GLOBAL		= (1 << 15), /* STAT only */
+
+	/* PORT_IRQ_(STAT|MASK) bits */
+	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
+	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
+	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
+	PIRQ_FATAL		= (1 << 3),  /* fatal error */
+	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
+	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
+	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */
+
+	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
+
+	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
+	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
+	PIRQ_MASK_FREEZE	= 0xff,
+
+	/* PORT_PRD_CTL bits */
+	PRD_CTL_START		= (1 << 0),
+	PRD_CTL_WR		= (1 << 3),
+	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */
+
+	/* PORT_IDMA_CTL bits */
+	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
+	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinary */
+	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
+	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
+};
+
+struct inic_host_priv {
+	u16	cached_hctl;
+};
+
+struct inic_port_priv {
+	u8	dfl_prdctl;
+	u8	cached_prdctl;
+	u8	cached_pirq_mask;
+};
+
+static int inic_slave_config(struct scsi_device *sdev)
+{
+	/* This controller is braindamaged.  dma_boundary is 0xffff
+	 * like others, but it will lock up the whole machine HARD if
+	 * a 65536-byte PRD entry is fed.  Reduce the maximum segment size.
+	 */
+	blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);
+
+	return ata_scsi_slave_config(sdev);
+}
+
+static struct scsi_host_template inic_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= inic_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
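+/* The SCR registers live as a packed block at PORT_SCR; this table maps
+ * the generic SCR indices onto the controller's register order.
+ */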
+static const int scr_map[] = {
+	[SCR_STATUS]	= 0,
+	[SCR_ERROR]	= 1,
+	[SCR_CONTROL]	= 2,
+};
+
+static void __iomem * inic_port_base(struct ata_port *ap)
+{
+	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
+}
+
+static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
+
+	writeb(mask, port_base + PORT_IRQ_MASK);
+	pp->cached_pirq_mask = mask;
+}
+
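+/* Write the port IRQ mask only when it differs from the cached value,
+ * avoiding redundant MMIO writes in the hot path.
+ */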
+static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
+{
+	struct inic_port_priv *pp = ap->private_data;
+
+	if (pp->cached_pirq_mask != mask)
+		__inic_set_pirq_mask(ap, mask);
+}
+
+static void inic_reset_port(void __iomem *port_base)
+{
+	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
+	u16 ctl;
+
+	ctl = readw(idma_ctl);
+	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+
+	/* mask IRQ and assert reset */
+	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+	readw(idma_ctl); /* flush */
+
+	/* give it some time */
+	msleep(1);
+
+	/* release reset */
+	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+
+	/* clear irq */
+	writeb(0xff, port_base + PORT_IRQ_STAT);
+
+	/* reenable ATA IRQ, turn off IDMA mode */
+	writew(ctl, idma_ctl);
+}
+
+static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
+{
+	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
+	void __iomem *addr;
+	u32 val;
+
+	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
+		return 0xffffffffU;
+
+	addr = scr_addr + scr_map[sc_reg] * 4;
+	val = readl(addr);
+
+	/* this controller has stuck DIAG.N, ignore it */
+	if (sc_reg == SCR_ERROR)
+		val &= ~SERR_PHYRDY_CHG;
+	return val;
+}
+
+static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+{
+	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
+	void __iomem *addr;
+
+	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
+		return;
+
+	addr = scr_addr + scr_map[sc_reg] * 4;
+	writel(val, addr);
+}
+
+/*
+ * In TF mode, inic162x is very similar to an SFF device.  The TF
+ * registers function the same.  The DMA engine behaves similarly,
+ * using the same PRD format as BMDMA, but different command register,
+ * interrupt and event notification methods are used.  The following
+ * inic_bmdma_*() functions do the impedance matching.
+ */
+static void inic_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct inic_port_priv *pp = ap->private_data;
+	void __iomem *port_base = inic_port_base(ap);
+	int rw = qc->tf.flags & ATA_TFLAG_WRITE;
+
+	/* make sure device sees PRD table writes */
+	wmb();
+
+	/* load transfer length */
+	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
+
+	/* turn on DMA and specify data direction */
+	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
+	if (!rw)
+		pp->cached_prdctl |= PRD_CTL_WR;
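+	/* (WR here means the engine writes to host memory, i.e. a device read) */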
+	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+static void inic_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct inic_port_priv *pp = ap->private_data;
+	void __iomem *port_base = inic_port_base(ap);
+
+	/* start host DMA transaction */
+	pp->cached_prdctl |= PRD_CTL_START;
+	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
+}
+
+static void inic_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct inic_port_priv *pp = ap->private_data;
+	void __iomem *port_base = inic_port_base(ap);
+
+	/* stop DMA engine */
+	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
+}
+
+static u8 inic_bmdma_status(struct ata_port *ap)
+{
+	/* event is already verified by the interrupt handler */
+	return ATA_DMA_INTR;
+}
+
+static void inic_irq_clear(struct ata_port *ap)
+{
+	/* noop */
+}
+
+static void inic_host_intr(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct ata_eh_info *ehi = &ap->eh_info;
+	u8 irq_stat;
+
+	/* fetch and clear irq */
+	irq_stat = readb(port_base + PORT_IRQ_STAT);
+	writeb(irq_stat, port_base + PORT_IRQ_STAT);
+
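+	/* no error bits set: hand completion off to the generic SFF interrupt handler */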
+	if (likely(!(irq_stat & PIRQ_ERR))) {
+		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+
+		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+			ata_chk_status(ap);	/* clear ATA interrupt */
+			return;
+		}
+
+		if (likely(ata_host_intr(ap, qc)))
+			return;
+
+		ata_chk_status(ap);	/* clear ATA interrupt */
+		ata_port_printk(ap, KERN_WARNING, "unhandled "
+				"interrupt, irq_stat=%x\n", irq_stat);
+		return;
+	}
+
+	/* error */
+	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
+
+	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+		ata_ehi_hotplugged(ehi);
+		ata_port_freeze(ap);
+	} else
+		ata_port_abort(ap);
+}
+
+static irqreturn_t inic_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
+{
+	struct ata_host *host = dev_instance;
+	void __iomem *mmio_base = host->iomap[MMIO_BAR];
+	u16 host_irq_stat;
+	int i, handled = 0;
+
+	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
+
+	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
+		goto out;
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < NR_PORTS; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
+			continue;
+
+		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
+			inic_host_intr(ap);
+			handled++;
+		} else {
+			if (ata_ratelimit())
+				dev_printk(KERN_ERR, host->dev, "interrupt "
+					   "from disabled port %d (0x%x)\n",
+					   i, host_irq_stat);
+		}
+	}
+
+	spin_unlock(&host->lock);
+
+ out:
+	return IRQ_RETVAL(handled);
+}
+
+static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* ATA IRQ doesn't wait for DMA transfer completion and vice
+	 * versa.  Mask IRQ selectively to detect command completion.
+	 * Without it, ATA DMA read command can cause data corruption.
+	 *
+	 * Something similar might be needed for ATAPI writes.  I
+	 * tried a lot of combinations but couldn't find the solution.
+	 */
+	if (qc->tf.protocol == ATA_PROT_DMA &&
+	    !(qc->tf.flags & ATA_TFLAG_WRITE))
+		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
+	else
+		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+
+	/* Issuing a command to a not-yet-initialized port locks up the
+	 * controller.  Most of the time this happens with the first
+	 * command after reset, which is an ATA or ATAPI IDENTIFY.
+	 * Fail fast if stat is 0x7f or 0xff for those commands.
+	 */
+	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
+		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
+		u8 stat = ata_chk_status(ap);
+		if (stat == 0x7f || stat == 0xff)
+			return AC_ERR_HSM;
+	}
+
+	return ata_qc_issue_prot(qc);
+}
+
+static void inic_freeze(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
+
+	ata_chk_status(ap);
+	writeb(0xff, port_base + PORT_IRQ_STAT);
+
+	readb(port_base + PORT_IRQ_STAT); /* flush */
+}
+
+static void inic_thaw(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	ata_chk_status(ap);
+	writeb(0xff, port_base + PORT_IRQ_STAT);
+
+	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+
+	readb(port_base + PORT_IRQ_STAT); /* flush */
+}
+
+/*
+ * SRST and SControl hardreset don't give valid signature on this
+ * controller.  Only controller specific hardreset mechanism works.
+ */
+static int inic_hardreset(struct ata_port *ap, unsigned int *class,
+			  unsigned long deadline)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
+	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+	u16 val;
+	int rc;
+
+	/* hammer it into sane state */
+	inic_reset_port(port_base);
+
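+	/* pulse IDMA_CTL_RST_ATA to hardreset the ATA link */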
+	val = readw(idma_ctl);
+	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+	readw(idma_ctl);	/* flush */
+	msleep(1);
+	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+
+	rc = sata_phy_resume(ap, timing, deadline);
+	if (rc) {
+		ata_port_printk(ap, KERN_WARNING, "failed to resume "
+				"link after reset (errno=%d)\n", rc);
+		return rc;
+	}
+
+	*class = ATA_DEV_NONE;
+	if (ata_port_online(ap)) {
+		struct ata_taskfile tf;
+
+		/* wait a while before checking status */
+		msleep(150);
+
+		rc = ata_wait_ready(ap, deadline);
+		/* link occupied, -ENODEV too is an error */
+		if (rc) {
+			ata_port_printk(ap, KERN_WARNING, "device not ready "
+					"after hardreset (errno=%d)\n", rc);
+			return rc;
+		}
+
+		ata_tf_read(ap, &tf);
+		*class = ata_dev_classify(&tf);
+		if (*class == ATA_DEV_UNKNOWN)
+			*class = ATA_DEV_NONE;
+	}
+
+	return 0;
+}
+
+static void inic_error_handler(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
+	unsigned long flags;
+
+	/* reset PIO HSM and stop DMA engine */
+	inic_reset_port(port_base);
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->hsm_task_state = HSM_ST_IDLE;
+	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	/* PIO and DMA engines have been stopped, perform recovery */
+	ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
+		  ata_std_postreset);
+}
+
+static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		inic_reset_port(inic_port_base(qc->ap));
+}
+
+static void inic_dev_config(struct ata_device *dev)
+{
+	/* inic can only handle up to LBA28 max sectors */
+	if (dev->max_sectors > ATA_MAX_SECTORS)
+		dev->max_sectors = ATA_MAX_SECTORS;
+}
+
+static void init_port(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	/* Setup PRD address */
+	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+}
+
+static int inic_port_resume(struct ata_port *ap)
+{
+	init_port(ap);
+	return 0;
+}
+
+static int inic_port_start(struct ata_port *ap)
+{
+	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp;
+	u8 tmp;
+	int rc;
+
+	/* alloc and initialize private data */
+	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	ap->private_data = pp;
+
+	/* default PRD_CTL value, DMAEN, WR and START off */
+	tmp = readb(port_base + PORT_PRD_CTL);
+	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
+	pp->dfl_prdctl = tmp;
+
+	/* Alloc resources */
+	rc = ata_port_start(ap);
+	if (rc) {
+		kfree(pp);
+		return rc;
+	}
+
+	init_port(ap);
+
+	return 0;
+}
+
+static struct ata_port_operations inic_port_ops = {
+	.port_disable		= ata_port_disable,
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.scr_read		= inic_scr_read,
+	.scr_write		= inic_scr_write,
+
+	.bmdma_setup		= inic_bmdma_setup,
+	.bmdma_start		= inic_bmdma_start,
+	.bmdma_stop		= inic_bmdma_stop,
+	.bmdma_status		= inic_bmdma_status,
+
+	.irq_clear		= inic_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.qc_prep	 	= ata_qc_prep,
+	.qc_issue		= inic_qc_issue,
+	.data_xfer		= ata_data_xfer,
+
+	.freeze			= inic_freeze,
+	.thaw			= inic_thaw,
+	.error_handler		= inic_error_handler,
+	.post_internal_cmd	= inic_post_internal_cmd,
+	.dev_config		= inic_dev_config,
+
+	.port_resume		= inic_port_resume,
+
+	.port_start		= inic_port_start,
+};
+
+static struct ata_port_info inic_port_info = {
+	/* For some reason, ATA_PROT_ATAPI is broken on this
+	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
+	 * manages to report the wrong ireason, and ignoring ireason
+	 * results in a machine lockup.  Tell libata to always prefer
+	 * DMA.
+	 */
+	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+	.pio_mask		= 0x1f,	/* pio0-4 */
+	.mwdma_mask		= 0x07, /* mwdma0-2 */
+	.udma_mask		= 0x7f,	/* udma0-6 */
+	.port_ops		= &inic_port_ops
+};
+
+static int init_controller(void __iomem *mmio_base, u16 hctl)
+{
+	int i;
+	u16 val;
+
+	hctl &= ~HCTL_KNOWN_BITS;
+
+	/* Soft reset the whole controller.  The spec says the reset
+	 * duration is 3 PCI clocks; be generous and give it 10ms.
+	 */
+	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
+	readw(mmio_base + HOST_CTL); /* flush */
+
+	for (i = 0; i < 10; i++) {
+		msleep(1);
+		val = readw(mmio_base + HOST_CTL);
+		if (!(val & HCTL_SOFTRST))
+			break;
+	}
+
+	if (val & HCTL_SOFTRST)
+		return -EIO;
+
+	/* mask all interrupts and reset ports */
+	for (i = 0; i < NR_PORTS; i++) {
+		void __iomem *port_base = mmio_base + i * PORT_SIZE;
+
+		writeb(0xff, port_base + PORT_IRQ_MASK);
+		inic_reset_port(port_base);
+	}
+
+	/* port IRQ is masked now, unmask global IRQ */
+	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
+	val = readw(mmio_base + HOST_IRQ_MASK);
+	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
+	writew(val, mmio_base + HOST_IRQ_MASK);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int inic_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct inic_host_priv *hpriv = host->private_data;
+	void __iomem *mmio_base = host->iomap[MMIO_BAR];
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
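+	/* re-run controller init only when coming back from a full suspend */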
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = init_controller(mmio_base, hpriv->cached_hctl);
+		if (rc)
+			return rc;
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
+	struct ata_host *host;
+	struct inic_host_priv *hpriv;
+	void __iomem * const *iomap;
+	int i, rc;
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+
+	host->private_data = hpriv;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
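+	/* 0x3f maps BARs 0-5: the per-port cmd/ctl BARs plus the MMIO BAR */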
+	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	if (rc)
+		return rc;
+	host->iomap = iomap = pcim_iomap_table(pdev);
+
+	for (i = 0; i < NR_PORTS; i++) {
+		struct ata_ioports *port = &host->ports[i]->ioaddr;
+		void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;
+
+		port->cmd_addr = iomap[2 * i];
+		port->altstatus_addr =
+		port->ctl_addr = (void __iomem *)
+			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
+		port->scr_addr = port_base + PORT_SCR;
+
+		ata_std_ports(port);
+	}
+
+	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
+
+	/* Set dma_mask.  This device doesn't support 64-bit addressing. */
+	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "32-bit DMA enable failed\n");
+		return rc;
+	}
+
+	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "32-bit consistent DMA enable failed\n");
+		return rc;
+	}
+
+	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
+	if (rc) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "failed to initialize controller\n");
+		return rc;
+	}
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
+				 &inic_sht);
+}
+
+static const struct pci_device_id inic_pci_tbl[] = {
+	{ PCI_VDEVICE(INIT, 0x1622), },
+	{ },
+};
+
+static struct pci_driver inic_pci_driver = {
+	.name 		= DRV_NAME,
+	.id_table	= inic_pci_tbl,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= inic_pci_device_resume,
+#endif
+	.probe 		= inic_init_one,
+	.remove		= ata_pci_remove_one,
+};
+
+static int __init inic_init(void)
+{
+	return pci_register_driver(&inic_pci_driver);
+}
+
+static void __exit inic_exit(void)
+{
+	pci_unregister_driver(&inic_pci_driver);
+}
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(inic_init);
+module_exit(inic_exit);
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_mv.c linux-2.6.18.x86_64.p4/drivers/ata/sata_mv.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_mv.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_mv.c	2007-06-06 10:08:00.000000000 -0400
@@ -21,6 +21,50 @@
  *
  */
 
+/*
+  sata_mv TODO list:
+
+  1) Needs a full errata audit for all chipsets.  I implemented most
+  of the errata workarounds found in the Marvell vendor driver, but
+  I distinctly remember a couple of workarounds (one related to PCI-X)
+  are still needed.
+
+  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
+  probing/error handling in general.  MUST HAVE.
+
+  3) Add hotplug support (easy, once new-EH support appears)
+
+  4) Add NCQ support (easy to intermediate, once new-EH support appears)
+
+  5) Investigate problems with PCI Message Signalled Interrupts (MSI).
+
+  6) Add port multiplier support (intermediate)
+
+  7) Test and verify 3.0 Gbps support
+
+  8) Develop a low-power-consumption strategy, and implement it.
+
+  9) [Experiment, low priority] See if ATAPI can be supported using
+  "unknown FIS" or "vendor-specific FIS" support, or something creative
+  like that.
+
+  10) [Experiment, low priority] Investigate interrupt coalescing.
+  Quite often, especially with PCI Message Signalled Interrupts (MSI),
+  the overhead reduced by interrupt mitigation is not worth the
+  latency cost.
+
+  11) [Experiment, Marvell value added] Is it possible to use target
+  mode to cross-connect two Linux boxes with Marvell cards?  If so,
+  creating LibATA target mode support would be very interesting.
+
+  Target mode, for those without docs, is the ability to directly
+  connect two SATA controllers.
+
+  12) Verify that 7042 is fully supported.  I only have a 6042.
+
+*/
+
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -28,16 +72,14 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"0.7"
+#define DRV_VERSION	"0.81"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -139,14 +181,19 @@
 	PCI_ERR			= (1 << 18),
 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
+	PORTS_0_3_COAL_DONE	= (1 << 8),
+	PORTS_4_7_COAL_DONE	= (1 << 17),
 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
 	GPIO_INT		= (1 << 22),
 	SELF_INT		= (1 << 23),
 	TWSI_INT		= (1 << 24),
 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
+	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
 				   HC_MAIN_RSVD),
+	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
+				   HC_MAIN_RSVD_5),
 
 	/* SATAHC registers */
 	HC_CFG_OFS		= 0,
@@ -250,10 +297,7 @@
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
-	/* Our DMA boundary is determined by an ePRD being unable to handle
-	 * anything larger than 64KB
-	 */
-	MV_DMA_BOUNDARY		= 0xffffU,
+	MV_DMA_BOUNDARY		= 0xffffffffU,
 
 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
 
@@ -342,14 +386,11 @@
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static void mv_phy_reset(struct ata_port *ap);
 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
-static void mv_host_stop(struct ata_host_set *host_set);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static irqreturn_t mv_interrupt(int irq, void *dev_instance,
-				struct pt_regs *regs);
 static void mv_eng_timeout(struct ata_port *ap);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
@@ -383,10 +424,10 @@
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= MV_USE_Q_DEPTH,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_MAX_SG_CT / 2,
+	.sg_tablesize		= MV_MAX_SG_CT,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
-	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.use_clustering		= 1,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= MV_DMA_BOUNDARY,
 	.slave_configure	= ata_scsi_slave_config,
@@ -404,22 +445,23 @@
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
+	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.eng_timeout		= mv_eng_timeout,
 
-	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= mv5_scr_read,
 	.scr_write		= mv5_scr_write,
 
 	.port_start		= mv_port_start,
 	.port_stop		= mv_port_stop,
-	.host_stop		= mv_host_stop,
 };
 
 static const struct ata_port_operations mv6_ops = {
@@ -432,22 +474,23 @@
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
+	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.eng_timeout		= mv_eng_timeout,
 
-	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
 
 	.port_start		= mv_port_start,
 	.port_stop		= mv_port_stop,
-	.host_stop		= mv_host_stop,
 };
 
 static const struct ata_port_operations mv_iie_ops = {
@@ -460,72 +503,65 @@
 	.dev_select		= ata_std_dev_select,
 
 	.phy_reset		= mv_phy_reset,
+	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep_iie,
 	.qc_issue		= mv_qc_issue,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.eng_timeout		= mv_eng_timeout,
 
-	.irq_handler		= mv_interrupt,
 	.irq_clear		= mv_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
 
 	.port_start		= mv_port_start,
 	.port_stop		= mv_port_stop,
-	.host_stop		= mv_host_stop,
 };
 
 static const struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
-		.sht		= &mv_sht,
-		.host_flags	= MV_COMMON_FLAGS,
+		.flags		= MV_COMMON_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_508x */
-		.sht		= &mv_sht,
-		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_5080 */
-		.sht		= &mv_sht,
-		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
-		.sht		= &mv_sht,
-		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
-		.sht		= &mv_sht,
-		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
 				   MV_FLAG_DUAL_HC),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.sht		= &mv_sht,
-		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.sht		= &mv_sht,
-		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				   MV_FLAG_DUAL_HC),
+		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= 0x7f,	/* udma0-6 */
 		.port_ops	= &mv_iie_ops,
@@ -533,19 +569,25 @@
 };
 
 static const struct pci_device_id mv_pci_tbl[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},
-
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
-	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
+	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+
+	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
 
-	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
-	{}			/* terminate list */
+	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+	/* add Marvell 7042 support */
+	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+	{ }			/* terminate list */
 };
 
 static struct pci_driver mv_pci_driver = {
@@ -579,6 +621,39 @@
 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
 
 
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+	int rc;
+
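+	/* prefer 64-bit DMA; fall back to 32-bit masks if either setting fails */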
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 /*
  * Functions
  */
@@ -619,12 +694,12 @@
 
 static inline void __iomem *mv_ap_base(struct ata_port *ap)
 {
-	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
+	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
 }
 
-static inline int mv_get_hc_count(unsigned long host_flags)
+static inline int mv_get_hc_count(unsigned long port_flags)
 {
-	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
 static void mv_irq_clear(struct ata_port *ap)
@@ -792,49 +867,18 @@
 {
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
-	if (0xffffffffU != ofs) {
+	if (0xffffffffU != ofs)
 		return readl(mv_ap_base(ap) + ofs);
-	} else {
+	else
 		return (u32) ofs;
-	}
 }
 
 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
-	if (0xffffffffU != ofs) {
+	if (0xffffffffU != ofs)
 		writelfl(val, mv_ap_base(ap) + ofs);
-	}
-}
-
-/**
- *      mv_host_stop - Host specific cleanup/stop routine.
- *      @host_set: host data structure
- *
- *      Disable ints, cleanup host memory, call general purpose
- *      host_stop.
- *
- *      LOCKING:
- *      Inherited from caller.
- */
-static void mv_host_stop(struct ata_host_set *host_set)
-{
-	struct mv_host_priv *hpriv = host_set->private_data;
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
-
-	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
-		pci_disable_msi(pdev);
-	} else {
-		pci_intx(pdev, 0);
-	}
-	kfree(hpriv);
-	ata_host_stop(host_set);
-}
-
-static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
-{
-	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
 }
 
 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
@@ -842,23 +886,27 @@
 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
 
 	/* set up non-NCQ EDMA configuration */
-	cfg &= ~0x1f;		/* clear queue depth */
-	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
 	cfg &= ~(1 << 9);	/* disable equeue */
 
-	if (IS_GEN_I(hpriv))
+	if (IS_GEN_I(hpriv)) {
+		cfg &= ~0x1f;		/* clear queue depth */
 		cfg |= (1 << 8);	/* enab config burst size mask */
+	}
 
-	else if (IS_GEN_II(hpriv))
+	else if (IS_GEN_II(hpriv)) {
+		cfg &= ~0x1f;		/* clear queue depth */
 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
+	}
 
 	else if (IS_GEN_IIE(hpriv)) {
-		cfg |= (1 << 23);	/* dis RX PM port mask */
-		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
+		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
+		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
 		cfg |= (1 << 18);	/* enab early completion */
-		cfg |= (1 << 17);	/* enab host q cache */
-		cfg |= (1 << 22);	/* enab cutthrough */
+		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
+		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
+		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
 	}
 
 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
@@ -876,28 +924,27 @@
  */
 static int mv_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct mv_host_priv *hpriv = ap->host_set->private_data;
+	struct device *dev = ap->host->dev;
+	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	void *mem;
 	dma_addr_t mem_dma;
-	int rc = -ENOMEM;
+	int rc;
 
-	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
-		goto err_out;
-	memset(pp, 0, sizeof(*pp));
+		return -ENOMEM;
 
-	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
-				 GFP_KERNEL);
+	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
+				  GFP_KERNEL);
 	if (!mem)
-		goto err_out_pp;
+		return -ENOMEM;
 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
 
 	rc = ata_pad_alloc(ap, dev);
 	if (rc)
-		goto err_out_priv;
+		return rc;
 
 	/* First item in chunk of DMA memory:
 	 * 32-slot command request table (CRQB), 32 bytes each in size
@@ -950,13 +997,6 @@
 	 */
 	ap->private_data = pp;
 	return 0;
-
-err_out_priv:
-	mv_priv_free(pp, dev);
-err_out_pp:
-	kfree(pp);
-err_out:
-	return rc;
 }
 
 /**
@@ -966,22 +1006,15 @@
  *      Stop DMA, cleanup port memory.
  *
  *      LOCKING:
- *      This routine uses the host_set lock to protect the DMA stop.
+ *      This routine uses the host lock to protect the DMA stop.
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct mv_port_priv *pp = ap->private_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_stop_dma(ap);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	ap->private_data = NULL;
-	ata_pad_free(ap, dev);
-	mv_priv_free(pp, dev);
-	kfree(pp);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
 }
 
 /**
@@ -993,38 +1026,30 @@
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_fill_sg(struct ata_queued_cmd *qc)
+static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct mv_port_priv *pp = qc->ap->private_data;
-	unsigned int i = 0;
+	unsigned int n_sg = 0;
 	struct scatterlist *sg;
+	struct mv_sg *mv_sg;
 
+	mv_sg = pp->sg_tbl;
 	ata_for_each_sg(sg, qc) {
-		dma_addr_t addr;
-		u32 sg_len, len, offset;
-
-		addr = sg_dma_address(sg);
-		sg_len = sg_dma_len(sg);
-
-		while (sg_len) {
-			offset = addr & MV_DMA_BOUNDARY;
-			len = sg_len;
-			if ((offset + sg_len) > 0x10000)
-				len = 0x10000 - offset;
-
-			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
-			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
 
-			sg_len -= len;
-			addr += len;
+		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
 
-			if (!sg_len && ata_sg_is_last(sg, qc))
-				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+		if (ata_sg_is_last(sg, qc))
+			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
 
-			i++;
-		}
+		mv_sg++;
+		n_sg++;
 	}
+
+	return n_sg;
 }
 
 static inline unsigned mv_inc_q_index(unsigned index)
@@ -1319,7 +1344,7 @@
 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 	}
 	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
-		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
+		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
 
 	/* Clear EDMA now that SERR cleanup done */
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -1331,7 +1356,7 @@
 
 /**
  *      mv_host_intr - Handle all interrupts on the given host controller
- *      @host_set: host specific structure
+ *      @host: host specific structure
  *      @relevant: port error bits relevant to this host controller
  *      @hc: which host controller we're to look at
  *
@@ -1345,34 +1370,31 @@
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
-			 unsigned int hc)
+static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 {
-	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
 	int shift, port, port0, hard_port, handled;
 	unsigned int err_mask;
 
-	if (hc == 0) {
+	if (hc == 0)
 		port0 = 0;
-	} else {
+	else
 		port0 = MV_PORTS_PER_HC;
-	}
 
 	/* we'll need the HC success int register in most cases */
 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	if (hc_irq_cause) {
+	if (hc_irq_cause)
 		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
-	}
 
 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
 		hc,relevant,hc_irq_cause);
 
 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
 		u8 ata_status = 0;
-		struct ata_port *ap = host_set->ports[port];
+		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp = ap->private_data;
 
 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
@@ -1391,8 +1413,7 @@
 		} else {
 			/* PIO: check for device (drive) interrupt */
 			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
-				ata_status = readb((void __iomem *)
-					   ap->ioaddr.status_addr);
+				ata_status = readb(ap->ioaddr.status_addr);
 				handled = 1;
 				/* ignore spurious intr if drive still BUSY */
 				if (ata_status & ATA_BUSY) {
@@ -1445,15 +1466,14 @@
  *      reported here.
  *
  *      LOCKING:
- *      This routine holds the host_set lock while processing pending
+ *      This routine holds the host lock while processing pending
  *      interrupts.
  */
-static irqreturn_t mv_interrupt(int irq, void *dev_instance,
-				struct pt_regs *regs)
+static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
-	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
 	struct mv_host_priv *hpriv;
 	u32 irq_stat;
 
@@ -1462,22 +1482,21 @@
 	/* check the cases where we either have nothing pending or have read
 	 * a bogus register value which can indicate HW removal or PCI fault
 	 */
-	if (!irq_stat || (0xffffffffU == irq_stat)) {
+	if (!irq_stat || (0xffffffffU == irq_stat))
 		return IRQ_NONE;
-	}
 
-	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
-	spin_lock(&host_set->lock);
+	n_hcs = mv_get_hc_count(host->ports[0]->flags);
+	spin_lock(&host->lock);
 
 	for (hc = 0; hc < n_hcs; hc++) {
 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
 		if (relevant) {
-			mv_host_intr(host_set, relevant, hc);
+			mv_host_intr(host, relevant, hc);
 			handled++;
 		}
 	}
 
-	hpriv = host_set->private_data;
+	hpriv = host->private_data;
 	if (IS_60XX(hpriv)) {
 		/* deal with the interrupt coalescing bits */
 		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
@@ -1492,12 +1511,12 @@
 		       readl(mmio + PCI_IRQ_CAUSE_OFS));
 
 		DPRINTK("All regs @ PCI error\n");
-		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
+		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
 
 		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
 		handled++;
 	}
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 
 	return IRQ_RETVAL(handled);
 }
@@ -1529,22 +1548,24 @@
 
 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
 {
-	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU)
-		return readl(mmio + ofs);
+		return readl(addr + ofs);
 	else
 		return (u32) ofs;
 }
 
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
-	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU)
-		writelfl(val, mmio + ofs);
+		writelfl(val, addr + ofs);
 }
 
 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
@@ -1905,8 +1926,8 @@
 
 static void mv_stop_and_reset(struct ata_port *ap)
 {
-	struct mv_host_priv *hpriv = ap->host_set->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
 
 	mv_stop_dma(ap);
 
@@ -1937,7 +1958,7 @@
 static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
 {
 	struct mv_port_priv *pp	= ap->private_data;
-	struct mv_host_priv *hpriv = ap->host_set->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct ata_taskfile tf;
 	struct ata_device *dev = &ap->device[0];
@@ -1987,7 +2008,6 @@
 		ata_port_disable(ap);
 		return;
 	}
-	ap->cbl = ATA_CBL_SATA;
 
 	/* even after SStatus reflects that device is ready,
 	 * it seems to take a while for link to be fully
@@ -2004,10 +2024,10 @@
 			break;
 	}
 
-	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
-	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
-	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
-	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
+	tf.lbah = readb(ap->ioaddr.lbah_addr);
+	tf.lbam = readb(ap->ioaddr.lbam_addr);
+	tf.lbal = readb(ap->ioaddr.lbal_addr);
+	tf.nsect = readb(ap->ioaddr.nsect_addr);
 
 	dev->class = ata_dev_classify(&tf);
 	if (!ata_dev_enabled(dev)) {
@@ -2035,27 +2055,26 @@
  *      chip/bus, fail the command, and move on.
  *
  *      LOCKING:
- *      This routine holds the host_set lock while failing the command.
+ *      This routine holds the host lock while failing the command.
  */
 static void mv_eng_timeout(struct ata_port *ap)
 {
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 
 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
 	DPRINTK("All regs @ start of eng_timeout\n");
-	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
-			 to_pci_dev(ap->host_set->dev));
+	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
         printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
-	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
-	       &qc->scsicmd->cmnd);
+	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_err_intr(ap, 0);
 	mv_stop_and_reset(ap);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
 
 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
 	if (qc->flags & ATA_QCFLAG_ACTIVE) {
@@ -2078,7 +2097,7 @@
  */
 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
 {
-	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
+	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
 	unsigned serr_ofs;
 
 	/* PIO related setup
@@ -2097,7 +2116,7 @@
 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
 
 	/* unused: */
-	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
+	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
 
 	/* Clear any currently outstanding port interrupt conditions */
 	serr_ofs = mv_scr_offset(SCR_ERROR);
@@ -2113,9 +2132,10 @@
 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
 }
 
-static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
-		      unsigned int board_idx)
+static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 {
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct mv_host_priv *hpriv = host->private_data;
 	u8 rev_id;
 	u32 hp_flags = hpriv->hp_flags;
 
@@ -2213,8 +2233,8 @@
 
 /**
  *      mv_init_host - Perform some early initialization of the host.
- *	@pdev: host PCI device
- *      @probe_ent: early data struct representing the host
+ *	@host: ATA host to initialize
+ *      @board_idx: controller index
  *
  *      If possible, do an early global reset of the host.  Then do
  *      our port init and clear/unmask all/relevant host interrupts.
@@ -2222,24 +2242,23 @@
  *      LOCKING:
  *      Inherited from caller.
  */
-static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
-			unsigned int board_idx)
+static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 {
 	int rc = 0, n_hc, port, hc;
-	void __iomem *mmio = probe_ent->mmio_base;
-	struct mv_host_priv *hpriv = probe_ent->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+	struct mv_host_priv *hpriv = host->private_data;
 
 	/* global interrupt mask */
 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
 
-	rc = mv_chip_id(pdev, hpriv, board_idx);
+	rc = mv_chip_id(host, board_idx);
 	if (rc)
 		goto done;
 
-	n_hc = mv_get_hc_count(probe_ent->host_flags);
-	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
+	n_hc = mv_get_hc_count(host->ports[0]->flags);
 
-	for (port = 0; port < probe_ent->n_ports; port++)
+	for (port = 0; port < host->n_ports; port++)
 		hpriv->ops->read_preamp(hpriv, port, mmio);
 
 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
@@ -2250,7 +2269,7 @@
 	hpriv->ops->reset_bus(pdev, mmio);
 	hpriv->ops->enable_leds(hpriv, mmio);
 
-	for (port = 0; port < probe_ent->n_ports; port++) {
+	for (port = 0; port < host->n_ports; port++) {
 		if (IS_60XX(hpriv)) {
 			void __iomem *port_mmio = mv_port_base(mmio, port);
 
@@ -2263,9 +2282,9 @@
 		hpriv->ops->phy_errata(hpriv, mmio, port);
 	}
 
-	for (port = 0; port < probe_ent->n_ports; port++) {
+	for (port = 0; port < host->n_ports; port++) {
 		void __iomem *port_mmio = mv_port_base(mmio, port);
-		mv_port_init(&probe_ent->port[port], port_mmio);
+		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
 	}
 
 	for (hc = 0; hc < n_hc; hc++) {
@@ -2285,7 +2304,11 @@
 
 	/* and unmask interrupt generation for host regs */
 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
-	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
+
+	if (IS_50XX(hpriv))
+		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
+	else
+		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
 
 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
 		"PCI int cause/mask=0x%08x/0x%08x\n",
@@ -2300,17 +2323,17 @@
 
 /**
  *      mv_print_info - Dump key info to kernel log for perusal.
- *      @probe_ent: early data struct representing the host
+ *      @host: ATA host to print info about
  *
  *      FIXME: complete this.
  *
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_print_info(struct ata_probe_ent *probe_ent)
+static void mv_print_info(struct ata_host *host)
 {
-	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	struct mv_host_priv *hpriv = probe_ent->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct mv_host_priv *hpriv = host->private_data;
 	u8 rev_id, scc;
 	const char *scc_s;
 
@@ -2329,7 +2352,7 @@
 
 	dev_printk(KERN_INFO, &pdev->dev,
 	       "%u slots %u ports %s mode IRQ via %s\n",
-	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
+	       (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 }
 
@@ -2344,110 +2367,60 @@
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
-	struct ata_probe_ent *probe_ent = NULL;
-	struct mv_host_priv *hpriv;
 	unsigned int board_idx = (unsigned int)ent->driver_data;
-	void __iomem *mmio_base;
-	int pci_dev_busy = 0, rc;
+	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
+	struct ata_host *host;
+	struct mv_host_priv *hpriv;
+	int n_ports, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		return rc;
-	}
-	pci_set_master(pdev);
-
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
+	/* allocate host */
+	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
 
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+	host->private_data = hpriv;
 
-	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-
-	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-	memset(hpriv, 0, sizeof(*hpriv));
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
 
-	probe_ent->sht = mv_port_info[board_idx].sht;
-	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
-	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
-	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
-	probe_ent->port_ops = mv_port_info[board_idx].port_ops;
+	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
 
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-	probe_ent->private_data = hpriv;
+	rc = pci_go_64(pdev);
+	if (rc)
+		return rc;
 
 	/* initialize adapter */
-	rc = mv_init_host(pdev, probe_ent, board_idx);
-	if (rc) {
-		goto err_out_hpriv;
-	}
+	rc = mv_init_host(host, board_idx);
+	if (rc)
+		return rc;
 
 	/* Enable interrupts */
-	if (msi && pci_enable_msi(pdev) == 0) {
-		hpriv->hp_flags |= MV_HP_FLAG_MSI;
-	} else {
+	if (msi && pci_enable_msi(pdev))
 		pci_intx(pdev, 1);
-	}
 
 	mv_dump_pci_cfg(pdev, 0x68);
-	mv_print_info(probe_ent);
-
-	if (ata_device_add(probe_ent) == 0) {
-		rc = -ENODEV;		/* No devices discovered */
-		goto err_out_dev_add;
-	}
-
-	kfree(probe_ent);
-	return 0;
+	mv_print_info(host);
 
-err_out_dev_add:
-	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
-		pci_disable_msi(pdev);
-	} else {
-		pci_intx(pdev, 0);
-	}
-err_out_hpriv:
-	kfree(hpriv);
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy) {
-		pci_disable_device(pdev);
-	}
-
-	return rc;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
+				 &mv_sht);
 }
 
 static int __init mv_init(void)
 {
-	return pci_module_init(&mv_pci_driver);
+	return pci_register_driver(&mv_pci_driver);
 }
 
 static void __exit mv_exit(void)
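
Note on the sata_mv hunks above: the probe path is converted from the old
probe_ent scheme to the managed-device (devres) API.  pcim_enable_device(),
pcim_iomap_regions(), devm_kzalloc() and ata_host_activate() tear their
resources down automatically when probing fails or the device goes away,
which is why the whole err_out_* unwind chain disappears and every failure
can simply "return rc".  A minimal sketch of that probe shape follows; it is
not part of the patch, and example_init_one, EXAMPLE_MMIO_BAR,
example_port_info, example_interrupt and example_sht are placeholder names
assumed to be defined elsewhere in a real driver (usual libata/PCI headers
assumed as well):

	/* hedged sketch of a devres-style libata PCI probe, placeholders only */
	static int example_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
	{
		const struct ata_port_info *ppi[] = { &example_port_info, NULL };
		struct ata_host *host;
		int rc;

		/* managed enable: released automatically on probe failure/removal */
		rc = pcim_enable_device(pdev);
		if (rc)
			return rc;

		/* request + iomap only the MMIO BAR we actually use */
		rc = pcim_iomap_regions(pdev, 1 << EXAMPLE_MMIO_BAR, DRV_NAME);
		if (rc)
			return rc;

		/* allocate host and ports from the port_info template */
		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
		if (!host)
			return -ENOMEM;
		host->iomap = pcim_iomap_table(pdev);

		pci_set_master(pdev);
		/* register ports, request the IRQ and start probing devices */
		return ata_host_activate(host, pdev->irq, example_interrupt,
					 IRQF_SHARED, &example_sht);
	}

The nv_init_one() conversion below follows the same pattern, using
ata_pci_prepare_native_host() to do the host allocation step.
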
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_nv.c linux-2.6.18.x86_64.p4/drivers/ata/sata_nv.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_nv.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_nv.c	2007-06-06 10:08:00.000000000 -0400
@@ -29,6 +29,11 @@
  *  NV-specific details such as register offsets, SATA phy location,
  *  hotplug info, etc.
  *
+ *  CK804/MCP04 controllers support an alternate programming interface
+ *  similar to the ADMA specification (with some modifications).
+ *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
+ *  sent through the legacy interface.
+ *
  */
 
 #include <linux/kernel.h>
@@ -40,12 +45,17 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
 #include <linux/libata.h>
 
 #define DRV_NAME			"sata_nv"
-#define DRV_VERSION			"2.0"
+#define DRV_VERSION			"3.4"
+
+#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
 
 enum {
+	NV_MMIO_BAR			= 5,
+
 	NV_PORTS			= 2,
 	NV_PIO_MASK			= 0x1f,
 	NV_MWDMA_MASK			= 0x07,
@@ -78,16 +88,154 @@
 	// For PCI config register 20
 	NV_MCP_SATA_CFG_20		= 0x50,
 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
+	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
+	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
+	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
+	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
+
+	NV_ADMA_MAX_CPBS		= 32,
+	NV_ADMA_CPB_SZ			= 128,
+	NV_ADMA_APRD_SZ			= 16,
+	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
+					   NV_ADMA_APRD_SZ,
+	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
+	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
+	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
+					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
+
+	/* BAR5 offset to ADMA general registers */
+	NV_ADMA_GEN			= 0x400,
+	NV_ADMA_GEN_CTL			= 0x00,
+	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
+
+	/* BAR5 offset to ADMA ports */
+	NV_ADMA_PORT			= 0x480,
+
+	/* size of ADMA port register space  */
+	NV_ADMA_PORT_SIZE		= 0x100,
+
+	/* ADMA port registers */
+	NV_ADMA_CTL			= 0x40,
+	NV_ADMA_CPB_COUNT		= 0x42,
+	NV_ADMA_NEXT_CPB_IDX		= 0x43,
+	NV_ADMA_STAT			= 0x44,
+	NV_ADMA_CPB_BASE_LOW		= 0x48,
+	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
+	NV_ADMA_APPEND			= 0x50,
+	NV_ADMA_NOTIFIER		= 0x68,
+	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
+
+	/* NV_ADMA_CTL register bits */
+	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
+	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
+	NV_ADMA_CTL_GO			= (1 << 7),
+	NV_ADMA_CTL_AIEN		= (1 << 8),
+	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
+	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
+
+	/* CPB response flag bits */
+	NV_CPB_RESP_DONE		= (1 << 0),
+	NV_CPB_RESP_ATA_ERR		= (1 << 3),
+	NV_CPB_RESP_CMD_ERR		= (1 << 4),
+	NV_CPB_RESP_CPB_ERR		= (1 << 7),
+
+	/* CPB control flag bits */
+	NV_CPB_CTL_CPB_VALID		= (1 << 0),
+	NV_CPB_CTL_QUEUE		= (1 << 1),
+	NV_CPB_CTL_APRD_VALID		= (1 << 2),
+	NV_CPB_CTL_IEN			= (1 << 3),
+	NV_CPB_CTL_FPDMA		= (1 << 4),
+
+	/* APRD flags */
+	NV_APRD_WRITE			= (1 << 1),
+	NV_APRD_END			= (1 << 2),
+	NV_APRD_CONT			= (1 << 3),
+
+	/* NV_ADMA_STAT flags */
+	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
+	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
+	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
+	NV_ADMA_STAT_CPBERR		= (1 << 4),
+	NV_ADMA_STAT_SERROR		= (1 << 5),
+	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
+	NV_ADMA_STAT_IDLE		= (1 << 8),
+	NV_ADMA_STAT_LEGACY		= (1 << 9),
+	NV_ADMA_STAT_STOPPED		= (1 << 10),
+	NV_ADMA_STAT_DONE		= (1 << 12),
+	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
+	 				  NV_ADMA_STAT_TIMEOUT,
+
+	/* port flags */
+	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
+	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
+
+};
+
+/* ADMA Physical Region Descriptor - one SG segment */
+struct nv_adma_prd {
+	__le64			addr;
+	__le32			len;
+	u8			flags;
+	u8			packet_len;
+	__le16			reserved;
+};
+
+enum nv_adma_regbits {
+	CMDEND	= (1 << 15),		/* end of command list */
+	WNB	= (1 << 14),		/* wait-not-BSY */
+	IGN	= (1 << 13),		/* ignore this entry */
+	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
+	DA2	= (1 << (2 + 8)),
+	DA1	= (1 << (1 + 8)),
+	DA0	= (1 << (0 + 8)),
+};
+
+/* ADMA Command Parameter Block
+   The first 5 SG segments are stored inside the Command Parameter Block itself.
+   If there are more than 5 segments the remainder are stored in a separate
+   memory area indicated by next_aprd. */
+struct nv_adma_cpb {
+	u8			resp_flags;    /* 0 */
+	u8			reserved1;     /* 1 */
+	u8			ctl_flags;     /* 2 */
+	/* len is length of taskfile in 64 bit words */
+ 	u8			len;           /* 3  */
+	u8			tag;           /* 4 */
+	u8			next_cpb_idx;  /* 5 */
+	__le16			reserved2;     /* 6-7 */
+	__le16			tf[12];        /* 8-31 */
+	struct nv_adma_prd	aprd[5];       /* 32-111 */
+	__le64			next_aprd;     /* 112-119 */
+	__le64			reserved3;     /* 120-127 */
 };
 
+
+struct nv_adma_port_priv {
+	struct nv_adma_cpb	*cpb;
+	dma_addr_t		cpb_dma;
+	struct nv_adma_prd	*aprd;
+	dma_addr_t		aprd_dma;
+	void __iomem *		ctl_block;
+	void __iomem *		gen_block;
+	void __iomem *		notifier_clear_block;
+	u8			flags;
+	int			last_issue_ncq;
+};
+
+struct nv_host_priv {
+	unsigned long		type;
+};
+
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static void nv_ck804_host_stop(struct ata_host_set *host_set);
-static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
-					struct pt_regs *regs);
-static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
-				    struct pt_regs *regs);
-static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
-				      struct pt_regs *regs);
+#ifdef CONFIG_PM
+static int nv_pci_device_resume(struct pci_dev *pdev);
+#endif
+static void nv_ck804_host_stop(struct ata_host *host);
+static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
+static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
+static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 
@@ -96,61 +244,61 @@
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static void nv_error_handler(struct ata_port *ap);
+static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
+static void nv_adma_irq_clear(struct ata_port *ap);
+static int nv_adma_port_start(struct ata_port *ap);
+static void nv_adma_port_stop(struct ata_port *ap);
+#ifdef CONFIG_PM
+static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_adma_port_resume(struct ata_port *ap);
+#endif
+static void nv_adma_freeze(struct ata_port *ap);
+static void nv_adma_thaw(struct ata_port *ap);
+static void nv_adma_error_handler(struct ata_port *ap);
+static void nv_adma_host_stop(struct ata_host *host);
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
+static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 
 enum nv_host_type
 {
 	GENERIC,
 	NFORCE2,
 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
-	CK804
+	CK804,
+	ADMA
 };
 
 static const struct pci_device_id nv_pci_tbl[] = {
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
-	{ 0, } /* terminate list */
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
+
+	{ } /* terminate list */
 };
 
 static struct pci_driver nv_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= nv_pci_tbl,
 	.probe			= nv_init_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= nv_pci_device_resume,
+#endif
 	.remove			= ata_pci_remove_one,
 };
 
@@ -172,6 +320,24 @@
 	.bios_param		= ata_std_bios_param,
 };
 
+static struct scsi_host_template nv_adma_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= NV_ADMA_MAX_CPBS,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
+	.slave_configure	= nv_adma_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
 static const struct ata_port_operations nv_generic_ops = {
 	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
@@ -189,14 +355,13 @@
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= nv_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_pio_data_xfer,
-	.irq_handler		= nv_generic_interrupt,
+	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_pci_host_stop,
 };
 
 static const struct ata_port_operations nv_nf2_ops = {
@@ -216,14 +381,13 @@
 	.thaw			= nv_nf2_thaw,
 	.error_handler		= nv_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_pio_data_xfer,
-	.irq_handler		= nv_nf2_interrupt,
+	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_pci_host_stop,
 };
 
 static const struct ata_port_operations nv_ck804_ops = {
@@ -243,43 +407,94 @@
 	.thaw			= nv_ck804_thaw,
 	.error_handler		= nv_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.data_xfer		= ata_pio_data_xfer,
-	.irq_handler		= nv_ck804_interrupt,
+	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
 	.host_stop		= nv_ck804_host_stop,
 };
 
-static struct ata_port_info nv_port_info[] = {
+static const struct ata_port_operations nv_adma_ops = {
+	.port_disable		= ata_port_disable,
+	.tf_load		= ata_tf_load,
+	.tf_read		= nv_adma_tf_read,
+	.check_atapi_dma	= nv_adma_check_atapi_dma,
+	.exec_command		= ata_exec_command,
+	.check_status		= ata_check_status,
+	.dev_select		= ata_std_dev_select,
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= nv_adma_qc_prep,
+	.qc_issue		= nv_adma_qc_issue,
+	.freeze			= nv_adma_freeze,
+	.thaw			= nv_adma_thaw,
+	.error_handler		= nv_adma_error_handler,
+	.post_internal_cmd	= nv_adma_post_internal_cmd,
+	.data_xfer		= ata_data_xfer,
+	.irq_clear		= nv_adma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+	.scr_read		= nv_scr_read,
+	.scr_write		= nv_scr_write,
+	.port_start		= nv_adma_port_start,
+	.port_stop		= nv_adma_port_stop,
+#ifdef CONFIG_PM
+	.port_suspend		= nv_adma_port_suspend,
+	.port_resume		= nv_adma_port_resume,
+#endif
+	.host_stop		= nv_adma_host_stop,
+};
+
+static const struct ata_port_info nv_port_info[] = {
 	/* generic */
 	{
 		.sht		= &nv_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_generic_ops,
+		.irq_handler	= nv_generic_interrupt,
 	},
 	/* nforce2/3 */
 	{
 		.sht		= &nv_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_nf2_ops,
+		.irq_handler	= nv_nf2_interrupt,
 	},
 	/* ck804 */
 	{
 		.sht		= &nv_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_ck804_ops,
+		.irq_handler	= nv_ck804_interrupt,
+	},
+	/* ADMA */
+	{
+		.sht		= &nv_adma_sht,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_HRST_TO_RESUME |
+				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_adma_ops,
+		.irq_handler	= nv_adma_interrupt,
 	},
 };
 
@@ -289,44 +504,265 @@
 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
-					struct pt_regs *regs)
+static int adma_enabled = 1;
+
+static void nv_adma_register_mode(struct ata_port *ap)
 {
-	struct ata_host_set *host_set = dev_instance;
-	unsigned int i;
-	unsigned int handled = 0;
-	unsigned long flags;
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp, status;
+	int count = 0;
 
-	spin_lock_irqsave(&host_set->lock, flags);
+	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		return;
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap;
+	status = readw(mmio + NV_ADMA_STAT);
+	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
+		ndelay(50);
+		status = readw(mmio + NV_ADMA_STAT);
+		count++;
+	}
+	if(count == 20)
+		ata_port_printk(ap, KERN_WARNING,
+			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
+			status);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	count = 0;
+	status = readw(mmio + NV_ADMA_STAT);
+	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
+		ndelay(50);
+		status = readw(mmio + NV_ADMA_STAT);
+		count++;
+	}
+	if(count == 20)
+		ata_port_printk(ap, KERN_WARNING,
+			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
+			 status);
 
-		ap = host_set->ports[i];
-		if (ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
-			struct ata_queued_cmd *qc;
+	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+}
 
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
-				handled += ata_host_intr(ap, qc);
-			else
-				// No request pending?  Clear interrupt status
-				// anyway, in case there's one pending.
-				ap->ops->check_status(ap);
-		}
+static void nv_adma_mode(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp, status;
+	int count = 0;
 
+	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
+		return;
+
+	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	status = readw(mmio + NV_ADMA_STAT);
+	while(((status & NV_ADMA_STAT_LEGACY) ||
+	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
+		ndelay(50);
+		status = readw(mmio + NV_ADMA_STAT);
+		count++;
 	}
+	if(count == 20)
+		ata_port_printk(ap, KERN_WARNING,
+			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
+			status);
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
+}
 
-	return IRQ_RETVAL(handled);
+static int nv_adma_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u64 bounce_limit;
+	unsigned long segment_boundary;
+	unsigned short sg_tablesize;
+	int rc;
+	int adma_enable;
+	u32 current_reg, new_reg, config_mask;
+
+	rc = ata_scsi_slave_config(sdev);
+
+	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+		/* Not a proper libata device, ignore */
+		return rc;
+
+	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
+		/*
+		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
+		 * Therefore ATAPI commands are sent through the legacy interface.
+		 * However, the legacy interface only supports 32-bit DMA.
+		 * Restrict DMA parameters as required by the legacy interface
+		 * when an ATAPI device is connected.
+		 */
+		bounce_limit = ATA_DMA_MASK;
+		segment_boundary = ATA_DMA_BOUNDARY;
+		/* Subtract 1 since an extra entry may be needed for padding, see
+		   libata-scsi.c */
+		sg_tablesize = LIBATA_MAX_PRD - 1;
+
+		/* Since the legacy DMA engine is in use, we need to disable ADMA
+		   on the port. */
+		adma_enable = 0;
+		nv_adma_register_mode(ap);
+	}
+	else {
+		bounce_limit = *ap->dev->dma_mask;
+		segment_boundary = NV_ADMA_DMA_BOUNDARY;
+		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
+		adma_enable = 1;
+	}
+
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
+
+	if(ap->port_no == 1)
+		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
+			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+	else
+		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
+			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
+
+	if(adma_enable) {
+		new_reg = current_reg | config_mask;
+		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
+	}
+	else {
+		new_reg = current_reg & ~config_mask;
+		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
+	}
+
+	if(current_reg != new_reg)
+		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
+
+	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
+	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	ata_port_printk(ap, KERN_INFO,
+		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+	return rc;
+}
+
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+}
+
+static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	/* Since commands where a result TF is requested are not
+	   executed in ADMA mode, the only time this function will be called
+	   in ADMA mode will be if a command fails. In this case we
+	   don't care about going into register mode with ADMA commands
+	   pending, as the commands will all shortly be aborted anyway. */
+	nv_adma_register_mode(ap);
+
+	ata_tf_read(ap, tf);
+}
+
+static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
+{
+	unsigned int idx = 0;
+
+	if(tf->flags & ATA_TFLAG_ISADDR) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
+			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
+			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
+			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
+			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
+			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
+		} else
+			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
+
+		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
+	}
+
+	if(tf->flags & ATA_TFLAG_DEVICE)
+		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
+
+	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
+
+	while(idx < 12)
+		cpb[idx++] = cpu_to_le16(IGN);
+
+	return idx;
+}
+
+static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u8 flags = pp->cpb[cpb_num].resp_flags;
+
+	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+
+	if (unlikely((force_err ||
+		     flags & (NV_CPB_RESP_ATA_ERR |
+			      NV_CPB_RESP_CMD_ERR |
+			      NV_CPB_RESP_CPB_ERR)))) {
+		struct ata_eh_info *ehi = &ap->eh_info;
+		int freeze = 0;
+
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
+		if (flags & NV_CPB_RESP_ATA_ERR) {
+			ata_ehi_push_desc(ehi, ": ATA error");
+			ehi->err_mask |= AC_ERR_DEV;
+		} else if (flags & NV_CPB_RESP_CMD_ERR) {
+			ata_ehi_push_desc(ehi, ": CMD error");
+			ehi->err_mask |= AC_ERR_DEV;
+		} else if (flags & NV_CPB_RESP_CPB_ERR) {
+			ata_ehi_push_desc(ehi, ": CPB error");
+			ehi->err_mask |= AC_ERR_SYSTEM;
+			freeze = 1;
+		} else {
+			/* notifier error, but no error in CPB flags? */
+			ehi->err_mask |= AC_ERR_OTHER;
+			freeze = 1;
+		}
+		/* Kill all commands. EH will determine what actually failed. */
+		if (freeze)
+			ata_port_freeze(ap);
+		else
+			ata_port_abort(ap);
+		return 1;
+	}
+
+	if (likely(flags & NV_CPB_RESP_DONE)) {
+		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
+		VPRINTK("CPB flags done, flags=0x%x\n", flags);
+		if (likely(qc)) {
+			DPRINTK("Completing qc from tag %d\n",cpb_num);
+			ata_qc_complete(qc);
+		} else {
+			struct ata_eh_info *ehi = &ap->eh_info;
+			/* Notifier bits set without a command may indicate the drive
+			   is misbehaving. Raise host state machine violation on this
+			   condition. */
+			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
+				cpb_num);
+			ehi->err_mask |= AC_ERR_HSM;
+			ehi->action |= ATA_EH_SOFTRESET;
+			ata_port_freeze(ap);
+			return 1;
+		}
+	}
+	return 0;
 }
 
 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 {
 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
-	int handled;
 
 	/* freeze if hotplugged */
 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
@@ -345,21 +781,577 @@
 	}
 
 	/* handle interrupt */
-	handled = ata_host_intr(ap, qc);
-	if (unlikely(!handled)) {
-		/* spurious, clear it */
-		ata_check_status(ap);
+	return ata_host_intr(ap, qc);
+}
+
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
+{
+	struct ata_host *host = dev_instance;
+	int i, handled = 0;
+	u32 notifier_clears[2];
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		notifier_clears[i] = 0;
+
+		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct nv_adma_port_priv *pp = ap->private_data;
+			void __iomem *mmio = pp->ctl_block;
+			u16 status;
+			u32 gen_ctl;
+			u32 notifier, notifier_error;
+
+			/* if ADMA is disabled, use standard ata interrupt handler */
+			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+					>> (NV_INT_PORT_SHIFT * i);
+				handled += nv_host_intr(ap, irq_stat);
+				continue;
+			}
+
+			/* if in ATA register mode, check for standard interrupts */
+			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+					>> (NV_INT_PORT_SHIFT * i);
+				if(ata_tag_valid(ap->active_tag))
+					/** NV_INT_DEV indication seems unreliable at times
+					    at least in ADMA mode. Force it on always when a
+					    command is active, to prevent losing interrupts. */
+					irq_stat |= NV_INT_DEV;
+				handled += nv_host_intr(ap, irq_stat);
+			}
+
+			notifier = readl(mmio + NV_ADMA_NOTIFIER);
+			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+			notifier_clears[i] = notifier | notifier_error;
+
+			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+
+			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+			    !notifier_error)
+				/* Nothing to do */
+				continue;
+
+			status = readw(mmio + NV_ADMA_STAT);
+
+			/* Clear status. Ensure the controller sees the clearing before we start
+			   looking at any of the CPB statuses, so that any CPB completions after
+			   this point in the handler will raise another interrupt. */
+			writew(status, mmio + NV_ADMA_STAT);
+			readw(mmio + NV_ADMA_STAT); /* flush posted write */
+			rmb();
+
+			handled++; /* irq handled if we got here */
+
+			/* freeze if hotplugged or controller error */
+			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
+					       NV_ADMA_STAT_HOTUNPLUG |
+					       NV_ADMA_STAT_TIMEOUT |
+					       NV_ADMA_STAT_SERROR))) {
+				struct ata_eh_info *ehi = &ap->eh_info;
+
+				ata_ehi_clear_desc(ehi);
+				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
+				if (status & NV_ADMA_STAT_TIMEOUT) {
+					ehi->err_mask |= AC_ERR_SYSTEM;
+					ata_ehi_push_desc(ehi, ": timeout");
+				} else if (status & NV_ADMA_STAT_HOTPLUG) {
+					ata_ehi_hotplugged(ehi);
+					ata_ehi_push_desc(ehi, ": hotplug");
+				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
+					ata_ehi_hotplugged(ehi);
+					ata_ehi_push_desc(ehi, ": hot unplug");
+				} else if (status & NV_ADMA_STAT_SERROR) {
+					/* let libata analyze SError and figure out the cause */
+					ata_ehi_push_desc(ehi, ": SError");
+				}
+				ata_port_freeze(ap);
+				continue;
+			}
+
+			if (status & (NV_ADMA_STAT_DONE |
+				      NV_ADMA_STAT_CPBERR)) {
+				u32 check_commands;
+				int pos, error = 0;
+
+				if(ata_tag_valid(ap->active_tag))
+					check_commands = 1 << ap->active_tag;
+				else
+					check_commands = ap->sactive;
+
+				/** Check CPBs for completed commands */
+				while ((pos = ffs(check_commands)) && !error) {
+					pos--;
+					error = nv_adma_check_cpb(ap, pos,
+						notifier_error & (1 << pos) );
+					check_commands &= ~(1 << pos );
+				}
+			}
+		}
+	}
+
+	if(notifier_clears[0] || notifier_clears[1]) {
+		/* Note: Both notifier clear registers must be written
+		   if either is set, even if one is zero, according to NVIDIA. */
+		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
+		writel(notifier_clears[0], pp->notifier_clear_block);
+		pp = host->ports[1]->private_data;
+		writel(notifier_clears[1], pp->notifier_clear_block);
+	}
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void nv_adma_freeze(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp;
+
+	nv_ck804_freeze(ap);
+
+	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+		return;
+
+	/* clear any outstanding CK804 notifications */
+	writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+	/* Disable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+		mmio + NV_ADMA_CTL);
+	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+}
+
+static void nv_adma_thaw(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp;
+
+	nv_ck804_thaw(ap);
+
+	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+		return;
+
+	/* Enable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+		mmio + NV_ADMA_CTL);
+	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+}
+
+static void nv_adma_irq_clear(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u32 notifier_clears[2];
+
+	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+		ata_bmdma_irq_clear(ap);
+		return;
+	}
+
+	/* clear any outstanding CK804 notifications */
+	writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+	/* clear ADMA status */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* clear notifiers - note both ports need to be written with
+	   something even though we are only clearing on one */
+	if (ap->port_no == 0) {
+		notifier_clears[0] = 0xFFFFFFFF;
+		notifier_clears[1] = 0;
+	} else {
+		notifier_clears[0] = 0;
+		notifier_clears[1] = 0xFFFFFFFF;
+	}
+	pp = ap->host->ports[0]->private_data;
+	writel(notifier_clears[0], pp->notifier_clear_block);
+	pp = ap->host->ports[1]->private_data;
+	writel(notifier_clears[1], pp->notifier_clear_block);
+}
+
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		ata_bmdma_post_internal_cmd(qc);
+}
+
+static int nv_adma_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct nv_adma_port_priv *pp;
+	int rc;
+	void *mem;
+	dma_addr_t mem_dma;
+	void __iomem *mmio;
+	u16 tmp;
+
+	VPRINTK("ENTER\n");
+
+	rc = ata_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
+	       ap->port_no * NV_ADMA_PORT_SIZE;
+	pp->ctl_block = mmio;
+	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
+	pp->notifier_clear_block = pp->gen_block +
+	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
+
+	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
+				  &mem_dma, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
+
+	/*
+	 * First item in chunk of DMA memory:
+	 * 128-byte command parameter block (CPB)
+	 * one for each command tag
+	 */
+	pp->cpb     = mem;
+	pp->cpb_dma = mem_dma;
+
+	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
+	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+
+	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+
+	/*
+	 * Second item: block of ADMA_SGTBL_LEN s/g entries
+	 */
+	pp->aprd = mem;
+	pp->aprd_dma = mem_dma;
+
+	ap->private_data = pp;
+
+	/* clear any outstanding interrupt conditions */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* initialize port variables */
+	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* clear GO for register mode, enable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	udelay(1);
+	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+
+	return 0;
+}
+
+static void nv_adma_port_stop(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+
+	VPRINTK("ENTER\n");
+	writew(0, mmio + NV_ADMA_CTL);
+}
+
+#ifdef CONFIG_PM
+static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+
+	/* Go to register mode - clears GO */
+	nv_adma_register_mode(ap);
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* disable interrupt, shut down port */
+	writew(0, mmio + NV_ADMA_CTL);
+
+	return 0;
+}
+
+static int nv_adma_port_resume(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	u16 tmp;
+
+	/* set CPB block location */
+	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
+	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+
+	/* clear any outstanding interrupt conditions */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* initialize port variables */
+	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* clear GO for register mode, enable interrupt */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	udelay(1);
+	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+
+	return 0;
+}
+#endif
+
+static void nv_adma_setup_port(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	struct ata_ioports *ioport = &ap->ioaddr;
+
+	VPRINTK("ENTER\n");
+
+	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
+
+	ioport->cmd_addr	= mmio;
+	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
+	ioport->error_addr	=
+	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
+	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
+	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
+	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
+	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
+	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
+	ioport->status_addr	=
+	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
+	ioport->altstatus_addr	=
+	ioport->ctl_addr	= mmio + 0x20;
+}
+
+static int nv_adma_host_init(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	unsigned int i;
+	u32 tmp32;
+
+	VPRINTK("ENTER\n");
+
+	/* enable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
+		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	for (i = 0; i < host->n_ports; i++)
+		nv_adma_setup_port(host->ports[i]);
+
+	return 0;
+}
+
+static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
+			      struct scatterlist *sg,
+			      int idx,
+			      struct nv_adma_prd *aprd)
+{
+	u8 flags = 0;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= NV_APRD_WRITE;
+	if (idx == qc->n_elem - 1)
+		flags |= NV_APRD_END;
+	else if (idx != 4)
+		flags |= NV_APRD_CONT;
+
+	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
+	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
+	aprd->flags = flags;
+	aprd->packet_len = 0;
+}
+
+static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	unsigned int idx;
+	struct nv_adma_prd *aprd;
+	struct scatterlist *sg;
+
+	VPRINTK("ENTER\n");
+
+	idx = 0;
+
+	ata_for_each_sg(sg, qc) {
+		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+		nv_adma_fill_aprd(qc, sg, idx, aprd);
+		idx++;
 	}
+	if (idx > 5)
+		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+	else
+		cpb->next_aprd = cpu_to_le64(0);
+}
+
+static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	/* ADMA engine can only be used for non-ATAPI DMA commands,
+	   or interrupt-driven no-data commands, where a result taskfile
+	   is not required. */
+	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+	   (qc->tf.flags & ATA_TFLAG_POLLING) ||
+	   (qc->flags & ATA_QCFLAG_RESULT_TF))
+		return 1;
+
+	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
+	   (qc->tf.protocol == ATA_PROT_NODATA))
+		return 0;
 
 	return 1;
 }
 
-static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
+	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
+		       NV_CPB_CTL_IEN;
+
+	if (nv_adma_use_reg_mode(qc)) {
+		nv_adma_register_mode(qc->ap);
+		ata_qc_prep(qc);
+		return;
+	}
+
+	cpb->resp_flags = NV_CPB_RESP_DONE;
+	wmb();
+	cpb->ctl_flags = 0;
+	wmb();
+
+	cpb->len		= 3;
+	cpb->tag		= qc->tag;
+	cpb->next_cpb_idx	= 0;
+
+	/* turn on NCQ flags for NCQ commands */
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
+
+	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
+
+	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
+
+	if(qc->flags & ATA_QCFLAG_DMAMAP) {
+		nv_adma_fill_sg(qc, cpb);
+		ctl_flags |= NV_CPB_CTL_APRD_VALID;
+	} else
+		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
+
+	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
+	   finished filling in all of the contents */
+	wmb();
+	cpb->ctl_flags = ctl_flags;
+	wmb();
+	cpb->resp_flags = 0;
+}
+
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	void __iomem *mmio = pp->ctl_block;
+	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
+
+	VPRINTK("ENTER\n");
+
+	if (nv_adma_use_reg_mode(qc)) {
+		/* use ATA register mode */
+		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
+		nv_adma_register_mode(qc->ap);
+		return ata_qc_issue_prot(qc);
+	} else
+		nv_adma_mode(qc->ap);
+
+	/* write append register, command tag in lower 8 bits
+	   and (number of cpbs to append -1) in top 8 bits */
+	wmb();
+
+	if(curr_ncq != pp->last_issue_ncq) {
+	   	/* Seems to need some delay before switching between NCQ and non-NCQ
+		   commands, else we get command timeouts and such. */
+		udelay(20);
+		pp->last_issue_ncq = curr_ncq;
+	}
+
+	writew(qc->tag, mmio + NV_ADMA_APPEND);
+
+	DPRINTK("Issued tag %u\n",qc->tag);
+
+	return 0;
+}
+
+static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap;
+
+		ap = host->ports[i];
+		if (ap &&
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->active_tag);
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+				handled += ata_host_intr(ap, qc);
+			else
+				// No request pending?  Clear interrupt status
+				// anyway, in case there's one pending.
+				ap->ops->check_status(ap);
+		}
+
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
 {
 	int i, handled = 0;
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
 			handled += nv_host_intr(ap, irq_stat);
@@ -370,32 +1362,30 @@
 	return IRQ_RETVAL(handled);
 }
 
-static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
-				    struct pt_regs *regs)
+static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	u8 irq_stat;
 	irqreturn_t ret;
 
-	spin_lock(&host_set->lock);
-	irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
-	ret = nv_do_interrupt(host_set, irq_stat);
-	spin_unlock(&host_set->lock);
+	spin_lock(&host->lock);
+	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
+	ret = nv_do_interrupt(host, irq_stat);
+	spin_unlock(&host->lock);
 
 	return ret;
 }
 
-static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
-				      struct pt_regs *regs)
+static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	u8 irq_stat;
 	irqreturn_t ret;
 
-	spin_lock(&host_set->lock);
-	irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
-	ret = nv_do_interrupt(host_set, irq_stat);
-	spin_unlock(&host_set->lock);
+	spin_lock(&host->lock);
+	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+	ret = nv_do_interrupt(host, irq_stat);
+	spin_unlock(&host->lock);
 
 	return ret;
 }
@@ -405,7 +1395,7 @@
 	if (sc_reg > SCR_CONTROL)
 		return 0xffffffffU;
 
-	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
+	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
@@ -413,36 +1403,36 @@
 	if (sc_reg > SCR_CONTROL)
 		return;
 
-	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
+	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 static void nv_nf2_freeze(struct ata_port *ap)
 {
-	unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
+	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
 	u8 mask;
 
-	mask = inb(scr_addr + NV_INT_ENABLE);
+	mask = ioread8(scr_addr + NV_INT_ENABLE);
 	mask &= ~(NV_INT_ALL << shift);
-	outb(mask, scr_addr + NV_INT_ENABLE);
+	iowrite8(mask, scr_addr + NV_INT_ENABLE);
 }
 
 static void nv_nf2_thaw(struct ata_port *ap)
 {
-	unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
+	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
 	u8 mask;
 
-	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
+	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
 
-	mask = inb(scr_addr + NV_INT_ENABLE);
+	mask = ioread8(scr_addr + NV_INT_ENABLE);
 	mask |= (NV_INT_MASK << shift);
-	outb(mask, scr_addr + NV_INT_ENABLE);
+	iowrite8(mask, scr_addr + NV_INT_ENABLE);
 }
 
 static void nv_ck804_freeze(struct ata_port *ap)
 {
-	void __iomem *mmio_base = ap->host_set->mmio_base;
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
 	u8 mask;
 
@@ -453,7 +1443,7 @@
 
 static void nv_ck804_thaw(struct ata_port *ap)
 {
-	void __iomem *mmio_base = ap->host_set->mmio_base;
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
 	u8 mask;
 
@@ -464,7 +1454,8 @@
 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
 }
 
-static int nv_hardreset(struct ata_port *ap, unsigned int *class)
+static int nv_hardreset(struct ata_port *ap, unsigned int *class,
+			unsigned long deadline)
 {
 	unsigned int dummy;
 
@@ -472,7 +1463,7 @@
 	 * some controllers.  Don't classify on hardreset.  For more
 	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
 	 */
-	return sata_std_hardreset(ap, &dummy);
+	return sata_std_hardreset(ap, &dummy, deadline);
 }
 
 static void nv_error_handler(struct ata_port *ap)
@@ -481,15 +1472,71 @@
 			   nv_hardreset, ata_std_postreset);
 }
 
+static void nv_adma_error_handler(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+		void __iomem *mmio = pp->ctl_block;
+		int i;
+		u16 tmp;
+
+		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
+			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+			u32 status = readw(mmio + NV_ADMA_STAT);
+			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
+			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
+
+			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
+				"next cpb count 0x%X next cpb idx 0x%x\n",
+				notifier, notifier_error, gen_ctl, status,
+				cpb_count, next_cpb_idx);
+
+			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+				struct nv_adma_cpb *cpb = &pp->cpb[i];
+				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
+				    ap->sactive & (1 << i) )
+					ata_port_printk(ap, KERN_ERR,
+						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
+						i, cpb->ctl_flags, cpb->resp_flags);
+			}
+		}
+
+		/* Push us back into port register mode for error handling. */
+		nv_adma_register_mode(ap);
+
+		/* Mark all of the CPBs as invalid to prevent them from being executed */
+		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
+			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
+
+		/* clear CPB fetch count */
+		writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+		/* Reset channel */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+		udelay(1);
+		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	}
+
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
+			   nv_hardreset, ata_std_postreset);
+}
+
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
-	struct ata_port_info *ppi;
-	struct ata_probe_ent *probe_ent;
-	int pci_dev_busy = 0;
+	const struct ata_port_info *ppi[] = { NULL, NULL };
+	struct ata_host *host;
+	struct nv_host_priv *hpriv;
 	int rc;
 	u32 bar;
-	unsigned long base;
+	void __iomem *base;
+	unsigned long type = ent->driver_data;
 
         // Make sure this is a SATA controller by counting the number of bars
         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
@@ -501,43 +1548,45 @@
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
-		goto err_out;
+		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out_disable;
+	/* determine type and allocate host */
+	if (type >= CK804 && adma_enabled) {
+		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+		type = ADMA;
 	}
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	ppi[0] = &nv_port_info[type];
+	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
 	if (rc)
-		goto err_out_regions;
+		return rc;
 
-	rc = -ENOMEM;
-
-	ppi = &nv_port_info[ent->driver_data];
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-	if (!probe_ent)
-		goto err_out_regions;
-
-	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
-	if (!probe_ent->mmio_base) {
-		rc = -EIO;
-		goto err_out_free_ent;
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	hpriv->type = type;
+	host->private_data = hpriv;
+
+	/* set 64bit dma masks, may fail */
+	if (type == ADMA) {
+		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
+			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
 	}
 
-	base = (unsigned long)probe_ent->mmio_base;
+	/* request and iomap NV_MMIO_BAR */
+	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
+	if (rc)
+		return rc;
 
-	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
-	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
+	/* configure SCR access */
+	base = host->iomap[NV_MMIO_BAR];
+	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
+	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
 
 	/* enable SATA space for CK804 */
-	if (ent->driver_data == CK804) {
+	if (type >= CK804) {
 		u8 regval;
 
 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
@@ -545,45 +1594,99 @@
 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
 	}
 
+	/* init ADMA */
+	if (type == ADMA) {
+		rc = nv_adma_host_init(host);
+		if (rc)
+			return rc;
+	}
+
 	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
+				 IRQF_SHARED, ppi[0]->sht);
+}
 
-	rc = ata_device_add(probe_ent);
-	if (rc != NV_PORTS)
-		goto err_out_iounmap;
+#ifdef CONFIG_PM
+static int nv_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct nv_host_priv *hpriv = host->private_data;
+	int rc;
 
-	kfree(probe_ent);
+	rc = ata_pci_device_do_resume(pdev);
+	if(rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		if(hpriv->type >= CK804) {
+			u8 regval;
+
+			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+		}
+		if(hpriv->type == ADMA) {
+			u32 tmp32;
+			struct nv_adma_port_priv *pp;
+			/* enable/disable ADMA on the ports appropriately */
+			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+
+			pp = host->ports[0]->private_data;
+			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+				 	   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+			else
+				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
+				 	   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+			pp = host->ports[1]->private_data;
+			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
+				 	   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+			else
+				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
+				 	   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
 
-	return 0;
+			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+		}
+	}
 
-err_out_iounmap:
-	pci_iounmap(pdev, probe_ent->mmio_base);
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out_disable:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-err_out:
-	return rc;
+	ata_host_resume(host);
+
+	return 0;
 }
+#endif
 
-static void nv_ck804_host_stop(struct ata_host_set *host_set)
+static void nv_ck804_host_stop(struct ata_host *host)
 {
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(host->dev);
 	u8 regval;
 
 	/* disable SATA space for CK804 */
 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+}
+
+static void nv_adma_host_stop(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u32 tmp32;
+
+	/* disable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
 
-	ata_pci_host_stop(host_set);
+	nv_ck804_host_stop(host);
 }
 
 static int __init nv_init(void)
 {
-	return pci_module_init(&nv_pci_driver);
+	return pci_register_driver(&nv_pci_driver);
 }
 
 static void __exit nv_exit(void)
@@ -593,3 +1696,5 @@
 
 module_init(nv_init);
 module_exit(nv_exit);
+module_param_named(adma, adma_enabled, bool, 0444);
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
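
Note on the ADMA additions to sata_nv above: each command tag gets one
128-byte CPB, whose first five scatter/gather entries (APRDs) are stored
inline in the CPB; any further entries go into a per-tag region of the
separately allocated APRD table, which the CPB references through
next_aprd.  The slot calculation used by nv_adma_fill_sg() is spelled out
below purely as illustration (not part of the patch); the helper name is
hypothetical and the structures/constants are the ones defined in the
sata_nv hunks above:

	/* hedged sketch: where APRD number 'idx' for command 'tag' lives */
	static struct nv_adma_prd *example_aprd_slot(struct nv_adma_port_priv *pp,
						     struct nv_adma_cpb *cpb,
						     unsigned int tag,
						     unsigned int idx)
	{
		/* first 5 entries are inline in the CPB itself */
		if (idx < 5)
			return &cpb->aprd[idx];
		/* overflow: NV_ADMA_SGTBL_LEN external entries per tag */
		return &pp->aprd[NV_ADMA_SGTBL_LEN * tag + (idx - 5)];
	}

nv_adma_fill_sg() in the hunk above uses the same expression inline, and
sets next_aprd to the DMA address of the per-tag overflow region only when
more than five segments are present.
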
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_promise.c linux-2.6.18.x86_64.p4/drivers/ata/sata_promise.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_promise.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_promise.c	2007-06-06 10:08:00.000000000 -0400
@@ -37,47 +37,91 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/device.h>
+#include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 #include "sata_promise.h"
 
 #define DRV_NAME	"sata_promise"
-#define DRV_VERSION	"1.04"
+#define DRV_VERSION	"2.07"
 
 
 enum {
+	PDC_MAX_PORTS		= 4,
+	PDC_MMIO_BAR		= 3,
+
+	/* register offsets */
+	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
+	PDC_SECTOR_COUNT	= 0x08, /* Sector count reg (per port) */
+	PDC_SECTOR_NUMBER	= 0x0C, /* Sector number reg (per port) */
+	PDC_CYLINDER_LOW	= 0x10, /* Cylinder low reg (per port) */
+	PDC_CYLINDER_HIGH	= 0x14, /* Cylinder high reg (per port) */
+	PDC_DEVICE		= 0x18, /* Device/Head reg (per port) */
+	PDC_COMMAND		= 0x1C, /* Command/status reg (per port) */
+	PDC_ALTSTATUS		= 0x38, /* Alternate-status/device-control reg (per port) */
 	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
 	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
-	PDC_TBG_MODE		= 0x41,	/* TBG mode */
 	PDC_FLASH_CTL		= 0x44, /* Flash control register */
-	PDC_PCI_CTL		= 0x48, /* PCI control and status register */
 	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
 	PDC_CTLSTAT		= 0x60,	/* IDE control and status (per port) */
 	PDC_SATA_PLUG_CSR	= 0x6C, /* SATA Plug control/status reg */
 	PDC2_SATA_PLUG_CSR	= 0x60, /* SATAII Plug control/status reg */
-	PDC_SLEW_CTL		= 0x470, /* slew rate control reg */
+	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
+	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */
 
-	PDC_ERR_MASK		= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
-				  (1<<8) | (1<<9) | (1<<10),
+	/* PDC_GLOBAL_CTL bit definitions */
+	PDC_PH_ERR		= (1 <<  8), /* PCI error while loading packet */
+	PDC_SH_ERR		= (1 <<  9), /* PCI error while loading S/G table */
+	PDC_DH_ERR		= (1 << 10), /* PCI error while loading data */
+	PDC2_HTO_ERR		= (1 << 12), /* host bus timeout */
+	PDC2_ATA_HBA_ERR	= (1 << 13), /* error during SATA DATA FIS transmission */
+	PDC2_ATA_DMA_CNT_ERR	= (1 << 14), /* DMA DATA FIS size differs from S/G count */
+	PDC_OVERRUN_ERR		= (1 << 19), /* S/G byte count larger than HD requires */
+	PDC_UNDERRUN_ERR	= (1 << 20), /* S/G byte count less than HD requires */
+	PDC_DRIVE_ERR		= (1 << 21), /* drive error */
+	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
+	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
+	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
+	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR,
+	PDC_ERR_MASK		= (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC_OVERRUN_ERR
+				   | PDC_UNDERRUN_ERR | PDC_DRIVE_ERR | PDC_PCI_SYS_ERR
+				   | PDC1_ERR_MASK | PDC2_ERR_MASK),
 
 	board_2037x		= 0,	/* FastTrak S150 TX2plus */
-	board_20319		= 1,	/* FastTrak S150 TX4 */
-	board_20619		= 2,	/* FastTrak TX4000 */
-	board_20771		= 3,	/* FastTrak TX2300 */
+	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
+	board_20319		= 2,	/* FastTrak S150 TX4 */
+	board_20619		= 3,	/* FastTrak TX4000 */
 	board_2057x		= 4,	/* SATAII150 Tx2plus */
-	board_40518		= 5,	/* SATAII150 Tx4 */
+	board_2057x_pata	= 5,	/* SATAII150 Tx2plus */
+	board_40518		= 6,	/* SATAII150 Tx4 */
 
 	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */
 
+	/* Sequence counter control registers bit definitions */
+	PDC_SEQCNTRL_INT_MASK	= (1 << 5), /* Sequence Interrupt Mask */
+
+	/* Feature register values */
+	PDC_FEATURE_ATAPI_PIO	= 0x00, /* ATAPI data xfer by PIO */
+	PDC_FEATURE_ATAPI_DMA	= 0x01, /* ATAPI data xfer by DMA */
+
+	/* Device/Head register values */
+	PDC_DEVICE_SATA		= 0xE0, /* Device/Head value for SATA devices */
+
+	/* PDC_CTLSTAT bit definitions */
+	PDC_DMA_ENABLE		= (1 << 7),
+	PDC_IRQ_DISABLE		= (1 << 10),
 	PDC_RESET		= (1 << 11), /* HDMA reset */
 
-	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
-				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+	PDC_COMMON_FLAGS	= ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO |
 				  ATA_FLAG_PIO_POLLING,
+
+	/* ap->flags bits */
+	PDC_FLAG_GEN_II		= (1 << 24),
+	PDC_FLAG_SATA_PATA	= (1 << 25), /* supports SATA + PATA */
+	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
 };
 
 
@@ -86,26 +130,25 @@
 	dma_addr_t		pkt_dma;
 };
 
-struct pdc_host_priv {
-	int			hotplug_offset;
-};
-
 static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-static void pdc_eng_timeout(struct ata_port *ap);
-static int pdc_port_start(struct ata_port *ap);
-static void pdc_port_stop(struct ata_port *ap);
-static void pdc_pata_phy_reset(struct ata_port *ap);
-static void pdc_sata_phy_reset(struct ata_port *ap);
+static int pdc_common_port_start(struct ata_port *ap);
+static int pdc_sata_port_start(struct ata_port *ap);
 static void pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
 static void pdc_irq_clear(struct ata_port *ap);
 static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
-static void pdc_host_stop(struct ata_host_set *host_set);
-
+static void pdc_freeze(struct ata_port *ap);
+static void pdc_thaw(struct ata_port *ap);
+static void pdc_pata_error_handler(struct ata_port *ap);
+static void pdc_sata_error_handler(struct ata_port *ap);
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
+static int pdc_pata_cable_detect(struct ata_port *ap);
+static int pdc_sata_cable_detect(struct ata_port *ap);
 
 static struct scsi_host_template pdc_ata_sht = {
 	.module			= THIS_MODULE,
@@ -132,21 +175,50 @@
 	.check_status		= ata_check_status,
 	.exec_command		= pdc_exec_command_mmio,
 	.dev_select		= ata_std_dev_select,
+	.check_atapi_dma	= pdc_check_atapi_dma,
 
-	.phy_reset		= pdc_sata_phy_reset,
+	.qc_prep		= pdc_qc_prep,
+	.qc_issue		= pdc_qc_issue_prot,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.error_handler		= pdc_sata_error_handler,
+	.post_internal_cmd	= pdc_post_internal_cmd,
+	.cable_detect		= pdc_sata_cable_detect,
+	.data_xfer		= ata_data_xfer,
+	.irq_clear		= pdc_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.scr_read		= pdc_sata_scr_read,
+	.scr_write		= pdc_sata_scr_write,
+	.port_start		= pdc_sata_port_start,
+};
+
+/* First-generation chips need a more restrictive ->check_atapi_dma op */
+static const struct ata_port_operations pdc_old_sata_ops = {
+	.port_disable		= ata_port_disable,
+	.tf_load		= pdc_tf_load_mmio,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= pdc_exec_command_mmio,
+	.dev_select		= ata_std_dev_select,
+	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
 
 	.qc_prep		= pdc_qc_prep,
 	.qc_issue		= pdc_qc_issue_prot,
-	.eng_timeout		= pdc_eng_timeout,
-	.data_xfer		= ata_mmio_data_xfer,
-	.irq_handler		= pdc_interrupt,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.error_handler		= pdc_sata_error_handler,
+	.post_internal_cmd	= pdc_post_internal_cmd,
+	.cable_detect		= pdc_sata_cable_detect,
+	.data_xfer		= ata_data_xfer,
 	.irq_clear		= pdc_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= pdc_sata_scr_read,
 	.scr_write		= pdc_sata_scr_write,
-	.port_start		= pdc_port_start,
-	.port_stop		= pdc_port_stop,
-	.host_stop		= pdc_host_stop,
+	.port_start		= pdc_sata_port_start,
 };
 
 static const struct ata_port_operations pdc_pata_ops = {
@@ -156,76 +228,87 @@
 	.check_status		= ata_check_status,
 	.exec_command		= pdc_exec_command_mmio,
 	.dev_select		= ata_std_dev_select,
-
-	.phy_reset		= pdc_pata_phy_reset,
+	.check_atapi_dma	= pdc_check_atapi_dma,
 
 	.qc_prep		= pdc_qc_prep,
 	.qc_issue		= pdc_qc_issue_prot,
-	.data_xfer		= ata_mmio_data_xfer,
-	.eng_timeout		= pdc_eng_timeout,
-	.irq_handler		= pdc_interrupt,
+	.freeze			= pdc_freeze,
+	.thaw			= pdc_thaw,
+	.error_handler		= pdc_pata_error_handler,
+	.post_internal_cmd	= pdc_post_internal_cmd,
+	.cable_detect		= pdc_pata_cable_detect,
+	.data_xfer		= ata_data_xfer,
 	.irq_clear		= pdc_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
-	.port_start		= pdc_port_start,
-	.port_stop		= pdc_port_stop,
-	.host_stop		= pdc_host_stop,
+	.port_start		= pdc_common_port_start,
 };
 
 static const struct ata_port_info pdc_port_info[] = {
 	/* board_2037x */
 	{
-		.sht		= &pdc_ata_sht,
-		.host_flags	= PDC_COMMON_FLAGS /* | ATA_FLAG_SATA */,	/* pata fix */
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_SATA_PATA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
-		.port_ops	= &pdc_sata_ops,
+		.port_ops	= &pdc_old_sata_ops,
+	},
+
+	/* board_2037x_pata */
+	{
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.port_ops	= &pdc_pata_ops,
 	},
 
 	/* board_20319 */
 	{
-		.sht		= &pdc_ata_sht,
-		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_4_PORTS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
-		.port_ops	= &pdc_sata_ops,
+		.port_ops	= &pdc_old_sata_ops,
 	},
 
 	/* board_20619 */
 	{
-		.sht		= &pdc_ata_sht,
-		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
+				  PDC_FLAG_4_PORTS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &pdc_pata_ops,
 	},
 
-	/* board_20771 */
+	/* board_2057x */
 	{
-		.sht		= &pdc_ata_sht,
-		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
 		.port_ops	= &pdc_sata_ops,
 	},
 
-	/* board_2057x */
+	/* board_2057x_pata */
 	{
-		.sht		= &pdc_ata_sht,
-		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
+				  PDC_FLAG_GEN_II,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
-		.port_ops	= &pdc_sata_ops,
+		.port_ops	= &pdc_pata_ops,
 	},
 
 	/* board_40518 */
 	{
-		.sht		= &pdc_ata_sht,
-		.host_flags	= PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
@@ -234,49 +317,25 @@
 };
 
 static const struct pci_device_id pdc_ata_pci_tbl[] = {
-	{ PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2057x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2057x },
-	{ PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2037x },
-
-	{ PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20319 },
-	{ PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20319 },
-	{ PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20319 },
-	{ PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20319 },
-	{ PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20319 },
-	{ PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_40518 },
-
-	{ PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20619 },
-
-/* TODO: remove all associated board_20771 code, as it completely
- * duplicates board_2037x code, unless reason for separation can be
- * divined.
- */
-#if 0
-	{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20771 },
-#endif
+	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
+	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
+	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
+
+	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
+	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
+
+	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
 
 	{ }	/* terminate list */
 };
@@ -290,9 +349,9 @@
 };
 
 
-static int pdc_port_start(struct ata_port *ap)
+static int pdc_common_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct pdc_port_priv *pp;
 	int rc;
 
@@ -300,55 +359,43 @@
 	if (rc)
 		return rc;
 
-	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
-	if (!pp) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
-
-	pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
-	if (!pp->pkt) {
-		rc = -ENOMEM;
-		goto err_out_kfree;
-	}
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
 
 	ap->private_data = pp;
 
 	return 0;
-
-err_out_kfree:
-	kfree(pp);
-err_out:
-	ata_port_stop(ap);
-	return rc;
 }
 
-
-static void pdc_port_stop(struct ata_port *ap)
+static int pdc_sata_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct pdc_port_priv *pp = ap->private_data;
-
-	ap->private_data = NULL;
-	dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
-	kfree(pp);
-	ata_port_stop(ap);
-}
-
+	int rc;
 
-static void pdc_host_stop(struct ata_host_set *host_set)
-{
-	struct pdc_host_priv *hp = host_set->private_data;
+	rc = pdc_common_port_start(ap);
+	if (rc)
+		return rc;
 
-	ata_pci_host_stop(host_set);
+	/* fix up PHYMODE4 align timing */
+	if (ap->flags & PDC_FLAG_GEN_II) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.scr_addr;
+		unsigned int tmp;
+
+		tmp = readl(mmio + 0x014);
+		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
+		writel(tmp, mmio + 0x014);
+	}
 
-	kfree(hp);
+	return 0;
 }
 
-
 static void pdc_reset_port(struct ata_port *ap)
 {
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
 	unsigned int i;
 	u32 tmp;
 
@@ -368,53 +415,27 @@
 	readl(mmio);	/* flush */
 }
 
-static void pdc_sata_phy_reset(struct ata_port *ap)
-{
-	/*    pdc_reset_port(ap); */  /* pata fix */
-	/*    sata_phy_reset(ap); */  /* pata fix */
-	/* if no sata flag, test for pata drive */      /* pata fix */
-	if (ap->flags & ATA_FLAG_SATA)  /* pata fix */
-	{                               /* pata fix */
-		pdc_reset_port(ap);     /* pata fix */
-		sata_phy_reset(ap);     /* pata fix */
-	}                               /* pata fix */
-	else                            /* pata fix */
-		pdc_pata_phy_reset(ap); /* pata fix */
-}
-
-static void pdc_pata_cbl_detect(struct ata_port *ap)
+static int pdc_pata_cable_detect(struct ata_port *ap)
 {
 	u8 tmp;
-	void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
 
 	tmp = readb(mmio);
+	if (tmp & 0x01)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
 
-	if (tmp & 0x01) {
-		ap->cbl = ATA_CBL_PATA40;
-		ap->udma_mask &= ATA_UDMA_MASK_40C;
-	} else
-		ap->cbl = ATA_CBL_PATA80;
-}
-
-static void pdc_pata_phy_reset(struct ata_port *ap)
-{
-	u8 tmp;                                         /* pata fix */
-	void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03; /* pata fix */
-	tmp = readb(mmio);                              /* pata fix */
-	if (tmp & 0x01)                                 /* pata fix */
-		ap->udma_mask &= ATA_UDMA_MASK_40C;     /* pata fix */
-
-	pdc_pata_cbl_detect(ap);
-	pdc_reset_port(ap);
-	ata_port_probe(ap);
-	ata_bus_reset(ap);
+static int pdc_sata_cable_detect(struct ata_port *ap)
+{
+	return ATA_CBL_SATA;
 }
 
 static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
 	if (sc_reg > SCR_CONTROL)
 		return 0xffffffffU;
-	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -423,7 +444,91 @@
 {
 	if (sc_reg > SCR_CONTROL)
 		return;
-	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+}
+
+static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	dma_addr_t sg_table = ap->prd_dma;
+	unsigned int cdb_len = qc->dev->cdb_len;
+	u8 *cdb = qc->cdb;
+	struct pdc_port_priv *pp = ap->private_data;
+	u8 *buf = pp->pkt;
+	u32 *buf32 = (u32 *) buf;
+	unsigned int dev_sel, feature, nbytes;
+
+	/* set control bits (byte 0), zero delay seq id (byte 3),
+	 * and seq id (byte 2)
+	 */
+	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI_DMA:
+		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+			buf32[0] = cpu_to_le32(PDC_PKT_READ);
+		else
+			buf32[0] = 0;
+		break;
+	case ATA_PROT_ATAPI_NODATA:
+		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
+	buf32[2] = 0;				/* no next-packet */
+
+	/* select drive */
+	if (sata_scr_valid(ap)) {
+		dev_sel = PDC_DEVICE_SATA;
+	} else {
+		dev_sel = ATA_DEVICE_OBS;
+		if (qc->dev->devno != 0)
+			dev_sel |= ATA_DEV1;
+	}
+	buf[12] = (1 << 5) | ATA_REG_DEVICE;
+	buf[13] = dev_sel;
+	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
+	buf[15] = dev_sel; /* once more, waiting for BSY to clear */
+
+	buf[16] = (1 << 5) | ATA_REG_NSECT;
+	buf[17] = 0x00;
+	buf[18] = (1 << 5) | ATA_REG_LBAL;
+	buf[19] = 0x00;
+
+	/* set feature and byte counter registers */
+	if (qc->tf.protocol != ATA_PROT_ATAPI_DMA) {
+		feature = PDC_FEATURE_ATAPI_PIO;
+		/* set byte counter register to real transfer byte count */
+		nbytes = qc->nbytes;
+		if (nbytes > 0xffff)
+			nbytes = 0xffff;
+	} else {
+		feature = PDC_FEATURE_ATAPI_DMA;
+		/* set byte counter register to 0 */
+		nbytes = 0;
+	}
+	buf[20] = (1 << 5) | ATA_REG_FEATURE;
+	buf[21] = feature;
+	buf[22] = (1 << 5) | ATA_REG_BYTEL;
+	buf[23] = nbytes & 0xFF;
+	buf[24] = (1 << 5) | ATA_REG_BYTEH;
+	buf[25] = (nbytes >> 8) & 0xFF;
+
+	/* send ATAPI packet command 0xA0 */
+	buf[26] = (1 << 5) | ATA_REG_CMD;
+	buf[27] = ATA_CMD_PACKET;
+
+	/* select drive and check DRQ */
+	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
+	buf[29] = dev_sel;
+
+	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
+	BUG_ON(cdb_len & ~0x1E);
+
+	/* append the CDB as the final part */
+	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
+	memcpy(buf+31, cdb, cdb_len);
 }
 
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -450,64 +555,131 @@
 		pdc_pkt_footer(&qc->tf, pp->pkt, i);
 		break;
 
+	case ATA_PROT_ATAPI:
+		ata_qc_prep(qc);
+		break;
+
+	case ATA_PROT_ATAPI_DMA:
+		ata_qc_prep(qc);
+		/*FALLTHROUGH*/
+	case ATA_PROT_ATAPI_NODATA:
+		pdc_atapi_pkt(qc);
+		break;
+
 	default:
 		break;
 	}
 }
 
-static void pdc_eng_timeout(struct ata_port *ap)
+static void pdc_freeze(struct ata_port *ap)
 {
-	struct ata_host_set *host_set = ap->host_set;
-	u8 drv_stat;
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 tmp;
 
-	DPRINTK("ENTER\n");
+	tmp = readl(mmio + PDC_CTLSTAT);
+	tmp |= PDC_IRQ_DISABLE;
+	tmp &= ~PDC_DMA_ENABLE;
+	writel(tmp, mmio + PDC_CTLSTAT);
+	readl(mmio + PDC_CTLSTAT); /* flush */
+}
 
-	spin_lock_irqsave(&host_set->lock, flags);
+static void pdc_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	u32 tmp;
 
-	qc = ata_qc_from_tag(ap, ap->active_tag);
+	/* clear IRQ */
+	readl(mmio + PDC_INT_SEQMASK);
 
-	switch (qc->tf.protocol) {
-	case ATA_PROT_DMA:
-	case ATA_PROT_NODATA:
-		ata_port_printk(ap, KERN_ERR, "command timeout\n");
-		drv_stat = ata_wait_idle(ap);
-		qc->err_mask |= __ac_err_mask(drv_stat);
-		break;
+	/* turn IRQ back on */
+	tmp = readl(mmio + PDC_CTLSTAT);
+	tmp &= ~PDC_IRQ_DISABLE;
+	writel(tmp, mmio + PDC_CTLSTAT);
+	readl(mmio + PDC_CTLSTAT); /* flush */
+}
 
-	default:
-		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+static void pdc_common_error_handler(struct ata_port *ap, ata_reset_fn_t hardreset)
+{
+	if (!(ap->pflags & ATA_PFLAG_FROZEN))
+		pdc_reset_port(ap);
 
-		ata_port_printk(ap, KERN_ERR,
-				"unknown timeout, cmd 0x%x stat 0x%x\n",
-				qc->tf.command, drv_stat);
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
+		  ata_std_postreset);
+}
 
-		qc->err_mask |= ac_err_mask(drv_stat);
-		break;
-	}
+static void pdc_pata_error_handler(struct ata_port *ap)
+{
+	pdc_common_error_handler(ap, NULL);
+}
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
-	ata_eh_qc_complete(qc);
-	DPRINTK("EXIT\n");
+static void pdc_sata_error_handler(struct ata_port *ap)
+{
+	pdc_common_error_handler(ap, sata_std_hardreset);
+}
+
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		pdc_reset_port(ap);
+}
+
+static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
+			   u32 port_status, u32 err_mask)
+{
+	struct ata_eh_info *ehi = &ap->eh_info;
+	unsigned int ac_err_mask = 0;
+
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
+	port_status &= err_mask;
+
+	if (port_status & PDC_DRIVE_ERR)
+		ac_err_mask |= AC_ERR_DEV;
+	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
+		ac_err_mask |= AC_ERR_HSM;
+	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
+		ac_err_mask |= AC_ERR_ATA_BUS;
+	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
+			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
+		ac_err_mask |= AC_ERR_HOST_BUS;
+
+	if (sata_scr_valid(ap))
+		ehi->serror |= pdc_sata_scr_read(ap, SCR_ERROR);
+
+	qc->err_mask |= ac_err_mask;
+
+	pdc_reset_port(ap);
+
+	ata_port_abort(ap);
 }
 
 static inline unsigned int pdc_host_intr( struct ata_port *ap,
                                           struct ata_queued_cmd *qc)
 {
 	unsigned int handled = 0;
-	u32 tmp;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
+	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
+	u32 port_status, err_mask;
 
-	tmp = readl(mmio);
-	if (tmp & PDC_ERR_MASK) {
-		qc->err_mask |= AC_ERR_DEV;
-		pdc_reset_port(ap);
+	err_mask = PDC_ERR_MASK;
+	if (ap->flags & PDC_FLAG_GEN_II)
+		err_mask &= ~PDC1_ERR_MASK;
+	else
+		err_mask &= ~PDC2_ERR_MASK;
+	port_status = readl(port_mmio + PDC_GLOBAL_CTL);
+	if (unlikely(port_status & err_mask)) {
+		pdc_error_intr(ap, qc, port_status, err_mask);
+		return 1;
 	}
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
 	case ATA_PROT_NODATA:
+	case ATA_PROT_ATAPI_DMA:
+	case ATA_PROT_ATAPI_NODATA:
 		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
 		ata_qc_complete(qc);
 		handled = 1;
@@ -523,15 +695,15 @@
 
 static void pdc_irq_clear(struct ata_port *ap)
 {
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = ap->host;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	readl(mmio + PDC_INT_SEQMASK);
 }
 
-static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	struct ata_port *ap;
 	u32 mask = 0;
 	unsigned int i, tmp;
@@ -540,12 +712,12 @@
 
 	VPRINTK("ENTER\n");
 
-	if (!host_set || !host_set->mmio_base) {
+	if (!host || !host->iomap[PDC_MMIO_BAR]) {
 		VPRINTK("QUICK EXIT\n");
 		return IRQ_NONE;
 	}
 
-	mmio_base = host_set->mmio_base;
+	mmio_base = host->iomap[PDC_MMIO_BAR];
 
 	/* reading should also clear interrupts */
 	mask = readl(mmio_base + PDC_INT_SEQMASK);
@@ -555,7 +727,7 @@
 		return IRQ_NONE;
 	}
 
-	spin_lock(&host_set->lock);
+	spin_lock(&host->lock);
 
 	mask &= 0xffff;		/* only 16 tags possible */
 	if (!mask) {
@@ -565,9 +737,9 @@
 
 	writel(mask, mmio_base + PDC_INT_SEQMASK);
 
-	for (i = 0; i < host_set->n_ports; i++) {
+	for (i = 0; i < host->n_ports; i++) {
 		VPRINTK("port %u\n", i);
-		ap = host_set->ports[i];
+		ap = host->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
@@ -582,7 +754,7 @@
 	VPRINTK("EXIT\n");
 
 done_irq:
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 	return IRQ_RETVAL(handled);
 }
 
@@ -590,32 +762,34 @@
 {
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
+	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
 	unsigned int port_no = ap->port_no;
 	u8 seq = (u8) (port_no + 1);
 
 	VPRINTK("ENTER, ap %p\n", ap);
 
-	writel(0x00000001, ap->host_set->mmio_base + (seq * 4));
-	readl(ap->host_set->mmio_base + (seq * 4));	/* flush */
+	writel(0x00000001, mmio + (seq * 4));
+	readl(mmio + (seq * 4));	/* flush */
 
 	pp->pkt[2] = seq;
 	wmb();			/* flush PRD, pkt writes */
-	writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
-	readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
+	writel(pp->pkt_dma, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+	readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
 }
 
 static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
 {
 	switch (qc->tf.protocol) {
+	case ATA_PROT_ATAPI_NODATA:
+		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+			break;
+		/*FALLTHROUGH*/
+	case ATA_PROT_ATAPI_DMA:
 	case ATA_PROT_DMA:
 	case ATA_PROT_NODATA:
 		pdc_packet_start(qc);
 		return 0;
 
-	case ATA_PROT_ATAPI_DMA:
-		BUG();
-		break;
-
 	default:
 		break;
 	}
@@ -638,41 +812,82 @@
 	ata_exec_command(ap, tf);
 }
 
-
-static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
 {
-	port->cmd_addr		= base;
-	port->data_addr		= base;
-	port->feature_addr	=
-	port->error_addr	= base + 0x4;
-	port->nsect_addr	= base + 0x8;
-	port->lbal_addr		= base + 0xc;
-	port->lbam_addr		= base + 0x10;
-	port->lbah_addr		= base + 0x14;
-	port->device_addr	= base + 0x18;
-	port->command_addr	=
-	port->status_addr	= base + 0x1c;
-	port->altstatus_addr	=
-	port->ctl_addr		= base + 0x38;
+	u8 *scsicmd = qc->scsicmd->cmnd;
+	int pio = 1; /* atapi dma off by default */
+
+	/* Whitelist commands that may use DMA. */
+	switch (scsicmd[0]) {
+	case WRITE_12:
+	case WRITE_10:
+	case WRITE_6:
+	case READ_12:
+	case READ_10:
+	case READ_6:
+	case 0xad: /* READ_DVD_STRUCTURE */
+	case 0xbe: /* READ_CD */
+		pio = 0;
+	}
+	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
+	if (scsicmd[0] == WRITE_10) {
+		unsigned int lba;
+		lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5];
+		if (lba >= 0xFFFF4FA2)
+			pio = 1;
+	}
+	return pio;
+}
+
+static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* First generation chips cannot use ATAPI DMA on SATA ports */
+	return 1;
+}
+
+static void pdc_ata_setup_port(struct ata_port *ap,
+			       void __iomem *base, void __iomem *scr_addr)
+{
+	ap->ioaddr.cmd_addr		= base;
+	ap->ioaddr.data_addr		= base;
+	ap->ioaddr.feature_addr		=
+	ap->ioaddr.error_addr		= base + 0x4;
+	ap->ioaddr.nsect_addr		= base + 0x8;
+	ap->ioaddr.lbal_addr		= base + 0xc;
+	ap->ioaddr.lbam_addr		= base + 0x10;
+	ap->ioaddr.lbah_addr		= base + 0x14;
+	ap->ioaddr.device_addr		= base + 0x18;
+	ap->ioaddr.command_addr		=
+	ap->ioaddr.status_addr		= base + 0x1c;
+	ap->ioaddr.altstatus_addr	=
+	ap->ioaddr.ctl_addr		= base + 0x38;
+	ap->ioaddr.scr_addr		= scr_addr;
 }
 
 
-static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
-{
-	void __iomem *mmio = pe->mmio_base;
-	struct pdc_host_priv *hp = pe->private_data;
-	int hotplug_offset = hp->hotplug_offset;
+static void pdc_host_init(struct ata_host *host)
+{
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
+	int hotplug_offset;
 	u32 tmp;
 
+	if (is_gen2)
+		hotplug_offset = PDC2_SATA_PLUG_CSR;
+	else
+		hotplug_offset = PDC_SATA_PLUG_CSR;
+
 	/*
 	 * Except for the hotplug stuff, this is voodoo from the
 	 * Promise driver.  Label this entire section
 	 * "TODO: figure out why we do this"
 	 */
 
-	/* change FIFO_SHD to 8 dwords, enable BMR_BURST */
+	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
 	tmp = readl(mmio + PDC_FLASH_CTL);
-	tmp |= 0x12000;	/* bit 16 (fifo 8 dw) and 13 (bmr burst?) */
+	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
+	if (!is_gen2)
+		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
 	writel(tmp, mmio + PDC_FLASH_CTL);
 
 	/* clear plug/unplug flags for all ports */
@@ -683,6 +898,10 @@
 	tmp = readl(mmio + hotplug_offset);
 	writel(tmp | 0xff0000, mmio + hotplug_offset);
 
+	/* don't initialise TBG or SLEW on 2nd generation chips */
+	if (is_gen2)
+		return;
+
 	/* reduce TBG clock to 133 Mhz. */
 	tmp = readl(mmio + PDC_TBG_MODE);
 	tmp &= ~0x30000; /* clear bit 17, 16*/
@@ -702,164 +921,88 @@
 static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	struct pdc_host_priv *hp;
-	unsigned long base;
-	void __iomem *mmio_base;
-	unsigned int board_idx = (unsigned int) ent->driver_data;
-	int pci_dev_busy = 0;
-	int rc;
-	u8 tmp;		/* pata fix */
+	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
+	const struct ata_port_info *ppi[PDC_MAX_PORTS];
+	struct ata_host *host;
+	void __iomem *base;
+	int n_ports, i, rc;
+	int is_sataii_tx4;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	/* enable and acquire resources */
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
-
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
 	if (rc)
-		goto err_out_regions;
-
-	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, 3, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
-
-	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
-	if (hp == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-
-	/* Set default hotplug offset */
-	hp->hotplug_offset = PDC_SATA_PLUG_CSR;
-	probe_ent->private_data = hp;
-
-	probe_ent->sht		= pdc_port_info[board_idx].sht;
-	probe_ent->host_flags	= pdc_port_info[board_idx].host_flags;
-	probe_ent->pio_mask	= pdc_port_info[board_idx].pio_mask;
-	probe_ent->mwdma_mask	= pdc_port_info[board_idx].mwdma_mask;
-	probe_ent->udma_mask	= pdc_port_info[board_idx].udma_mask;
-	probe_ent->port_ops	= pdc_port_info[board_idx].port_ops;
-
-       	probe_ent->irq = pdev->irq;
-       	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-
-	pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
-	pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
-
-	probe_ent->port[0].scr_addr = base + 0x400;
-	probe_ent->port[1].scr_addr = base + 0x500;
-
-	probe_ent->port_flags[0] = ATA_FLAG_SATA;	/* pata fix */
-	probe_ent->port_flags[1] = ATA_FLAG_SATA;	/* pata fix */
-	
-	/* notice 4-port boards */
-	switch (board_idx) {
-	case board_40518:
-		/* Override hotplug offset for SATAII150 */
-		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
-		/* Fall through */
-	case board_20319:
-       		probe_ent->n_ports = 4;
-
-		pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
-		pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
-
-		probe_ent->port[2].scr_addr = base + 0x600;
-		probe_ent->port[3].scr_addr = base + 0x700;
-
-		probe_ent->port_flags[2] = ATA_FLAG_SATA;	/* pata fix */
-		probe_ent->port_flags[3] = ATA_FLAG_SATA;	/* pata fix */
-		break;
-	case board_2057x:
-		/* Override hotplug offset for SATAII150 */
-		hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
-		/* Fall through */
-	case board_2037x:
-/*		probe_ent->n_ports = 2; */			/* pata fix */
-		/* Some boards have also PATA port */		/* pata fix */
-		tmp = readb(mmio_base + PDC_FLASH_CTL+1);	/* pata fix */
-		if (!(tmp & 0x80))				/* pata fix */
-		{						/* pata fix */
-			probe_ent->n_ports = 3;			/* pata fix */
-			pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);	/* pata fix */
-			probe_ent->port_flags[2] = ATA_FLAG_SLAVE_POSS;		/* pata fix */
-			printk(KERN_INFO DRV_NAME " PATA port found\n");	/* pata fix */
-		}						/* pata fix */
-		else						/* pata fix */
-       			probe_ent->n_ports = 2;			/* pata fix */
-		break;
-	case board_20771:
-		probe_ent->n_ports = 2;
-		break;
-	case board_20619:
-		probe_ent->n_ports = 4;
-
-		pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
-		pdc_ata_setup_port(&probe_ent->port[3], base + 0x380);
-
-		probe_ent->port[2].scr_addr = base + 0x600;
-		probe_ent->port[3].scr_addr = base + 0x700;
+		return rc;
+	base = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
 
-		probe_ent->port_flags[2] = ATA_FLAG_SATA;	/* pata fix */
-		probe_ent->port_flags[3] = ATA_FLAG_SATA;	/* pata fix */
-		break;
-	default:
-		BUG();
-		break;
+	/* determine port configuration and setup host */
+	n_ports = 2;
+	if (pi->flags & PDC_FLAG_4_PORTS)
+		n_ports = 4;
+	for (i = 0; i < n_ports; i++)
+		ppi[i] = pi;
+
+	if (pi->flags & PDC_FLAG_SATA_PATA) {
+		u8 tmp = readb(base + PDC_FLASH_CTL+1);
+		if (!(tmp & 0x80)) {
+			ppi[n_ports++] = pi + 1;
+			dev_printk(KERN_INFO, &pdev->dev, "PATA port found\n");
+		}
 	}
 
-	pci_set_master(pdev);
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host) {
+		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+		return -ENOMEM;
+	}
+	host->iomap = pcim_iomap_table(pdev);
+
+	is_sataii_tx4 = 0;
+	if ((pi->flags & (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) == (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) {
+		is_sataii_tx4 = 1;
+		dev_printk(KERN_INFO, &pdev->dev, "applying SATAII TX4 port numbering workaround\n");
+	}
+	for (i = 0; i < host->n_ports; i++) {
+		static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
+		int ata_nr;
+
+		ata_nr = i;
+		if (is_sataii_tx4)
+			ata_nr = sataii_tx4_port_remap[i];
+
+		pdc_ata_setup_port(host->ports[i],
+				   base + 0x200 + ata_nr * 0x80,
+				   base + 0x400 + ata_nr * 0x100);
+	}
 
 	/* initialize adapter */
-	pdc_host_init(board_idx, probe_ent);
+	pdc_host_init(host);
 
-	/* FIXME: Need any other frees than hp? */
-	if (!ata_device_add(probe_ent))
-		kfree(hp);
-
-	kfree(probe_ent);
-
-	return 0;
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
 
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	/* start host, request IRQ and attach */
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
+				 &pdc_ata_sht);
 }
 
 
 static int __init pdc_ata_init(void)
 {
-	return pci_module_init(&pdc_ata_pci_driver);
+	return pci_register_driver(&pdc_ata_pci_driver);
 }
 
 
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_qstor.c linux-2.6.18.x86_64.p4/drivers/ata/sata_qstor.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_qstor.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_qstor.c	2007-06-06 10:08:00.000000000 -0400
@@ -34,16 +34,16 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
-#include <asm/io.h>
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_qstor"
-#define DRV_VERSION	"0.06"
+#define DRV_VERSION	"0.08"
 
 enum {
+	QS_MMIO_BAR		= 4,
+
 	QS_PORTS		= 4,
 	QS_MAX_PRD		= LIBATA_MAX_PRD,
 	QS_CPB_ORDER		= 6,
@@ -114,10 +114,8 @@
 static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
 static int qs_port_start(struct ata_port *ap);
-static void qs_host_stop(struct ata_host_set *host_set);
-static void qs_port_stop(struct ata_port *ap);
+static void qs_host_stop(struct ata_host *host);
 static void qs_phy_reset(struct ata_port *ap);
 static void qs_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
@@ -157,14 +155,14 @@
 	.phy_reset		= qs_phy_reset,
 	.qc_prep		= qs_qc_prep,
 	.qc_issue		= qs_qc_issue,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 	.eng_timeout		= qs_eng_timeout,
-	.irq_handler		= qs_intr,
 	.irq_clear		= qs_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= qs_scr_read,
 	.scr_write		= qs_scr_write,
 	.port_start		= qs_port_start,
-	.port_stop		= qs_port_stop,
 	.host_stop		= qs_host_stop,
 	.bmdma_stop		= qs_bmdma_stop,
 	.bmdma_status		= qs_bmdma_status,
@@ -173,8 +171,7 @@
 static const struct ata_port_info qs_port_info[] = {
 	/* board_2068_idx */
 	{
-		.sht		= &qs_ata_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SATA_RESET |
 				  //FIXME ATA_FLAG_SRST |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
@@ -185,8 +182,7 @@
 };
 
 static const struct pci_device_id qs_ata_pci_tbl[] = {
-	{ PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_2068_idx },
+	{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
 
 	{ }	/* terminate list */
 };
@@ -198,6 +194,11 @@
 	.remove			= ata_pci_remove_one,
 };
 
+static void __iomem *qs_mmio_base(struct ata_host *host)
+{
+	return host->iomap[QS_MMIO_BAR];
+}
+
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
 {
 	return 1;	/* ATAPI DMA not supported */
@@ -220,7 +221,7 @@
 
 static inline void qs_enter_reg_mode(struct ata_port *ap)
 {
-	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
+	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
 
 	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
 	readb(chan + QS_CCT_CTR0);        /* flush */
@@ -228,7 +229,7 @@
 
 static inline void qs_reset_channel_logic(struct ata_port *ap)
 {
-	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
+	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
 
 	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
 	readb(chan + QS_CCT_CTR0);        /* flush */
@@ -258,14 +259,14 @@
 {
 	if (sc_reg > SCR_CONTROL)
 		return ~0U;
-	return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
+	return readl(ap->ioaddr.scr_addr + (sc_reg * 8));
 }
 
 static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
 		return;
-	writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
+	writel(val, ap->ioaddr.scr_addr + (sc_reg * 8));
 }
 
 static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
@@ -326,7 +327,7 @@
 	/* host control block (HCB) */
 	buf[ 0] = QS_HCB_HDR;
 	buf[ 1] = hflags;
-	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE);
+	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
 	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
 	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
 	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);
@@ -342,7 +343,7 @@
 static inline void qs_packet_start(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
+	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
 
 	VPRINTK("ENTER, ap %p\n", ap);
 
@@ -375,11 +376,11 @@
 	return ata_qc_issue_prot(qc);
 }
 
-static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
+static inline unsigned int qs_intr_pkt(struct ata_host *host)
 {
 	unsigned int handled = 0;
 	u8 sFFE;
-	u8 __iomem *mmio_base = host_set->mmio_base;
+	u8 __iomem *mmio_base = qs_mmio_base(host);
 
 	do {
 		u32 sff0 = readl(mmio_base + QS_HST_SFF);
@@ -391,7 +392,7 @@
 			u8 sDST = sff0 >> 16;	/* dev status */
 			u8 sHST = sff1 & 0x3f;	/* host status */
 			unsigned int port_no = (sff1 >> 8) & 0x03;
-			struct ata_port *ap = host_set->ports[port_no];
+			struct ata_port *ap = host->ports[port_no];
 
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 					sff1, sff0, port_no, sHST, sDST);
@@ -421,13 +422,13 @@
 	return handled;
 }
 
-static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
+static inline unsigned int qs_intr_mmio(struct ata_host *host)
 {
 	unsigned int handled = 0, port_no;
 
-	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
 		struct ata_port *ap;
-		ap = host_set->ports[port_no];
+		ap = host->ports[port_no];
 		if (ap &&
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
@@ -442,7 +443,7 @@
 				if ((status & ATA_BUSY))
 					continue;
 				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-					ap->id, qc->tf.protocol, status);
+					ap->print_id, qc->tf.protocol, status);
 
 				/* complete taskfile transaction */
 				pp->state = qs_state_idle;
@@ -455,23 +456,23 @@
 	return handled;
 }
 
-static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int handled = 0;
 
 	VPRINTK("ENTER\n");
 
-	spin_lock(&host_set->lock);
-	handled  = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
-	spin_unlock(&host_set->lock);
+	spin_lock(&host->lock);
+	handled  = qs_intr_pkt(host) | qs_intr_mmio(host);
+	spin_unlock(&host->lock);
 
 	VPRINTK("EXIT\n");
 
 	return IRQ_RETVAL(handled);
 }
 
-static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
+static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
 {
 	port->cmd_addr		=
 	port->data_addr		= base + 0x400;
@@ -491,9 +492,9 @@
 
 static int qs_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct qs_port_priv *pp;
-	void __iomem *mmio_base = ap->host_set->mmio_base;
+	void __iomem *mmio_base = qs_mmio_base(ap->host);
 	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
 	u64 addr;
 	int rc;
@@ -502,17 +503,13 @@
 	if (rc)
 		return rc;
 	qs_enter_reg_mode(ap);
-	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
-	if (!pp) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
-								GFP_KERNEL);
-	if (!pp->pkt) {
-		rc = -ENOMEM;
-		goto err_out_kfree;
-	}
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
+				      GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
 	memset(pp->pkt, 0, QS_PKT_BYTES);
 	ap->private_data = pp;
 
@@ -520,50 +517,26 @@
 	writel((u32) addr,        chan + QS_CCF_CPBA);
 	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
 	return 0;
-
-err_out_kfree:
-	kfree(pp);
-err_out:
-	ata_port_stop(ap);
-	return rc;
 }
 
-static void qs_port_stop(struct ata_port *ap)
+static void qs_host_stop(struct ata_host *host)
 {
-	struct device *dev = ap->host_set->dev;
-	struct qs_port_priv *pp = ap->private_data;
-
-	if (pp != NULL) {
-		ap->private_data = NULL;
-		if (pp->pkt != NULL)
-			dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
-								pp->pkt_dma);
-		kfree(pp);
-	}
-	ata_port_stop(ap);
-}
-
-static void qs_host_stop(struct ata_host_set *host_set)
-{
-	void __iomem *mmio_base = host_set->mmio_base;
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+	void __iomem *mmio_base = qs_mmio_base(host);
 
 	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
 	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
-
-	pci_iounmap(pdev, mmio_base);
 }
 
-static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
+static void qs_host_init(struct ata_host *host, unsigned int chip_id)
 {
-	void __iomem *mmio_base = pe->mmio_base;
+	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
 	unsigned int port_no;
 
 	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
 	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
 
 	/* reset each channel in turn */
-	for (port_no = 0; port_no < pe->n_ports; ++port_no) {
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
 		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
 		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
 		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
@@ -571,7 +544,7 @@
 	}
 	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
 
-	for (port_no = 0; port_no < pe->n_ports; ++port_no) {
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
 		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
 		/* set FIFO depths to same settings as Windows driver */
 		writew(32, chan + QS_CFC_HUFT);
@@ -631,88 +604,53 @@
 				const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	void __iomem *mmio_base;
 	unsigned int board_idx = (unsigned int) ent->driver_data;
+	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
+	struct ata_host *host;
 	int rc, port_no;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
-	if (rc)
-		return rc;
+	/* alloc host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
+	if (!host)
+		return -ENOMEM;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
 	if (rc)
-		goto err_out;
+		return rc;
 
-	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
-		rc = -ENODEV;
-		goto err_out_regions;
-	}
+	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
+		return -ENODEV;
 
-	mmio_base = pci_iomap(pdev, 4, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	rc = qs_set_dma_masks(pdev, mmio_base);
+	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
 	if (rc)
-		goto err_out_iounmap;
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
 
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
+	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
+	if (rc)
+		return rc;
 
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	probe_ent->sht		= qs_port_info[board_idx].sht;
-	probe_ent->host_flags	= qs_port_info[board_idx].host_flags;
-	probe_ent->pio_mask	= qs_port_info[board_idx].pio_mask;
-	probe_ent->mwdma_mask	= qs_port_info[board_idx].mwdma_mask;
-	probe_ent->udma_mask	= qs_port_info[board_idx].udma_mask;
-	probe_ent->port_ops	= qs_port_info[board_idx].port_ops;
-
-	probe_ent->irq		= pdev->irq;
-	probe_ent->irq_flags	= IRQF_SHARED;
-	probe_ent->mmio_base	= mmio_base;
-	probe_ent->n_ports	= QS_PORTS;
-
-	for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
-		unsigned long chan = (unsigned long)mmio_base +
-							(port_no * 0x4000);
-		qs_ata_setup_port(&probe_ent->port[port_no], chan);
+	for (port_no = 0; port_no < host->n_ports; ++port_no) {
+		void __iomem *chan =
+			host->iomap[QS_MMIO_BAR] + (port_no * 0x4000);
+		qs_ata_setup_port(&host->ports[port_no]->ioaddr, chan);
 	}
 
-	pci_set_master(pdev);
-
 	/* initialize adapter */
-	qs_host_init(board_idx, probe_ent);
+	qs_host_init(host, board_idx);
 
-	rc = ata_device_add(probe_ent);
-	kfree(probe_ent);
-	if (rc != QS_PORTS)
-		goto err_out_iounmap;
-	return 0;
-
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	pci_disable_device(pdev);
-	return rc;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
+				 &qs_ata_sht);
 }
 
 static int __init qs_ata_init(void)
 {
-	return pci_module_init(&qs_ata_pci_driver);
+	return pci_register_driver(&qs_ata_pci_driver);
 }
 
 static void __exit qs_ata_exit(void)
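The sata_qstor conversion above leaves the packet-mode interrupt decoding itself untouched (only the host/mmio plumbing changes). For reference, here is a stand-alone sketch of how qs_intr_pkt() unpacks the SFF status words (illustration only, not part of the patch); the shifts and masks come from the hunk, the sample sff0/sff1 values are invented.

/* Illustrative sketch only -- not applied by this patch. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sff0 = 0x00500000;	/* made-up sample register values */
	uint32_t sff1 = 0x40000112;

	uint8_t  sDST    = sff0 >> 16;		/* dev status */
	uint8_t  sHST    = sff1 & 0x3f;		/* host status */
	unsigned port_no = (sff1 >> 8) & 0x03;	/* which channel raised it */

	printf("port %u: sHST=%d sDST=0x%02x\n",
	       port_no, (int) sHST, (unsigned) sDST);
	return 0;
}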
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_sil24.c linux-2.6.18.x86_64.p4/drivers/ata/sata_sil24.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_sil24.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_sil24.c	2007-06-06 10:08:00.000000000 -0400
@@ -28,10 +28,9 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 
 #define DRV_NAME	"sata_sil24"
-#define DRV_VERSION	"0.3"
+#define DRV_VERSION	"0.9"
 
 /*
  * Port request block (PRB) 32 bytes
@@ -61,6 +60,9 @@
 };
 
 enum {
+	SIL24_HOST_BAR		= 0,
+	SIL24_PORT_BAR		= 2,
+
 	/*
 	 * Global controller registers (128 bytes @ BAR0)
 	 */
@@ -100,10 +102,14 @@
 	 */
 	PORT_REGS_SIZE		= 0x2000,
 
-	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PM regs */
+	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
 	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
 
-	PORT_PM			= 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
+	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
+	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
+	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
+	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */
+
 		/* 32 bit regs */
 	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
 	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
@@ -126,6 +132,7 @@
 	PORT_PHY_CFG		= 0x1050,
 	PORT_SLOT_STAT		= 0x1800,
 	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+	PORT_CONTEXT		= 0x1e04,
 	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
 	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
 	PORT_SCONTROL		= 0x1f00,
@@ -139,9 +146,9 @@
 	PORT_CS_INIT		= (1 << 2), /* port initialize */
 	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
 	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
-	PORT_CS_RESUME		= (1 << 6), /* port resume */
+	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
 	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
-	PORT_CS_PM_EN		= (1 << 13), /* port multiplier enable */
+	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
 	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */
 
 	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
@@ -230,7 +237,8 @@
 	/* host flags */
 	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
+				  ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY |
+				  ATA_FLAG_ACPI_SATA,
 	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */
 
 	IRQ_STAT_4PORTS		= 0xf,
@@ -316,13 +324,7 @@
 	struct ata_taskfile tf;			/* Cached taskfile registers */
 };
 
-/* ap->host_set->private_data */
-struct sil24_host_priv {
-	void __iomem *host_base;	/* global controller control (128 bytes @BAR0) */
-	void __iomem *port_base;	/* port registers (4 * 8192 bytes @BAR2) */
-};
-
-static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
+static void sil24_dev_config(struct ata_device *dev);
 static u8 sil24_check_status(struct ata_port *ap);
 static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
 static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
@@ -330,23 +332,24 @@
 static void sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static void sil24_irq_clear(struct ata_port *ap);
-static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void sil24_freeze(struct ata_port *ap);
 static void sil24_thaw(struct ata_port *ap);
 static void sil24_error_handler(struct ata_port *ap);
 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
 static int sil24_port_start(struct ata_port *ap);
-static void sil24_port_stop(struct ata_port *ap);
-static void sil24_host_stop(struct ata_host_set *host_set);
 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM
 static int sil24_pci_device_resume(struct pci_dev *pdev);
+#endif
 
 static const struct pci_device_id sil24_pci_tbl[] = {
-	{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
-	{ 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
-	{ 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
-	{ 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
-	{ 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
+	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
+	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
+	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
+	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
+	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },
+
 	{ } /* terminate list */
 };
 
@@ -354,9 +357,11 @@
 	.name			= DRV_NAME,
 	.id_table		= sil24_pci_tbl,
 	.probe			= sil24_init_one,
-	.remove			= ata_pci_remove_one, /* safe? */
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend		= ata_pci_device_suspend,
 	.resume			= sil24_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template sil24_sht = {
@@ -376,8 +381,6 @@
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
-	.suspend		= ata_scsi_device_suspend,
-	.resume			= ata_scsi_device_resume,
 };
 
 static const struct ata_port_operations sil24_ops = {
@@ -394,8 +397,9 @@
 	.qc_prep		= sil24_qc_prep,
 	.qc_issue		= sil24_qc_issue,
 
-	.irq_handler		= sil24_interrupt,
 	.irq_clear		= sil24_irq_clear,
+	.irq_on			= ata_dummy_irq_on,
+	.irq_ack		= ata_dummy_irq_ack,
 
 	.scr_read		= sil24_scr_read,
 	.scr_write		= sil24_scr_write,
@@ -406,22 +410,19 @@
 	.post_internal_cmd	= sil24_post_internal_cmd,
 
 	.port_start		= sil24_port_start,
-	.port_stop		= sil24_port_stop,
-	.host_stop		= sil24_host_stop,
 };
 
 /*
- * Use bits 30-31 of host_flags to encode available port numbers.
+ * Use bits 30-31 of port_flags to encode available port numbers.
  * Current maxium is 4.
  */
 #define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
 #define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)
 
-static struct ata_port_info sil24_port_info[] = {
+static const struct ata_port_info sil24_port_info[] = {
 	/* sil_3124 */
 	{
-		.sht		= &sil24_sht,
-		.host_flags	= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
+		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
 				  SIL24_FLAG_PCIX_IRQ_WOC,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
@@ -430,8 +431,7 @@
 	},
 	/* sil_3132 */
 	{
-		.sht		= &sil24_sht,
-		.host_flags	= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
+		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -439,8 +439,7 @@
 	},
 	/* sil_3131/sil_3531 */
 	{
-		.sht		= &sil24_sht,
-		.host_flags	= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
+		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -455,9 +454,9 @@
 	return tag;
 }
 
-static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
+static void sil24_dev_config(struct ata_device *dev)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = dev->ap->ioaddr.cmd_addr;
 
 	if (dev->cdb_len == 16)
 		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
@@ -468,7 +467,7 @@
 static inline void sil24_update_tf(struct ata_port *ap)
 {
 	struct sil24_port_priv *pp = ap->private_data;
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	struct sil24_prb __iomem *prb = port;
 	u8 fis[6 * 4];
 
@@ -491,7 +490,7 @@
 
 static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
 {
-	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = ap->ioaddr.scr_addr;
 	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
 		void __iomem *addr;
 		addr = scr_addr + sil24_scr_map[sc_reg] * 4;
@@ -502,7 +501,7 @@
 
 static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = ap->ioaddr.scr_addr;
 	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
 		void __iomem *addr;
 		addr = scr_addr + sil24_scr_map[sc_reg] * 4;
@@ -518,7 +517,7 @@
 
 static int sil24_init_port(struct ata_port *ap)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	u32 tmp;
 
 	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
@@ -532,9 +531,10 @@
 	return 0;
 }
 
-static int sil24_softreset(struct ata_port *ap, unsigned int *class)
+static int sil24_softreset(struct ata_port *ap, unsigned int *class,
+			   unsigned long deadline)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	struct sil24_port_priv *pp = ap->private_data;
 	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
 	dma_addr_t paddr = pp->cmd_block_dma;
@@ -557,14 +557,14 @@
 
 	/* do SRST */
 	prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
-	prb->fis[1] = 0; /* no PM yet */
+	prb->fis[1] = 0; /* no PMP yet */
 
 	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
 	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
 
 	mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
 	irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
-				     100, ATA_TMOUT_BOOT / HZ * 1000);
+				     100, jiffies_to_msecs(deadline - jiffies));
 
 	writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
 	irq_stat >>= PORT_IRQ_RAW_SHIFT;
@@ -592,9 +592,10 @@
 	return -EIO;
 }
 
-static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
+static int sil24_hardreset(struct ata_port *ap, unsigned int *class,
+			   unsigned long deadline)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	const char *reason;
 	int tout_msec, rc;
 	u32 tmp;
@@ -613,7 +614,7 @@
 	/* SStatus oscillates between zero and valid status after
 	 * DEV_RST, debounce it.
 	 */
-	rc = sata_phy_debounce(ap, sata_deb_timing_long);
+	rc = sata_phy_debounce(ap, sata_deb_timing_long, deadline);
 	if (rc) {
 		reason = "PHY debouncing failed";
 		goto err;
@@ -643,7 +644,6 @@
 				 struct sil24_sge *sge)
 {
 	struct scatterlist *sg;
-	unsigned int idx = 0;
 
 	ata_for_each_sg(sg, qc) {
 		sge->addr = cpu_to_le64(sg_dma_address(sg));
@@ -652,9 +652,7 @@
 			sge->flags = cpu_to_le32(SGE_TRM);
 		else
 			sge->flags = 0;
-
 		sge++;
-		idx++;
 	}
 }
 
@@ -711,7 +709,7 @@
 {
 	struct ata_port *ap = qc->ap;
 	struct sil24_port_priv *pp = ap->private_data;
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	unsigned int tag = sil24_tag(qc->tag);
 	dma_addr_t paddr;
 	void __iomem *activate;
@@ -732,7 +730,7 @@
 
 static void sil24_freeze(struct ata_port *ap)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 
 	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
 	 * PORT_IRQ_ENABLE instead.
@@ -742,7 +740,7 @@
 
 static void sil24_thaw(struct ata_port *ap)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	u32 tmp;
 
 	/* clear IRQ */
@@ -755,7 +753,7 @@
 
 static void sil24_error_intr(struct ata_port *ap)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	struct ata_eh_info *ehi = &ap->eh_info;
 	int freeze = 0;
 	u32 irq_stat;
@@ -833,7 +831,7 @@
 
 static inline void sil24_host_intr(struct ata_port *ap)
 {
-	void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
+	void __iomem *port = ap->ioaddr.cmd_addr;
 	u32 slot_stat, qc_active;
 	int rc;
 
@@ -865,15 +863,15 @@
 			slot_stat, ap->active_tag, ap->sactive);
 }
 
-static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
-	struct sil24_host_priv *hpriv = host_set->private_data;
+	struct ata_host *host = dev_instance;
+	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
 	unsigned handled = 0;
 	u32 status;
 	int i;
 
-	status = readl(hpriv->host_base + HOST_IRQ_STAT);
+	status = readl(host_base + HOST_IRQ_STAT);
 
 	if (status == 0xffffffff) {
 		printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
@@ -884,20 +882,20 @@
 	if (!(status & IRQ_STAT_4PORTS))
 		goto out;
 
-	spin_lock(&host_set->lock);
+	spin_lock(&host->lock);
 
-	for (i = 0; i < host_set->n_ports; i++)
+	for (i = 0; i < host->n_ports; i++)
 		if (status & (1 << i)) {
-			struct ata_port *ap = host_set->ports[i];
+			struct ata_port *ap = host->ports[i];
 			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				sil24_host_intr(host_set->ports[i]);
+				sil24_host_intr(host->ports[i]);
 				handled++;
 			} else
 				printk(KERN_ERR DRV_NAME
 				       ": interrupt from disabled port %d\n", i);
 		}
 
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
  out:
 	return IRQ_RETVAL(handled);
 }
@@ -920,44 +918,34 @@
 {
 	struct ata_port *ap = qc->ap;
 
-	if (qc->flags & ATA_QCFLAG_FAILED)
-		qc->err_mask |= AC_ERR_OTHER;
-
 	/* make DMA engine forget about the failed command */
-	if (qc->err_mask)
+	if (qc->flags & ATA_QCFLAG_FAILED)
 		sil24_init_port(ap);
 }
 
-static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
-{
-	const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
-
-	dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
-}
-
 static int sil24_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct sil24_port_priv *pp;
 	union sil24_cmd_block *cb;
 	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
 	dma_addr_t cb_dma;
-	int rc = -ENOMEM;
+	int rc;
 
-	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
-		goto err_out;
+		return -ENOMEM;
 
 	pp->tf.command = ATA_DRDY;
 
-	cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
+	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
 	if (!cb)
-		goto err_out_pp;
+		return -ENOMEM;
 	memset(cb, 0, cb_size);
 
 	rc = ata_pad_alloc(ap, dev);
 	if (rc)
-		goto err_out_pad;
+		return rc;
 
 	pp->cmd_block = cb;
 	pp->cmd_block_dma = cb_dma;
@@ -965,40 +953,12 @@
 	ap->private_data = pp;
 
 	return 0;
-
-err_out_pad:
-	sil24_cblk_free(pp, dev);
-err_out_pp:
-	kfree(pp);
-err_out:
-	return rc;
-}
-
-static void sil24_port_stop(struct ata_port *ap)
-{
-	struct device *dev = ap->host_set->dev;
-	struct sil24_port_priv *pp = ap->private_data;
-
-	sil24_cblk_free(pp, dev);
-	ata_pad_free(ap, dev);
-	kfree(pp);
-}
-
-static void sil24_host_stop(struct ata_host_set *host_set)
-{
-	struct sil24_host_priv *hpriv = host_set->private_data;
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
-
-	pci_iounmap(pdev, hpriv->host_base);
-	pci_iounmap(pdev, hpriv->port_base);
-	kfree(hpriv);
 }
 
-static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
-				  unsigned long host_flags,
-				  void __iomem *host_base,
-				  void __iomem *port_base)
+static void sil24_init_controller(struct ata_host *host)
 {
+	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+	void __iomem *port_base = host->iomap[SIL24_PORT_BAR];
 	u32 tmp;
 	int i;
 
@@ -1009,7 +969,7 @@
 	writel(0, host_base + HOST_CTRL);
 
 	/* init ports */
-	for (i = 0; i < n_ports; i++) {
+	for (i = 0; i < host->n_ports; i++) {
 		void __iomem *port = port_base + i * PORT_REGS_SIZE;
 
 		/* Initial PHY setting */
@@ -1023,12 +983,12 @@
 						PORT_CS_PORT_RST,
 						PORT_CS_PORT_RST, 10, 100);
 			if (tmp & PORT_CS_PORT_RST)
-				dev_printk(KERN_ERR, &pdev->dev,
+				dev_printk(KERN_ERR, host->dev,
 				           "failed to clear port RST\n");
 		}
 
 		/* Configure IRQ WoC */
-		if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		if (host->ports[0]->flags & SIL24_FLAG_PCIX_IRQ_WOC)
 			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
 		else
 			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
@@ -1045,7 +1005,8 @@
 		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
 
 		/* Clear port multiplier enable and resume bits */
-		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
+		writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME,
+		       port + PORT_CTRL_CLR);
 	}
 
 	/* Turn on interrupts */
@@ -1055,65 +1016,56 @@
 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
-	unsigned int board_id = (unsigned int)ent->driver_data;
-	struct ata_port_info *pinfo = &sil24_port_info[board_id];
-	struct ata_probe_ent *probe_ent = NULL;
-	struct sil24_host_priv *hpriv = NULL;
-	void __iomem *host_base = NULL;
-	void __iomem *port_base = NULL;
+	struct ata_port_info pi = sil24_port_info[ent->driver_data];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	void __iomem * const *iomap;
+	struct ata_host *host;
 	int i, rc;
 	u32 tmp;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
+	rc = pcim_iomap_regions(pdev,
+				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
+				DRV_NAME);
 	if (rc)
-		goto out_disable;
+		return rc;
+	iomap = pcim_iomap_table(pdev);
 
-	rc = -ENOMEM;
-	/* map mmio registers */
-	host_base = pci_iomap(pdev, 0, 0);
-	if (!host_base)
-		goto out_free;
-	port_base = pci_iomap(pdev, 2, 0);
-	if (!port_base)
-		goto out_free;
-
-	/* allocate & init probe_ent and hpriv */
-	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (!probe_ent)
-		goto out_free;
-
-	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv)
-		goto out_free;
-
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	probe_ent->sht		= pinfo->sht;
-	probe_ent->host_flags	= pinfo->host_flags;
-	probe_ent->pio_mask	= pinfo->pio_mask;
-	probe_ent->mwdma_mask	= pinfo->mwdma_mask;
-	probe_ent->udma_mask	= pinfo->udma_mask;
-	probe_ent->port_ops	= pinfo->port_ops;
-	probe_ent->n_ports	= SIL24_FLAG2NPORTS(pinfo->host_flags);
-
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->private_data = hpriv;
+	/* apply workaround for completion IRQ loss on PCI-X errata */
+	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
+		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
+		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
+			dev_printk(KERN_INFO, &pdev->dev,
+				   "Applying completion IRQ loss on PCI-X "
+				   "errata fix\n");
+		else
+			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
+	}
 
-	hpriv->host_base = host_base;
-	hpriv->port_base = port_base;
+	/* allocate and fill host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
+				    SIL24_FLAG2NPORTS(ppi[0]->flags));
+	if (!host)
+		return -ENOMEM;
+	host->iomap = iomap;
 
-	/*
-	 * Configure the device
-	 */
+	for (i = 0; i < host->n_ports; i++) {
+		void __iomem *port = iomap[SIL24_PORT_BAR] + i * PORT_REGS_SIZE;
+
+		host->ports[i]->ioaddr.cmd_addr = port;
+		host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL;
+
+		ata_std_ports(&host->ports[i]->ioaddr);
+	}
+
+	/* configure and activate the device */
 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
 		if (rc) {
@@ -1121,7 +1073,7 @@
 			if (rc) {
 				dev_printk(KERN_ERR, &pdev->dev,
 					   "64-bit DMA enable failed\n");
-				goto out_free;
+				return rc;
 			}
 		}
 	} else {
@@ -1129,83 +1081,48 @@
 		if (rc) {
 			dev_printk(KERN_ERR, &pdev->dev,
 				   "32-bit DMA enable failed\n");
-			goto out_free;
+			return rc;
 		}
 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
 			dev_printk(KERN_ERR, &pdev->dev,
 				   "32-bit consistent DMA enable failed\n");
-			goto out_free;
+			return rc;
 		}
 	}
 
-	/* Apply workaround for completion IRQ loss on PCI-X errata */
-	if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
-		tmp = readl(host_base + HOST_CTRL);
-		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
-			dev_printk(KERN_INFO, &pdev->dev,
-				   "Applying completion IRQ loss on PCI-X "
-				   "errata fix\n");
-		else
-			probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
-	}
-
-	for (i = 0; i < probe_ent->n_ports; i++) {
-		unsigned long portu =
-			(unsigned long)port_base + i * PORT_REGS_SIZE;
-
-		probe_ent->port[i].cmd_addr = portu;
-		probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
-
-		ata_std_ports(&probe_ent->port[i]);
-	}
-
-	sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
-			      host_base, port_base);
+	sil24_init_controller(host);
 
 	pci_set_master(pdev);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-
-	kfree(probe_ent);
-	return 0;
-
- out_free:
-	if (host_base)
-		pci_iounmap(pdev, host_base);
-	if (port_base)
-		pci_iounmap(pdev, port_base);
-	kfree(probe_ent);
-	kfree(hpriv);
-	pci_release_regions(pdev);
- out_disable:
-	pci_disable_device(pdev);
-	return rc;
+	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
+				 &sil24_sht);
 }
 
+#ifdef CONFIG_PM
 static int sil24_pci_device_resume(struct pci_dev *pdev)
 {
-	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
-	struct sil24_host_priv *hpriv = host_set->private_data;
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+	int rc;
 
-	ata_pci_device_do_resume(pdev);
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 
 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
-		writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
+		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);
 
-	sil24_init_controller(pdev, host_set->n_ports,
-			      host_set->ports[0]->flags,
-			      hpriv->host_base, hpriv->port_base);
+	sil24_init_controller(host);
 
-	ata_host_set_resume(host_set);
+	ata_host_resume(host);
 
 	return 0;
 }
+#endif
 
 static int __init sil24_init(void)
 {
-	return pci_module_init(&sil24_pci_driver);
+	return pci_register_driver(&sil24_pci_driver);
 }
 
 static void __exit sil24_exit(void)
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_sil.c linux-2.6.18.x86_64.p4/drivers/ata/sata_sil.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_sil.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_sil.c	2007-06-06 10:08:00.000000000 -0400
@@ -46,9 +46,11 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_sil"
-#define DRV_VERSION	"2.0"
+#define DRV_VERSION	"2.2"
 
 enum {
+	SIL_MMIO_BAR		= 5,
+
 	/*
 	 * host flags
 	 */
@@ -56,7 +58,7 @@
 	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
 	SIL_FLAG_MOD15WRITE	= (1 << 30),
 
-	SIL_DFL_HOST_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
 
 	/*
@@ -109,25 +111,26 @@
 };
 
 static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM
 static int sil_pci_device_resume(struct pci_dev *pdev);
-static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
+#endif
+static void sil_dev_config(struct ata_device *dev);
 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-static void sil_post_set_mode (struct ata_port *ap);
-static irqreturn_t sil_interrupt(int irq, void *dev_instance,
-				 struct pt_regs *regs);
+static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed);
 static void sil_freeze(struct ata_port *ap);
 static void sil_thaw(struct ata_port *ap);
 
 
 static const struct pci_device_id sil_pci_tbl[] = {
-	{ 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
-	{ 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
-	{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
-	{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
-	{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
-	{ 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
-	{ 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
+	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
+	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
+	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
+	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
+	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
+	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
+	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
+
 	{ }	/* terminate list */
 };
 
@@ -141,12 +144,8 @@
 	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
 	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
 	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
-	{ "ST380013AS",		SIL_QUIRK_MOD15WRITE },
 	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
 	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
-	{ "ST3160023AS",	SIL_QUIRK_MOD15WRITE },
-	{ "ST3120026AS",	SIL_QUIRK_MOD15WRITE },
-	{ "ST3200822AS",	SIL_QUIRK_MOD15WRITE },
 	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
 	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
 	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
@@ -161,8 +160,10 @@
 	.id_table		= sil_pci_tbl,
 	.probe			= sil_init_one,
 	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
 	.suspend		= ata_pci_device_suspend,
 	.resume			= sil_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template sil_sht = {
@@ -181,8 +182,6 @@
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
-	.suspend		= ata_scsi_device_suspend,
-	.resume			= ata_scsi_device_resume,
 };
 
 static const struct ata_port_operations sil_ops = {
@@ -193,32 +192,30 @@
 	.check_status		= ata_check_status,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
-	.post_set_mode		= sil_post_set_mode,
+	.set_mode		= sil_set_mode,
 	.bmdma_setup            = ata_bmdma_setup,
 	.bmdma_start            = ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 	.freeze			= sil_freeze,
 	.thaw			= sil_thaw,
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_handler		= sil_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= sil_scr_read,
 	.scr_write		= sil_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_pci_host_stop,
 };
 
 static const struct ata_port_info sil_port_info[] = {
 	/* sil_3112 */
 	{
-		.sht		= &sil_sht,
-		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -226,8 +223,7 @@
 	},
 	/* sil_3112_no_sata_irq */
 	{
-		.sht		= &sil_sht,
-		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
 				  SIL_FLAG_NO_SATA_IRQ,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
@@ -236,8 +232,7 @@
 	},
 	/* sil_3512 */
 	{
-		.sht		= &sil_sht,
-		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -245,8 +240,7 @@
 	},
 	/* sil_3114 */
 	{
-		.sht		= &sil_sht,
-		.host_flags	= SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
 		.udma_mask	= 0x3f,			/* udma0-5 */
@@ -293,14 +287,28 @@
 	return cache_line;
 }
 
-static void sil_post_set_mode (struct ata_port *ap)
+/**
+ *	sil_set_mode		-	wrap set_mode functions
+ *	@ap: port to set up
+ *	@r_failed: returned device when we fail
+ *
+ *	Wrap the libata method for device setup as after the setup we need
+ *	to inspect the results and do some configuration work
+ */
+
+static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed)
 {
-	struct ata_host_set *host_set = ap->host_set;
+	struct ata_host *host = ap->host;
 	struct ata_device *dev;
-	void __iomem *addr =
-		host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
+	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
+	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
 	u32 tmp, dev_mode[2];
 	unsigned int i;
+	int rc;
+
+	rc = ata_do_set_mode(ap, r_failed);
+	if (rc)
+		return rc;
 
 	for (i = 0; i < 2; i++) {
 		dev = &ap->device[i];
@@ -319,11 +327,12 @@
 	tmp |= (dev_mode[1] << 4);
 	writel(tmp, addr);
 	readl(addr);	/* flush */
+	return 0;
 }
 
-static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
+static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
 {
-	unsigned long offset = ap->ioaddr.scr_addr;
+	void __iomem *offset = ap->ioaddr.scr_addr;
 
 	switch (sc_reg) {
 	case SCR_STATUS:
@@ -337,12 +346,12 @@
 		break;
 	}
 
-	return 0;
+	return NULL;
 }
 
 static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
-	void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
+	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
 	if (mmio)
 		return readl(mmio);
 	return 0xffffffffU;
@@ -350,13 +359,14 @@
 
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
-	void *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
+	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
 	if (mmio)
 		writel(val, mmio);
 }
 
 static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 {
+	struct ata_eh_info *ehi = &ap->eh_info;
 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
 	u8 status;
 
@@ -383,9 +393,15 @@
 		goto freeze;
 	}
 
-	if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
+	if (unlikely(!qc))
 		goto freeze;
 
+	if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
+		/* this sometimes happens, just clear IRQ */
+		ata_chk_status(ap);
+		return;
+	}
+
 	/* Check whether we are expecting interrupt in this state */
 	switch (ap->hsm_task_state) {
 	case HSM_ST_FIRST:
@@ -429,6 +445,10 @@
 	/* kick HSM in the ass */
 	ata_hsm_move(ap, qc, status, 0);
 
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
+		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
+
 	return;
 
  err_hsm:
@@ -437,18 +457,17 @@
 	ata_port_freeze(ap);
 }
 
-static irqreturn_t sil_interrupt(int irq, void *dev_instance,
-				 struct pt_regs *regs)
+static irqreturn_t sil_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
-	void __iomem *mmio_base = host_set->mmio_base;
+	struct ata_host *host = dev_instance;
+	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
 	int handled = 0;
 	int i;
 
-	spin_lock(&host_set->lock);
+	spin_lock(&host->lock);
 
-	for (i = 0; i < host_set->n_ports; i++) {
-		struct ata_port *ap = host_set->ports[i];
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
 
 		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
@@ -466,14 +485,14 @@
 		handled = 1;
 	}
 
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 
 	return IRQ_RETVAL(handled);
 }
 
 static void sil_freeze(struct ata_port *ap)
 {
-	void __iomem *mmio_base = ap->host_set->mmio_base;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 	u32 tmp;
 
 	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
@@ -488,7 +507,7 @@
 
 static void sil_thaw(struct ata_port *ap)
 {
-	void __iomem *mmio_base = ap->host_set->mmio_base;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 	u32 tmp;
 
 	/* clear IRQ */
@@ -507,7 +526,6 @@
 
 /**
  *	sil_dev_config - Apply device/host-specific errata fixups
- *	@ap: Port containing device to be examined
  *	@dev: Device to be examined
  *
  *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
@@ -534,12 +552,14 @@
  *	appreciated.
  *	- But then again UDMA5 is hardly anything to complain about
  */
-static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
+static void sil_dev_config(struct ata_device *dev)
 {
+	struct ata_port *ap = dev->ap;
+	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
 	unsigned int n, quirks = 0;
-	unsigned char model_num[41];
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 
-	ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
 
 	for (n = 0; sil_blacklist[n].product; n++)
 		if (!strcmp(sil_blacklist[n].product, model_num)) {
@@ -551,25 +571,27 @@
 	if (slow_down ||
 	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
 	     (quirks & SIL_QUIRK_MOD15WRITE))) {
-		ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
-			       "(mod15write workaround)\n");
+		if (print_info)
+			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
+				       "errata fix (mod15write workaround)\n");
 		dev->max_sectors = 15;
 		return;
 	}
 
 	/* limit to udma5 */
 	if (quirks & SIL_QUIRK_UDMA5MAX) {
-		ata_dev_printk(dev, KERN_INFO,
-			       "applying Maxtor errata fix %s\n", model_num);
+		if (print_info)
+			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
+				       "errata fix %s\n", model_num);
 		dev->udma_mask &= ATA_UDMA5;
 		return;
 	}
 }
 
-static void sil_init_controller(struct pci_dev *pdev,
-				int n_ports, unsigned long host_flags,
-				void __iomem *mmio_base)
+static void sil_init_controller(struct ata_host *host)
 {
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
 	u8 cls;
 	u32 tmp;
 	int i;
@@ -579,7 +601,7 @@
 	if (cls) {
 		cls >>= 3;
 		cls++;  /* cls = (line_size/8)+1 */
-		for (i = 0; i < n_ports; i++)
+		for (i = 0; i < host->n_ports; i++)
 			writew(cls << 8 | cls,
 			       mmio_base + sil_port[i].fifo_cfg);
 	} else
@@ -587,10 +609,10 @@
 			   "cache line size not set.  Driver may not function\n");
 
 	/* Apply R_ERR on DMA activate FIS errata workaround */
-	if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
+	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
 		int cnt;
 
-		for (i = 0, cnt = 0; i < n_ports; i++) {
+		for (i = 0, cnt = 0; i < host->n_ports; i++) {
 			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
 			if ((tmp & 0x3) != 0x01)
 				continue;
@@ -603,7 +625,7 @@
 		}
 	}
 
-	if (n_ports == 4) {
+	if (host->n_ports == 4) {
 		/* flip the magic "make 4 ports work" bit */
 		tmp = readl(mmio_base + sil_port[2].bmdma);
 		if ((tmp & SIL_INTR_STEERING) == 0)
@@ -615,106 +637,85 @@
 static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	unsigned long base;
+	int board_id = ent->driver_data;
+	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
+	struct ata_host *host;
 	void __iomem *mmio_base;
-	int rc;
+	int n_ports, rc;
 	unsigned int i;
-	int pci_dev_busy = 0;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	/* allocate host */
+	n_ports = 2;
+	if (board_id == sil_3114)
+		n_ports = 4;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
 
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
-		goto err_out_regions;
+		return rc;
 	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
-		goto err_out_regions;
-
-	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	INIT_LIST_HEAD(&probe_ent->node);
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
-	probe_ent->sht = sil_port_info[ent->driver_data].sht;
-	probe_ent->n_ports = (ent->driver_data == sil_3114) ? 4 : 2;
-	probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
-	probe_ent->mwdma_mask = sil_port_info[ent->driver_data].mwdma_mask;
-	probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
-       	probe_ent->irq = pdev->irq;
-       	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
-
-	mmio_base = pci_iomap(pdev, 5, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-
-	probe_ent->mmio_base = mmio_base;
-
-	base = (unsigned long) mmio_base;
-
-	for (i = 0; i < probe_ent->n_ports; i++) {
-		probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
-		probe_ent->port[i].altstatus_addr =
-		probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
-		probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
-		probe_ent->port[i].scr_addr = base + sil_port[i].scr;
-		ata_std_ports(&probe_ent->port[i]);
-	}
+		return rc;
 
-	sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
-			    mmio_base);
+	mmio_base = host->iomap[SIL_MMIO_BAR];
 
-	pci_set_master(pdev);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;
 
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
+		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
+		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
+		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
+		ata_std_ports(ioaddr);
+	}
 
-	return 0;
+	/* initialize and activate */
+	sil_init_controller(host);
 
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
+				 &sil_sht);
 }
 
+#ifdef CONFIG_PM
 static int sil_pci_device_resume(struct pci_dev *pdev)
 {
-	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
 
-	ata_pci_device_do_resume(pdev);
-	sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
-			    host_set->mmio_base);
-	ata_host_set_resume(host_set);
+	sil_init_controller(host);
+	ata_host_resume(host);
 
 	return 0;
 }
+#endif
 
 static int __init sil_init(void)
 {
-	return pci_module_init(&sil_pci_driver);
+	return pci_register_driver(&sil_pci_driver);
 }
 
 static void __exit sil_exit(void)
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_sis.c linux-2.6.18.x86_64.p4/drivers/ata/sata_sis.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_sis.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_sis.c	2007-06-06 10:08:00.000000000 -0400
@@ -40,9 +40,10 @@
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
+#include "sis.h"
 
 #define DRV_NAME	"sata_sis"
-#define DRV_VERSION	"0.6"
+#define DRV_VERSION	"0.8"
 
 enum {
 	sis_180			= 0,
@@ -67,13 +68,16 @@
 static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 
 static const struct pci_device_id sis_pci_tbl[] = {
-	{ PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
-	{ PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
-	{ PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 },
+	{ PCI_VDEVICE(SI, 0x0180), sis_180 },		/* SiS 964/180 */
+	{ PCI_VDEVICE(SI, 0x0181), sis_180 },		/* SiS 964/180 */
+	{ PCI_VDEVICE(SI, 0x0182), sis_180 },		/* SiS 965/965L */
+	{ PCI_VDEVICE(SI, 0x0183), sis_180 },		/* SiS 965/965L */
+	{ PCI_VDEVICE(SI, 0x1182), sis_180 },		/* SiS 966/966L */
+	{ PCI_VDEVICE(SI, 0x1183), sis_180 },		/* SiS 966/966L */
+
 	{ }	/* terminate list */
 };
 
-
 static struct pci_driver sis_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= sis_pci_tbl,
@@ -112,54 +116,63 @@
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= sis_scr_read,
 	.scr_write		= sis_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
 };
 
-static struct ata_port_info sis_port_info = {
-	.sht		= &sis_sht,
-	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+static const struct ata_port_info sis_port_info = {
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0x7,
 	.udma_mask	= 0x7f,
 	.port_ops	= &sis_ops,
 };
 
-
 MODULE_AUTHOR("Uwe Koziolek");
 MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device)
+static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
 {
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
+	u8 pmr;
 
-	if (port_no)  {
-		if (device == 0x182)
-			addr += SIS182_SATA1_OFS;
-		else
-			addr += SIS180_SATA1_OFS;
+	if (ap->port_no)  {
+		switch (pdev->device) {
+			case 0x0180:
+			case 0x0181:
+				pci_read_config_byte(pdev, SIS_PMR, &pmr);
+				if ((pmr & SIS_PMR_COMBINED) == 0)
+					addr += SIS180_SATA1_OFS;
+				break;
+
+			case 0x0182:
+			case 0x0183:
+			case 0x1182:
+			case 0x1183:
+				addr += SIS182_SATA1_OFS;
+				break;
+		}
 	}
-
 	return addr;
 }
 
 static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
-	unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
 	u32 val, val2 = 0;
 	u8 pmr;
 
@@ -170,32 +183,34 @@
 
 	pci_read_config_dword(pdev, cfg_addr, &val);
 
-	if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
+	if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
+	    (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
 		pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
 
-	return val|val2;
+	return (val|val2) &  0xfffffffb; /* avoid problems with powerdowned ports */
 }
 
-static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
+static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
-	unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
 	u8 pmr;
 
-	if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */
+	if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
 		return;
 
 	pci_read_config_byte(pdev, SIS_PMR, &pmr);
 
 	pci_write_config_dword(pdev, cfg_addr, val);
 
-	if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
+	if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
+	    (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
 		pci_write_config_dword(pdev, cfg_addr+0x10, val);
 }
 
 static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	u32 val, val2 = 0;
 	u8 pmr;
 
@@ -207,17 +222,18 @@
 
 	pci_read_config_byte(pdev, SIS_PMR, &pmr);
 
-	val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
+	val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
 
-	if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
-		val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
+	if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
+	    (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
+		val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
 
-	return val | val2;
+	return (val | val2) &  0xfffffffb;
 }
 
 static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	u8 pmr;
 
 	if (sc_reg > SCR_CONTROL)
@@ -228,113 +244,122 @@
 	if (ap->flags & SIS_FLAG_CFGSCR)
 		sis_scr_cfg_write(ap, sc_reg, val);
 	else {
-		outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
-		if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED))
-			outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
+		iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+		if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
+		    (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
+			iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
 	}
 }
 
 static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	int rc;
-	u32 genctl;
-	struct ata_port_info *ppi;
-	int pci_dev_busy = 0;
+	struct ata_port_info pi = sis_port_info;
+	const struct ata_port_info *ppi[] = { &pi, &pi };
+	struct ata_host *host;
+	u32 genctl, val;
 	u8 pmr;
-	u8 port2_start;
+	u8 port2_start = 0x20;
+	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
-
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-
-	ppi = &sis_port_info;
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-	if (!probe_ent) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
 	/* check and see if the SCRs are in IO space or PCI cfg space */
 	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
 	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
-		probe_ent->host_flags |= SIS_FLAG_CFGSCR;
+		pi.flags |= SIS_FLAG_CFGSCR;
 
 	/* if hardware thinks SCRs are in IO space, but there are
 	 * no IO resources assigned, change to PCI cfg space.
 	 */
-	if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) &&
+	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
 	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
 	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
 		genctl &= ~GENCTL_IOMAPPED_SCR;
 		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
-		probe_ent->host_flags |= SIS_FLAG_CFGSCR;
+		pi.flags |= SIS_FLAG_CFGSCR;
 	}
 
 	pci_read_config_byte(pdev, SIS_PMR, &pmr);
-	if (ent->device != 0x182) {
+	switch (ent->device) {
+	case 0x0180:
+	case 0x0181:
+
+		/* The PATA-handling is provided by pata_sis */
+		switch (pmr & 0x30) {
+		case 0x10:
+			ppi[1] = &sis_info133;
+			break;
+
+		case 0x30:
+			ppi[0] = &sis_info133;
+			break;
+		}
 		if ((pmr & SIS_PMR_COMBINED) == 0) {
 			dev_printk(KERN_INFO, &pdev->dev,
-				   "Detected SiS 180/181 chipset in SATA mode\n");
+				   "Detected SiS 180/181/964 chipset in SATA mode\n");
 			port2_start = 64;
-		}
-		else {
+		} else {
 			dev_printk(KERN_INFO, &pdev->dev,
 				   "Detected SiS 180/181 chipset in combined mode\n");
 			port2_start=0;
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
 		}
-	}
-	else {
-		dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182 chipset\n");
-		port2_start = 0x20;
+		break;
+
+	case 0x0182:
+	case 0x0183:
+		pci_read_config_dword ( pdev, 0x6C, &val);
+		if (val & (1L << 31)) {
+			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
+		} else {
+			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
+		}
+		break;
+
+	case 0x1182:
+	case 0x1183:
+		pci_read_config_dword(pdev, 0x64, &val);
+		if (val & 0x10000000) {
+			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/1183/966L SATA controller\n");
+		} else {
+			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/1183/966 SATA controller\n");
+			pi.flags |= ATA_FLAG_SLAVE_POSS;
+		}
+		break;
 	}
 
-	if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) {
-		probe_ent->port[0].scr_addr =
-			pci_resource_start(pdev, SIS_SCR_PCI_BAR);
-		probe_ent->port[1].scr_addr =
-			pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
+	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+
+	if (!(pi.flags & SIS_FLAG_CFGSCR)) {
+		void __iomem *mmio;
+
+		rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
+		if (rc)
+			return rc;
+		mmio = host->iomap[SIS_SCR_PCI_BAR];
+
+		host->ports[0]->ioaddr.scr_addr = mmio;
+		host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
 	}
 
 	pci_set_master(pdev);
 	pci_intx(pdev, 1);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_regions:
-	pci_release_regions(pdev);
-
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
-
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &sis_sht);
 }
 
 static int __init sis_init(void)
 {
-	return pci_module_init(&sis_pci_driver);
+	return pci_register_driver(&sis_pci_driver);
 }
 
 static void __exit sis_exit(void)
@@ -344,4 +369,3 @@
 
 module_init(sis_init);
 module_exit(sis_exit);
-
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_svw.c linux-2.6.18.x86_64.p4/drivers/ata/sata_svw.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_svw.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_svw.c	2007-06-06 10:08:00.000000000 -0400
@@ -53,9 +53,13 @@
 #endif /* CONFIG_PPC_OF */
 
 #define DRV_NAME	"sata_svw"
-#define DRV_VERSION	"2.0"
+#define DRV_VERSION	"2.2"
 
 enum {
+	/* ap->flags bits */
+	K2_FLAG_SATA_8_PORTS		= (1 << 24),
+	K2_FLAG_NO_ATAPI_DMA		= (1 << 25),
+
 	/* Taskfile registers offsets */
 	K2_SATA_TF_CMD_OFFSET		= 0x00,
 	K2_SATA_TF_DATA_OFFSET		= 0x00,
@@ -83,16 +87,27 @@
 
 	/* Port stride */
 	K2_SATA_PORT_OFFSET		= 0x100,
+
+	board_svw4			= 0,
+	board_svw8			= 1,
 };
 
 static u8 k2_stat_check_status(struct ata_port *ap);
 
 
+static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
+		return -1;	/* ATAPI DMA not supported */
+
+	return 0;
+}
+
 static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
 	if (sc_reg > SCR_CONTROL)
 		return 0xffffffffU;
-	return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -101,7 +116,7 @@
 {
 	if (sc_reg > SCR_CONTROL)
 		return;
-	writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -116,11 +131,16 @@
 		ata_wait_idle(ap);
 	}
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
-		writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
-		writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
-		writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
-		writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
+		writew(tf->feature | (((u16)tf->hob_feature) << 8),
+		       ioaddr->feature_addr);
+		writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+		       ioaddr->nsect_addr);
+		writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+		       ioaddr->lbal_addr);
+		writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+		       ioaddr->lbam_addr);
+		writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+		       ioaddr->lbah_addr);
 	} else if (is_addr) {
 		writew(tf->feature, ioaddr->feature_addr);
 		writew(tf->nsect, ioaddr->nsect_addr);
@@ -169,7 +189,7 @@
  *	@qc: Info associated with this ATA transaction.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@@ -177,7 +197,7 @@
 	struct ata_port *ap = qc->ap;
 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 	u8 dmactl;
-	void *mmio = (void *) ap->ioaddr.bmdma_addr;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 	/* load PRD table addr. */
 	mb();	/* make sure PRD table writes are visible to controller */
 	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
@@ -199,13 +219,13 @@
  *	@qc: Info associated with this ATA transaction.
  *
  *	LOCKING:
- *	spin_lock_irqsave(host_set lock)
+ *	spin_lock_irqsave(host lock)
  */
 
 static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void *mmio = (void *) ap->ioaddr.bmdma_addr;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 	u8 dmactl;
 
 	/* start host DMA transaction */
@@ -233,7 +253,7 @@
 
 static u8 k2_stat_check_status(struct ata_port *ap)
 {
-       	return readl((void *) ap->ioaddr.status_addr);
+       	return readl((void __iomem *) ap->ioaddr.status_addr);
 }
 
 #ifdef CONFIG_PPC_OF
@@ -261,12 +281,12 @@
 		return 0;
 
 	/* Find the OF node for the PCI device proper */
-	np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
+	np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
 	if (np == NULL)
 		return 0;
 
 	/* Match it to a port node */
-	index = (ap == ap->host_set->ports[0]) ? 0 : 1;
+	index = (ap == ap->host->ports[0]) ? 0 : 1;
 	for (np = np->child; np != NULL; np = np->sibling) {
 		u32 *reg = (u32 *)get_property(np, "reg", NULL);
 		if (!reg)
@@ -313,27 +333,49 @@
 	.check_status		= k2_stat_check_status,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
+	.check_atapi_dma	= k2_sata_check_atapi_dma,
 	.bmdma_setup		= k2_bmdma_setup_mmio,
 	.bmdma_start		= k2_bmdma_start_mmio,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= k2_sata_scr_read,
 	.scr_write		= k2_sata_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_pci_host_stop,
 };
 
-static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
+static const struct ata_port_info k2_port_info[] = {
+	/* board_svw4 */
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask	= 0x7f,
+		.port_ops	= &k2_sata_ops,
+	},
+	/* board_svw8 */
+	{
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
+				  K2_FLAG_SATA_8_PORTS,
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask	= 0x7f,
+		.port_ops	= &k2_sata_ops,
+	},
+};
+
+static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
 {
 	port->cmd_addr		= base + K2_SATA_TF_CMD_OFFSET;
 	port->data_addr		= base + K2_SATA_TF_DATA_OFFSET;
@@ -356,23 +398,32 @@
 static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	unsigned long base;
+	const struct ata_port_info *ppi[] =
+		{ &k2_port_info[ent->driver_data], NULL };
+	struct ata_host *host;
 	void __iomem *mmio_base;
-	int pci_dev_busy = 0;
-	int rc;
-	int i;
+	int n_ports, i, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
+	/* allocate host */
+	n_ports = 4;
+	if (ppi[0]->flags & K2_FLAG_SATA_8_PORTS)
+		n_ports = 8;
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
 	/*
 	 * If this driver happens to only be useful on Apple's K2, then
 	 * we should check that here as it has a normal Serverworks ID
 	 */
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
+
 	/*
 	 * Check if we have resources mapped at all (second function may
 	 * have been disabled by firmware)
@@ -380,36 +431,28 @@
 	if (pci_resource_len(pdev, 5) == 0)
 		return -ENODEV;
 
-	/* Request PCI regions */
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	/* Request and iomap PCI regions */
+	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	mmio_base = host->iomap[5];
+
+	/* different controllers have different number of ports - currently 4 or 8 */
+	/* All ports are on the same function. Multi-function device is no
+	 * longer available. This should not be seen in any system. */
+	for (i = 0; i < host->n_ports; i++)
+		k2_sata_setup_port(&host->ports[i]->ioaddr,
+				   mmio_base + i * K2_SATA_PORT_OFFSET);
 
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
-		goto err_out_regions;
+		return rc;
 	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
-		goto err_out_regions;
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, 5, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
+		return rc;
 
 	/* Clear a magic bit in SCR1 according to Darwin, those help
 	 * some funky seagate drives (though so far, those were already
@@ -422,44 +465,9 @@
 	writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
 	writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
 
-	probe_ent->sht = &k2_sata_sht;
-	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				ATA_FLAG_MMIO;
-	probe_ent->port_ops = &k2_sata_ops;
-	probe_ent->n_ports = 4;
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-
-	/* We don't care much about the PIO/UDMA masks, but the core won't like us
-	 * if we don't fill these
-	 */
-	probe_ent->pio_mask = 0x1f;
-	probe_ent->mwdma_mask = 0x7;
-	probe_ent->udma_mask = 0x7f;
-
-	/* different controllers have different number of ports - currently 4 or 8 */
-	/* All ports are on the same function. Multi-function device is no
-	 * longer available. This should not be seen in any system. */
-	for (i = 0; i < ent->driver_data; i++)
-		k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);
-
 	pci_set_master(pdev);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &k2_sata_sht);
 }
 
 /* 0x240 is device ID for Apple K2 device
@@ -469,15 +477,15 @@
  * controller
  * */
 static const struct pci_device_id k2_sata_pci_tbl[] = {
-	{ 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
-	{ 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
-	{ 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
-	{ 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
-	{ 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 },
+	{ PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 },
+
 	{ }
 };
 
-
 static struct pci_driver k2_sata_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= k2_sata_pci_tbl,
@@ -485,19 +493,16 @@
 	.remove			= ata_pci_remove_one,
 };
 
-
 static int __init k2_sata_init(void)
 {
-	return pci_module_init(&k2_sata_pci_driver);
+	return pci_register_driver(&k2_sata_pci_driver);
 }
 
-
 static void __exit k2_sata_exit(void)
 {
 	pci_unregister_driver(&k2_sata_pci_driver);
 }
 
-
 MODULE_AUTHOR("Benjamin Herrenschmidt");
 MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
 MODULE_LICENSE("GPL");
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_sx4.c linux-2.6.18.x86_64.p4/drivers/ata/sata_sx4.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_sx4.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_sx4.c	2007-06-06 10:08:00.000000000 -0400
@@ -37,19 +37,20 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 #include "sata_promise.h"
 
 #define DRV_NAME	"sata_sx4"
-#define DRV_VERSION	"0.9"
+#define DRV_VERSION	"0.11"
 
 
 enum {
+	PDC_MMIO_BAR		= 3,
+	PDC_DIMM_BAR		= 4,
+
 	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */
 
 	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
@@ -138,8 +139,6 @@
 };
 
 struct pdc_host_priv {
-	void			__iomem *dimm_mmio;
-
 	unsigned int		doing_hdma;
 	unsigned int		hdma_prod;
 	unsigned int		hdma_cons;
@@ -152,26 +151,23 @@
 
 
 static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
 static void pdc_eng_timeout(struct ata_port *ap);
 static void pdc_20621_phy_reset (struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
-static void pdc_port_stop(struct ata_port *ap);
 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
-static void pdc20621_host_stop(struct ata_host_set *host_set);
-static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
-static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
-static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
+static unsigned int pdc20621_dimm_init(struct ata_host *host);
+static int pdc20621_detect_dimm(struct ata_host *host);
+static unsigned int pdc20621_i2c_read(struct ata_host *host,
 				      u32 device, u32 subaddr, u32 *pdata);
-static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
-static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
+static int pdc20621_prog_dimm0(struct ata_host *host);
+static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
 #ifdef ATA_VERBOSE_DEBUG
-static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
+static void pdc20621_get_from_dimm(struct ata_host *host,
 				   void *psource, u32 offset, u32 size);
 #endif
-static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
+static void pdc20621_put_to_dimm(struct ata_host *host,
 				 void *psource, u32 offset, u32 size);
 static void pdc20621_irq_clear(struct ata_port *ap);
 static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -205,20 +201,18 @@
 	.phy_reset		= pdc_20621_phy_reset,
 	.qc_prep		= pdc20621_qc_prep,
 	.qc_issue		= pdc20621_qc_issue_prot,
-	.data_xfer		= ata_mmio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 	.eng_timeout		= pdc_eng_timeout,
-	.irq_handler		= pdc20621_interrupt,
 	.irq_clear		= pdc20621_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.port_start		= pdc_port_start,
-	.port_stop		= pdc_port_stop,
-	.host_stop		= pdc20621_host_stop,
 };
 
 static const struct ata_port_info pdc_port_info[] = {
 	/* board_20621 */
 	{
-		.sht		= &pdc_sata_sht,
-		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
 				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
@@ -230,12 +224,11 @@
 };
 
 static const struct pci_device_id pdc_sata_pci_tbl[] = {
-	{ PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  board_20621 },
+	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
+
 	{ }	/* terminate list */
 };
 
-
 static struct pci_driver pdc_sata_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= pdc_sata_pci_tbl,
@@ -244,21 +237,9 @@
 };
 
 
-static void pdc20621_host_stop(struct ata_host_set *host_set)
-{
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
-	struct pdc_host_priv *hpriv = host_set->private_data;
-	void __iomem *dimm_mmio = hpriv->dimm_mmio;
-
-	pci_iounmap(pdev, dimm_mmio);
-	kfree(hpriv);
-
-	pci_iounmap(pdev, host_set->mmio_base);
-}
-
 static int pdc_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct pdc_port_priv *pp;
 	int rc;
 
@@ -266,43 +247,19 @@
 	if (rc)
 		return rc;
 
-	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
-	if (!pp) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
-	memset(pp, 0, sizeof(*pp));
-
-	pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
-	if (!pp->pkt) {
-		rc = -ENOMEM;
-		goto err_out_kfree;
-	}
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
 
 	ap->private_data = pp;
 
 	return 0;
-
-err_out_kfree:
-	kfree(pp);
-err_out:
-	ata_port_stop(ap);
-	return rc;
 }
 
-
-static void pdc_port_stop(struct ata_port *ap)
-{
-	struct device *dev = ap->host_set->dev;
-	struct pdc_port_priv *pp = ap->private_data;
-
-	ap->private_data = NULL;
-	dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
-	kfree(pp);
-	ata_port_stop(ap);
-}
-
-
 static void pdc_20621_phy_reset (struct ata_port *ap)
 {
 	VPRINTK("ENTER\n");
@@ -453,16 +410,15 @@
 	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	struct pdc_host_priv *hpriv = ap->host_set->private_data;
-	void __iomem *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 	unsigned int portno = ap->port_no;
 	unsigned int i, idx, total_len = 0, sgt_len;
 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 
 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
 
-	VPRINTK("ata%u: ENTER\n", ap->id);
+	VPRINTK("ata%u: ENTER\n", ap->print_id);
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -514,13 +470,12 @@
 {
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
-	struct pdc_host_priv *hpriv = ap->host_set->private_data;
-	void __iomem *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 	unsigned int portno = ap->port_no;
 	unsigned int i;
 
-	VPRINTK("ata%u: ENTER\n", ap->id);
+	VPRINTK("ata%u: ENTER\n", ap->print_id);
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -565,8 +520,8 @@
 				 u32 pkt_ofs)
 {
 	struct ata_port *ap = qc->ap;
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = ap->host;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -583,7 +538,7 @@
 				u32 pkt_ofs)
 {
 	struct ata_port *ap = qc->ap;
-	struct pdc_host_priv *pp = ap->host_set->private_data;
+	struct pdc_host_priv *pp = ap->host->private_data;
 	unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
 
 	if (!pp->doing_hdma) {
@@ -601,7 +556,7 @@
 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct pdc_host_priv *pp = ap->host_set->private_data;
+	struct pdc_host_priv *pp = ap->host->private_data;
 	unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
 
 	/* if nothing on queue, we're done */
@@ -620,8 +575,7 @@
 {
 	struct ata_port *ap = qc->ap;
 	unsigned int port_no = ap->port_no;
-	struct pdc_host_priv *hpriv = ap->host_set->private_data;
-	void *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
 
 	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
 	dimm_mmio += PDC_DIMM_HOST_PKT;
@@ -638,9 +592,9 @@
 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct ata_host_set *host_set = ap->host_set;
+	struct ata_host *host = ap->host;
 	unsigned int port_no = ap->port_no;
-	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 	u8 seq = (u8) (port_no + 1);
 	unsigned int port_ofs;
@@ -648,7 +602,7 @@
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
 
-	VPRINTK("ata%u: ENTER\n", ap->id);
+	VPRINTK("ata%u: ENTER\n", ap->print_id);
 
 	wmb();			/* flush PRD, pkt writes */
 
@@ -669,8 +623,8 @@
 		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */
 
 		writel(port_ofs + PDC_DIMM_ATA_PKT,
-		       (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
-		readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
 			port_ofs + PDC_DIMM_ATA_PKT,
 			port_ofs + PDC_DIMM_ATA_PKT,
@@ -715,7 +669,7 @@
 
 		/* step two - DMA from DIMM to host */
 		if (doing_hdma) {
-			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id,
+			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
 				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 			/* get drive status; clear intr; complete txn */
 			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -726,7 +680,7 @@
 		/* step one - exec ATA command */
 		else {
 			u8 seq = (u8) (port_no + 1 + 4);
-			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id,
+			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
 				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 
 			/* submit hdma pkt */
@@ -741,20 +695,20 @@
 		/* step one - DMA from host to DIMM */
 		if (doing_hdma) {
 			u8 seq = (u8) (port_no + 1);
-			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id,
+			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
 				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 
 			/* submit ata pkt */
 			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
 			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
 			writel(port_ofs + PDC_DIMM_ATA_PKT,
-			       (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
-			readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
 		}
 
 		/* step two - execute ATA command */
 		else {
-			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id,
+			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
 				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
 			/* get drive status; clear intr; complete txn */
 			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -781,17 +735,17 @@
 
 static void pdc20621_irq_clear(struct ata_port *ap)
 {
-	struct ata_host_set *host_set = ap->host_set;
-	void __iomem *mmio = host_set->mmio_base;
+	struct ata_host *host = ap->host;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	mmio += PDC_CHIP0_OFS;
 
 	readl(mmio + PDC_20621_SEQMASK);
 }
 
-static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	struct ata_port *ap;
 	u32 mask = 0;
 	unsigned int i, tmp, port_no;
@@ -800,12 +754,12 @@
 
 	VPRINTK("ENTER\n");
 
-	if (!host_set || !host_set->mmio_base) {
+	if (!host || !host->iomap[PDC_MMIO_BAR]) {
 		VPRINTK("QUICK EXIT\n");
 		return IRQ_NONE;
 	}
 
-	mmio_base = host_set->mmio_base;
+	mmio_base = host->iomap[PDC_MMIO_BAR];
 
 	/* reading should also clear interrupts */
 	mmio_base += PDC_CHIP0_OFS;
@@ -822,16 +776,16 @@
 		return IRQ_NONE;
 	}
 
-        spin_lock(&host_set->lock);
+        spin_lock(&host->lock);
 
         for (i = 1; i < 9; i++) {
 		port_no = i - 1;
 		if (port_no > 3)
 			port_no -= 4;
-		if (port_no >= host_set->n_ports)
+		if (port_no >= host->n_ports)
 			ap = NULL;
 		else
-			ap = host_set->ports[port_no];
+			ap = host->ports[port_no];
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
@@ -845,7 +799,7 @@
 		}
 	}
 
-        spin_unlock(&host_set->lock);
+        spin_unlock(&host->lock);
 
 	VPRINTK("mask == 0x%x\n", mask);
 
@@ -857,13 +811,13 @@
 static void pdc_eng_timeout(struct ata_port *ap)
 {
 	u8 drv_stat;
-	struct ata_host_set *host_set = ap->host_set;
+	struct ata_host *host = ap->host;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
-	spin_lock_irqsave(&host_set->lock, flags);
+	spin_lock_irqsave(&host->lock, flags);
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 
@@ -885,7 +839,7 @@
 		break;
 	}
 
-	spin_unlock_irqrestore(&host_set->lock, flags);
+	spin_unlock_irqrestore(&host->lock, flags);
 	ata_eh_qc_complete(qc);
 	DPRINTK("EXIT\n");
 }
@@ -906,7 +860,7 @@
 }
 
 
-static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
+static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
 {
 	port->cmd_addr		= base;
 	port->data_addr		= base;
@@ -925,16 +879,15 @@
 
 
 #ifdef ATA_VERBOSE_DEBUG
-static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
+static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 				   u32 offset, u32 size)
 {
 	u32 window_size;
 	u16 idx;
 	u8 page_mask;
 	long dist;
-	void __iomem *mmio = pe->mmio_base;
-	struct pdc_host_priv *hpriv = pe->private_data;
-	void __iomem *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -981,16 +934,15 @@
 #endif
 
 
-static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
+static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
 				 u32 offset, u32 size)
 {
 	u32 window_size;
 	u16 idx;
 	u8 page_mask;
 	long dist;
-	void __iomem *mmio = pe->mmio_base;
-	struct pdc_host_priv *hpriv = pe->private_data;
-	void __iomem *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -1032,10 +984,10 @@
 }
 
 
-static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
+static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
 				      u32 subaddr, u32 *pdata)
 {
-	void __iomem *mmio = pe->mmio_base;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 	u32 i2creg  = 0;
 	u32 status;
 	u32 count =0;
@@ -1068,17 +1020,17 @@
 }
 
 
-static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
+static int pdc20621_detect_dimm(struct ata_host *host)
 {
 	u32 data=0 ;
-  	if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 			     PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
    		if (data == 100)
 			return 100;
   	} else
 		return 0;
 
-   	if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
+	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
 		if(data <= 0x75)
 			return 133;
    	} else
@@ -1088,13 +1040,13 @@
 }
 
 
-static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
+static int pdc20621_prog_dimm0(struct ata_host *host)
 {
 	u32 spd0[50];
 	u32 data = 0;
    	int size, i;
    	u8 bdimmsize;
-   	void __iomem *mmio = pe->mmio_base;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 	static const struct {
 		unsigned int reg;
 		unsigned int ofs;
@@ -1117,7 +1069,7 @@
 	mmio += PDC_CHIP0_OFS;
 
 	for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++)
-		pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 				  pdc_i2c_read_data[i].reg,
 				  &spd0[pdc_i2c_read_data[i].ofs]);
 
@@ -1153,11 +1105,11 @@
 }
 
 
-static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
+static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
 {
 	u32 data, spd0;
-   	int error, i;
-   	void __iomem *mmio = pe->mmio_base;
+	int error, i;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	/* hard-code chip #0 */
    	mmio += PDC_CHIP0_OFS;
@@ -1174,7 +1126,7 @@
 	readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
 
 	/* Turn on for ECC */
-	pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 			  PDC_DIMM_SPD_TYPE, &spd0);
 	if (spd0 == 0x02) {
 		data |= (0x01 << 16);
@@ -1201,7 +1153,7 @@
 }
 
 
-static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
+static unsigned int pdc20621_dimm_init(struct ata_host *host)
 {
 	int speed, size, length;
 	u32 addr,spd0,pci_status;
@@ -1211,7 +1163,7 @@
 	u32 ticks=0;
 	u32 clock=0;
 	u32 fparam=0;
-   	void __iomem *mmio = pe->mmio_base;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	/* hard-code chip #0 */
    	mmio += PDC_CHIP0_OFS;
@@ -1270,18 +1222,18 @@
 	   Read SPD of DIMM by I2C interface,
 	   and program the DIMM Module Controller.
 	*/
- 	if (!(speed = pdc20621_detect_dimm(pe))) {
+	if (!(speed = pdc20621_detect_dimm(host))) {
 		printk(KERN_ERR "Detect Local DIMM Fail\n");
 		return 1;	/* DIMM error */
    	}
    	VPRINTK("Local DIMM Speed = %d\n", speed);
 
    	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
-   	size = pdc20621_prog_dimm0(pe);
+	size = pdc20621_prog_dimm0(host);
    	VPRINTK("Local DIMM Size = %dMB\n",size);
 
    	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
-   	if (pdc20621_prog_dimm_global(pe)) {
+	if (pdc20621_prog_dimm_global(host)) {
 		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
 		return 1;
    	}
@@ -1294,20 +1246,20 @@
   				'9','8','0','3','1','6','1','2',0,0};
 		u8 test_parttern2[40] = {0};
 
-		pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x10040, 40);
-		pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x40, 40);
+		pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x10040, 40);
+		pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x40, 40);
 
-		pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40);
-		pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
+		pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x10040, 40);
+		pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40);
 		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
 		       test_parttern2[1], &(test_parttern2[2]));
-		pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040,
+		pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x10040,
 				       40);
 		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
 		       test_parttern2[1], &(test_parttern2[2]));
 
-		pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40);
-		pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
+		pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x40, 40);
+		pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40);
 		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
 		       test_parttern2[1], &(test_parttern2[2]));
 	}
@@ -1315,14 +1267,14 @@
 
 	/* ECC initiliazation. */
 
-	pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 			  PDC_DIMM_SPD_TYPE, &spd0);
 	if (spd0 == 0x02) {
 		VPRINTK("Start ECC initialization\n");
 		addr = 0;
 		length = size * 1024 * 1024;
 		while (addr < length) {
-			pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
+			pdc20621_put_to_dimm(host, (void *) &tmp, addr,
 					     sizeof(u32));
 			addr += sizeof(u32);
 		}
@@ -1332,10 +1284,10 @@
 }
 
 
-static void pdc_20621_init(struct ata_probe_ent *pe)
+static void pdc_20621_init(struct ata_host *host)
 {
 	u32 tmp;
-	void __iomem *mmio = pe->mmio_base;
+	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -1366,123 +1318,64 @@
 static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	unsigned long base;
-	void __iomem *mmio_base;
-	void __iomem *dimm_mmio = NULL;
-	struct pdc_host_priv *hpriv = NULL;
-	unsigned int board_idx = (unsigned int) ent->driver_data;
-	int pci_dev_busy = 0;
+	const struct ata_port_info *ppi[] =
+		{ &pdc_port_info[ent->driver_data], NULL };
+	struct ata_host *host;
+	void __iomem *base;
+	struct pdc_host_priv *hpriv;
 	int rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	/* allocate host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!host || !hpriv)
+		return -ENOMEM;
+
+	host->private_data = hpriv;
+
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
+				DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+
+	base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
+	pdc_sata_setup_port(&host->ports[0]->ioaddr, base + 0x200);
+	pdc_sata_setup_port(&host->ports[1]->ioaddr, base + 0x280);
+	pdc_sata_setup_port(&host->ports[2]->ioaddr, base + 0x300);
+	pdc_sata_setup_port(&host->ports[3]->ioaddr, base + 0x380);
 
+	/* configure and activate */
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
-		goto err_out_regions;
+		return rc;
 	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
-		goto err_out_regions;
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, 3, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
-
-	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-	memset(hpriv, 0, sizeof(*hpriv));
-
-	dimm_mmio = pci_iomap(pdev, 4, 0);
-	if (!dimm_mmio) {
-		kfree(hpriv);
-		rc = -ENOMEM;
-		goto err_out_iounmap;
-	}
-
-	hpriv->dimm_mmio = dimm_mmio;
+		return rc;
 
-	probe_ent->sht		= pdc_port_info[board_idx].sht;
-	probe_ent->host_flags	= pdc_port_info[board_idx].host_flags;
-	probe_ent->pio_mask	= pdc_port_info[board_idx].pio_mask;
-	probe_ent->mwdma_mask	= pdc_port_info[board_idx].mwdma_mask;
-	probe_ent->udma_mask	= pdc_port_info[board_idx].udma_mask;
-	probe_ent->port_ops	= pdc_port_info[board_idx].port_ops;
-
-       	probe_ent->irq = pdev->irq;
-       	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-
-	probe_ent->private_data = hpriv;
-	base += PDC_CHIP0_OFS;
-
-	probe_ent->n_ports = 4;
-	pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
-	pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
-	pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
-	pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
+	if (pdc20621_dimm_init(host))
+		return -ENOMEM;
+	pdc_20621_init(host);
 
 	pci_set_master(pdev);
-
-	/* initialize adapter */
-	/* initialize local dimm */
-	if (pdc20621_dimm_init(probe_ent)) {
-		rc = -ENOMEM;
-		goto err_out_iounmap_dimm;
-	}
-	pdc_20621_init(probe_ent);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_iounmap_dimm:		/* only get to this label if 20621 */
-	kfree(hpriv);
-	pci_iounmap(pdev, dimm_mmio);
-err_out_iounmap:
-	pci_iounmap(pdev, mmio_base);
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
+				 IRQF_SHARED, &pdc_sata_sht);
 }
 
 
 static int __init pdc_sata_init(void)
 {
-	return pci_module_init(&pdc_sata_pci_driver);
+	return pci_register_driver(&pdc_sata_pci_driver);
 }
 
 
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_uli.c linux-2.6.18.x86_64.p4/drivers/ata/sata_uli.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_uli.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_uli.c	2007-06-06 10:08:00.000000000 -0400
@@ -36,7 +36,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_uli"
-#define DRV_VERSION	"1.0"
+#define DRV_VERSION	"1.2"
 
 enum {
 	uli_5289		= 0,
@@ -61,13 +61,13 @@
 static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 
 static const struct pci_device_id uli_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 },
-	{ PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 },
-	{ PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 },
+	{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
+	{ PCI_VDEVICE(AL, 0x5287), uli_5287 },
+	{ PCI_VDEVICE(AL, 0x5281), uli_5281 },
+
 	{ }	/* terminate list */
 };
 
-
 static struct pci_driver uli_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= uli_pci_tbl,
@@ -108,27 +108,26 @@
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= uli_scr_read,
 	.scr_write		= uli_scr_write,
 
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
 };
 
-static struct ata_port_info uli_port_info = {
-	.sht            = &uli_sht,
-	.host_flags     = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+static const struct ata_port_info uli_port_info = {
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+			  ATA_FLAG_IGN_SIMPLEX,
 	.pio_mask       = 0x1f,		/* pio0-4 */
 	.udma_mask      = 0x7f,		/* udma0-6 */
 	.port_ops       = &uli_ops,
@@ -143,13 +142,13 @@
 
 static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
 {
-	struct uli_priv *hpriv = ap->host_set->private_data;
+	struct uli_priv *hpriv = ap->host->private_data;
 	return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
 }
 
 static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
 	u32 val;
 
@@ -159,7 +158,7 @@
 
 static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
 
 	pci_write_config_dword(pdev, cfg_addr, val);
@@ -184,70 +183,71 @@
 static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_probe_ent *probe_ent;
-	struct ata_port_info *ppi;
-	int rc;
+	const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
 	unsigned int board_idx = (unsigned int) ent->driver_data;
-	int pci_dev_busy = 0;
+	struct ata_host *host;
 	struct uli_priv *hpriv;
+	void __iomem * const *iomap;
+	struct ata_ioports *ioaddr;
+	int n_ports, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	n_ports = 2;
+	if (board_idx == uli_5287)
+		n_ports = 4;
+
+	/* allocate the host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+
+	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	host->private_data = hpriv;
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	/* the first two ports are standard SFF */
+	rc = ata_pci_init_native_host(host);
 	if (rc)
-		goto err_out_regions;
-
-	ppi = &uli_port_info;
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-	if (!probe_ent) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
+		return rc;
 
-	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv) {
-		rc = -ENOMEM;
-		goto err_out_probe_ent;
-	}
+	rc = ata_pci_init_bmdma(host);
+	if (rc)
+		return rc;
 
-	probe_ent->private_data = hpriv;
+	iomap = host->iomap;
 
 	switch (board_idx) {
 	case uli_5287:
+		/* If there are four, the last two live right after
+		 * the standard SFF ports.
+		 */
 		hpriv->scr_cfg_addr[0] = ULI5287_BASE;
 		hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
-       		probe_ent->n_ports = 4;
 
-       		probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
-		probe_ent->port[2].altstatus_addr =
-		probe_ent->port[2].ctl_addr =
-			(pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
-		probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
+		ioaddr = &host->ports[2]->ioaddr;
+		ioaddr->cmd_addr = iomap[0] + 8;
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = (void __iomem *)
+			((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
+		ioaddr->bmdma_addr = iomap[4] + 16;
 		hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
+		ata_std_ports(ioaddr);
 
-		probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
-		probe_ent->port[3].altstatus_addr =
-		probe_ent->port[3].ctl_addr =
-			(pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
-		probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
+		ioaddr = &host->ports[3]->ioaddr;
+		ioaddr->cmd_addr = iomap[2] + 8;
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = (void __iomem *)
+			((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
+		ioaddr->bmdma_addr = iomap[4] + 24;
 		hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
-
-		ata_std_ports(&probe_ent->port[2]);
-		ata_std_ports(&probe_ent->port[3]);
+		ata_std_ports(ioaddr);
 		break;
 
 	case uli_5289:
@@ -267,27 +267,13 @@
 
 	pci_set_master(pdev);
 	pci_intx(pdev, 1);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_probe_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
-
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &uli_sht);
 }
 
 static int __init uli_init(void)
 {
-	return pci_module_init(&uli_pci_driver);
+	return pci_register_driver(&uli_pci_driver);
 }
 
 static void __exit uli_exit(void)
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_via.c linux-2.6.18.x86_64.p4/drivers/ata/sata_via.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_via.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_via.c	2007-06-06 10:08:00.000000000 -0400
@@ -44,10 +44,9 @@
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 
 #define DRV_NAME	"sata_via"
-#define DRV_VERSION	"2.0"
+#define DRV_VERSION	"2.2"
 
 enum board_ids_enum {
 	vt6420,
@@ -59,11 +58,12 @@
 	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
 	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
 	SATA_PATA_SHARING	= 0x49, /* PATA/SATA sharing func ctrl */
+	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/ cable detect */
+	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */
 
 	PORT0			= (1 << 1),
 	PORT1			= (1 << 0),
 	ALL_PORTS		= PORT0 | PORT1,
-	N_PORTS			= 2,
 
 	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
 
@@ -74,12 +74,20 @@
 static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static void svia_noop_freeze(struct ata_port *ap);
 static void vt6420_error_handler(struct ata_port *ap);
+static int vt6421_pata_cable_detect(struct ata_port *ap);
+static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
+static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
 
 static const struct pci_device_id svia_pci_tbl[] = {
-	{ 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
-	{ 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
-	{ 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
+	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },
+	{ PCI_VDEVICE(VIA, 0x5287), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
+	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
 
 	{ }	/* terminate list */
 };
@@ -88,6 +96,10 @@
 	.name			= DRV_NAME,
 	.id_table		= svia_pci_tbl,
 	.probe			= svia_init_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
 	.remove			= ata_pci_remove_one,
 };
 
@@ -125,19 +137,52 @@
 
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
-	.freeze			= ata_bmdma_freeze,
+	.freeze			= svia_noop_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= vt6420_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
+
+	.port_start		= ata_port_start,
+};
+
+static const struct ata_port_operations vt6421_pata_ops = {
+	.port_disable		= ata_port_disable,
+
+	.set_piomode		= vt6421_set_pio_mode,
+	.set_dmamode		= vt6421_set_dma_mode,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.bmdma_setup            = ata_bmdma_setup,
+	.bmdma_start            = ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= vt6421_pata_cable_detect,
+
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
 };
 
 static const struct ata_port_operations vt6421_sata_ops = {
@@ -156,33 +201,48 @@
 
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_pio_data_xfer,
+	.data_xfer		= ata_data_xfer,
 
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_sata,
 
-	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= svia_scr_read,
 	.scr_write		= svia_scr_write,
 
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
 };
 
-static struct ata_port_info vt6420_port_info = {
-	.sht		= &svia_sht,
-	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+static const struct ata_port_info vt6420_port_info = {
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0x07,
 	.udma_mask	= 0x7f,
 	.port_ops	= &vt6420_sata_ops,
 };
 
+static struct ata_port_info vt6421_sport_info = {
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+	.pio_mask	= 0x1f,
+	.mwdma_mask	= 0x07,
+	.udma_mask	= 0x7f,
+	.port_ops	= &vt6421_sata_ops,
+};
+
+static struct ata_port_info vt6421_pport_info = {
+	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
+	.pio_mask	= 0x1f,
+	.mwdma_mask	= 0,
+	.udma_mask	= 0x7f,
+	.port_ops	= &vt6421_pata_ops,
+};
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
 MODULE_LICENSE("GPL");
@@ -193,19 +253,29 @@
 {
 	if (sc_reg > SCR_CONTROL)
 		return 0xffffffffU;
-	return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
+	return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
 }
 
 static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
 		return;
-	outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
+	iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
+}
+
+static void svia_noop_freeze(struct ata_port *ap)
+{
+	/* Some VIA controllers choke if ATA_NIEN is manipulated in
+	 * certain way.  Leave it alone and just clear pending IRQ.
+	 */
+	ata_chk_status(ap);
+	ata_bmdma_irq_clear(ap);
 }
 
 /**
  *	vt6420_prereset - prereset for vt6420
  *	@ap: target ATA port
+ *	@deadline: deadline jiffies for the operation
  *
  *	SCR registers on vt6420 are pieces of shit and may hang the
  *	whole machine completely if accessed with the wrong timing.
@@ -222,7 +292,7 @@
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-static int vt6420_prereset(struct ata_port *ap)
+static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
 {
 	struct ata_eh_context *ehc = &ap->eh_context;
 	unsigned long timeout = jiffies + (HZ * 5);
@@ -230,7 +300,7 @@
 	int online;
 
 	/* don't do any SCR stuff if we're not loading */
-	if (!ATA_PFLAG_LOADING)
+	if (!(ap->pflags & ATA_PFLAG_LOADING))
 		goto skip_scr;
 
 	/* Resume phy.  This is the old resume sequence from
@@ -267,7 +337,7 @@
 
  skip_scr:
 	/* wait for !BSY */
-	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+	ata_wait_ready(ap, deadline);
 
 	return 0;
 }
@@ -278,6 +348,31 @@
 				  NULL, ata_std_postreset);
 }
 
+static int vt6421_pata_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u8 tmp;
+
+	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
+	if (tmp & 0x10)
+		return ATA_CBL_PATA40;
+	return ATA_CBL_PATA80;
+}
+
+static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
+	pci_write_config_byte(pdev, PATA_PIO_TIMING, pio_bits[adev->pio_mode - XFER_PIO_0]);
+}
+
+static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
+	pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->pio_mode - XFER_UDMA_0]);
+}
+
 static const unsigned int svia_bar_sizes[] = {
 	8, 4, 8, 4, 16, 256
 };
@@ -286,79 +381,88 @@
 	16, 16, 16, 16, 32, 128
 };
 
-static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
+static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port)
 {
 	return addr + (port * 128);
 }
 
-static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
+static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port)
 {
 	return addr + (port * 64);
 }
 
-static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
-			      struct pci_dev *pdev,
-			      unsigned int port)
+static void vt6421_init_addrs(struct ata_port *ap)
 {
-	unsigned long reg_addr = pci_resource_start(pdev, port);
-	unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
-	unsigned long scr_addr;
+	void __iomem * const * iomap = ap->host->iomap;
+	void __iomem *reg_addr = iomap[ap->port_no];
+	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
+	struct ata_ioports *ioaddr = &ap->ioaddr;
 
-	probe_ent->port[port].cmd_addr = reg_addr;
-	probe_ent->port[port].altstatus_addr =
-	probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
-	probe_ent->port[port].bmdma_addr = bmdma_addr;
+	ioaddr->cmd_addr = reg_addr;
+	ioaddr->altstatus_addr =
+	ioaddr->ctl_addr = (void __iomem *)
+		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
+	ioaddr->bmdma_addr = bmdma_addr;
+	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
 
-	scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
-	probe_ent->port[port].scr_addr = scr_addr;
-
-	ata_std_ports(&probe_ent->port[port]);
+	ata_std_ports(ioaddr);
 }
 
-static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
+static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 {
-	struct ata_probe_ent *probe_ent;
-	struct ata_port_info *ppi = &vt6420_port_info;
+	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
+	struct ata_host *host;
+	int rc;
 
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
-	if (!probe_ent)
-		return NULL;
+	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	if (rc)
+		return rc;
+	*r_host = host;
 
-	probe_ent->port[0].scr_addr =
-		svia_scr_addr(pci_resource_start(pdev, 5), 0);
-	probe_ent->port[1].scr_addr =
-		svia_scr_addr(pci_resource_start(pdev, 5), 1);
+	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+	if (rc) {
+		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+		return rc;
+	}
+
+	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
+	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);
 
-	return probe_ent;
+	return 0;
 }
 
-static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
+static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 {
-	struct ata_probe_ent *probe_ent;
-	unsigned int i;
+	const struct ata_port_info *ppi[] =
+		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
+	struct ata_host *host;
+	int i, rc;
+
+	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
+	if (!host) {
+		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+		return -ENOMEM;
+	}
 
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (!probe_ent)
-		return NULL;
-
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	probe_ent->sht		= &svia_sht;
-	probe_ent->host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
-	probe_ent->port_ops	= &vt6421_sata_ops;
-	probe_ent->n_ports	= N_PORTS;
-	probe_ent->irq		= pdev->irq;
-	probe_ent->irq_flags	= IRQF_SHARED;
-	probe_ent->pio_mask	= 0x1f;
-	probe_ent->mwdma_mask	= 0x07;
-	probe_ent->udma_mask	= 0x7f;
+	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	if (rc) {
+		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
+			   "PCI BARs (errno=%d)\n", rc);
+		return rc;
+	}
+	host->iomap = pcim_iomap_table(pdev);
 
-	for (i = 0; i < N_PORTS; i++)
-		vt6421_init_addrs(probe_ent, pdev, i);
+	for (i = 0; i < host->n_ports; i++)
+		vt6421_init_addrs(host->ports[i]);
 
-	return probe_ent;
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	return 0;
 }
 
 static void svia_configure(struct pci_dev *pdev)
@@ -405,33 +509,25 @@
 	static int printed_version;
 	unsigned int i;
 	int rc;
-	struct ata_probe_ent *probe_ent;
+	struct ata_host *host;
 	int board_id = (int) ent->driver_data;
 	const int *bar_sizes;
-	int pci_dev_busy = 0;
 	u8 tmp8;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
-
 	if (board_id == vt6420) {
 		pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
 		if (tmp8 & SATA_2DEV) {
 			dev_printk(KERN_ERR, &pdev->dev,
 				   "SATA master/slave not supported (0x%x)\n",
 		       		   (int) tmp8);
-			rc = -EIO;
-			goto err_out_regions;
+			return -EIO;
 		}
 
 		bar_sizes = &svia_bar_sizes[0];
@@ -447,49 +543,26 @@
 				i,
 			        (unsigned long long)pci_resource_start(pdev, i),
 			        (unsigned long long)pci_resource_len(pdev, i));
-			rc = -ENODEV;
-			goto err_out_regions;
+			return -ENODEV;
 		}
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-
 	if (board_id == vt6420)
-		probe_ent = vt6420_init_probe_ent(pdev);
+		rc = vt6420_prepare_host(pdev, &host);
 	else
-		probe_ent = vt6421_init_probe_ent(pdev);
-
-	if (!probe_ent) {
-		dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
+		rc = vt6421_prepare_host(pdev, &host);
+	if (rc)
+		return rc;
 
 	svia_configure(pdev);
 
 	pci_set_master(pdev);
-
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &svia_sht);
 }
 
 static int __init svia_init(void)
 {
-	return pci_module_init(&svia_pci_driver);
+	return pci_register_driver(&svia_pci_driver);
 }
 
 static void __exit svia_exit(void)
@@ -499,4 +572,3 @@
 
 module_init(svia_init);
 module_exit(svia_exit);
-
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sata_vsc.c linux-2.6.18.x86_64.p4/drivers/ata/sata_vsc.c
--- linux-2.6.18.x86_64.p3/drivers/ata/sata_vsc.c	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/ata/sata_vsc.c	2007-06-06 10:08:00.000000000 -0400
@@ -47,9 +47,11 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_vsc"
-#define DRV_VERSION	"2.0"
+#define DRV_VERSION	"2.2"
 
 enum {
+	VSC_MMIO_BAR			= 0,
+
 	/* Interrupt register offsets (from chip base address) */
 	VSC_SATA_INT_STAT_OFFSET	= 0x00,
 	VSC_SATA_INT_MASK_OFFSET	= 0x04,
@@ -96,16 +98,11 @@
 			      VSC_SATA_INT_PHY_CHANGE),
 };
 
-
-#define is_vsc_sata_int_err(port_idx, int_status) \
-	 (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))
-
-
 static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
 	if (sc_reg > SCR_CONTROL)
 		return 0xffffffffU;
-	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -114,7 +111,29 @@
 {
 	if (sc_reg > SCR_CONTROL)
 		return;
-	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+}
+
+
+static void vsc_freeze(struct ata_port *ap)
+{
+	void __iomem *mask_addr;
+
+	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+	writeb(0, mask_addr);
+}
+
+
+static void vsc_thaw(struct ata_port *ap)
+{
+	void __iomem *mask_addr;
+
+	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+	writeb(0xff, mask_addr);
 }
 
 
@@ -123,7 +142,7 @@
 	void __iomem *mask_addr;
 	u8 mask;
 
-	mask_addr = ap->host_set->mmio_base +
+	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
 		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
 	mask = readb(mask_addr);
 	if (ctl & ATA_NIEN)
@@ -149,11 +168,16 @@
 		vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
 	}
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr);
-		writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
-		writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
-		writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
-		writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
+		writew(tf->feature | (((u16)tf->hob_feature) << 8),
+		       ioaddr->feature_addr);
+		writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+		       ioaddr->nsect_addr);
+		writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+		       ioaddr->lbal_addr);
+		writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+		       ioaddr->lbam_addr);
+		writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+		       ioaddr->lbah_addr);
 	} else if (is_addr) {
 		writew(tf->feature, ioaddr->feature_addr);
 		writew(tf->nsect, ioaddr->nsect_addr);
@@ -197,70 +221,77 @@
         }
 }
 
+static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
+{
+	if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void vsc_port_intr(u8 port_status, struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	int handled = 0;
+
+	if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
+		vsc_error_intr(port_status, ap);
+		return;
+	}
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
+		handled = ata_host_intr(ap, qc);
+
+	/* We received an interrupt during a polled command,
+	 * or some other spurious condition.  Interrupt reporting
+	 * with this hardware is fairly reliable so it is safe to
+	 * simply clear the interrupt
+	 */
+	if (unlikely(!handled))
+		ata_chk_status(ap);
+}
 
 /*
  * vsc_sata_interrupt
  *
  * Read the interrupt register and process for the devices that have them pending.
  */
-static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
-				       struct pt_regs *regs)
+static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int i;
 	unsigned int handled = 0;
-	u32 int_status;
+	u32 status;
 
-	spin_lock(&host_set->lock);
+	status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
 
-	int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
-
-	for (i = 0; i < host_set->n_ports; i++) {
-		if (int_status & ((u32) 0xFF << (8 * i))) {
-			struct ata_port *ap;
+	if (unlikely(status == 0xffffffff || status == 0)) {
+		if (status)
+			dev_printk(KERN_ERR, host->dev,
+				": IRQ status == 0xffffffff, "
+				"PCI fault or device removal?\n");
+		goto out;
+	}
 
-			ap = host_set->ports[i];
+	spin_lock(&host->lock);
 
-			if (is_vsc_sata_int_err(i, int_status)) {
-				u32 err_status;
-				printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
-				err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
-				vsc_sata_scr_write(ap, SCR_ERROR, err_status);
-				handled++;
-			}
+	for (i = 0; i < host->n_ports; i++) {
+		u8 port_status = (status >> (8 * i)) & 0xff;
+		if (port_status) {
+			struct ata_port *ap = host->ports[i];
 
 			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				struct ata_queued_cmd *qc;
-
-				qc = ata_qc_from_tag(ap, ap->active_tag);
-				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
-					handled += ata_host_intr(ap, qc);
-				else if (is_vsc_sata_int_err(i, int_status)) {
-					/*
-					 * On some chips (i.e. Intel 31244), an error
-					 * interrupt will sneak in at initialization
-					 * time (phy state changes).  Clearing the SCR
-					 * error register is not required, but it prevents
-					 * the phy state change interrupts from recurring
-					 * later.
-					 */
-					u32 err_status;
-					err_status = vsc_sata_scr_read(ap, SCR_ERROR);
-					printk(KERN_DEBUG "%s: clearing interrupt, "
-					       "status %x; sata err status %x\n",
-					       __FUNCTION__,
-					       int_status, err_status);
-					vsc_sata_scr_write(ap, SCR_ERROR, err_status);
-					/* Clear interrupt status */
-					ata_chk_status(ap);
-					handled++;
-				}
-			}
+				vsc_port_intr(port_status, ap);
+				handled++;
+			} else
+				dev_printk(KERN_ERR, host->dev,
+					": interrupt from disabled port %d\n", i);
 		}
 	}
 
-	spin_unlock(&host_set->lock);
-
+	spin_unlock(&host->lock);
+out:
 	return IRQ_RETVAL(handled);
 }
 
@@ -297,21 +328,21 @@
 	.bmdma_status		= ata_bmdma_status,
 	.qc_prep		= ata_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
-	.data_xfer		= ata_mmio_data_xfer,
-	.freeze			= ata_bmdma_freeze,
-	.thaw			= ata_bmdma_thaw,
+	.data_xfer		= ata_data_xfer,
+	.freeze			= vsc_freeze,
+	.thaw			= vsc_thaw,
 	.error_handler		= ata_bmdma_error_handler,
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
-	.irq_handler		= vsc_sata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.irq_ack		= ata_irq_ack,
 	.scr_read		= vsc_sata_scr_read,
 	.scr_write		= vsc_sata_scr_write,
 	.port_start		= ata_port_start,
-	.port_stop		= ata_port_stop,
-	.host_stop		= ata_pci_host_stop,
 };
 
-static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
+static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
+					  void __iomem *base)
 {
 	port->cmd_addr		= base + VSC_SATA_TF_CMD_OFFSET;
 	port->data_addr		= base + VSC_SATA_TF_DATA_OFFSET;
@@ -335,88 +366,71 @@
 
 static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	static const struct ata_port_info pi = {
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO,
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask	= 0x7f,
+		.port_ops	= &vsc_sata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &pi, NULL };
 	static int printed_version;
-	struct ata_probe_ent *probe_ent = NULL;
-	unsigned long base;
-	int pci_dev_busy = 0;
+	struct ata_host *host;
 	void __iomem *mmio_base;
-	int rc;
+	int i, rc;
+	u8 cls;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	rc = pci_enable_device(pdev);
+	/* allocate host */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+	if (!host)
+		return -ENOMEM;
+
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	/*
-	 * Check if we have needed resource mapped.
-	 */
-	if (pci_resource_len(pdev, 0) == 0) {
-		rc = -ENODEV;
-		goto err_out;
-	}
+	/* check if we have needed resource mapped */
+	if (pci_resource_len(pdev, 0) == 0)
+		return -ENODEV;
+
+	/* map IO regions and intialize host accordingly */
+	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
 
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc) {
-		pci_dev_busy = 1;
-		goto err_out;
-	}
+	mmio_base = host->iomap[VSC_MMIO_BAR];
+
+	for (i = 0; i < host->n_ports; i++)
+		vsc_sata_setup_port(&host->ports[i]->ioaddr,
+				    mmio_base + (i + 1) * VSC_SATA_PORT_OFFSET);
 
 	/*
 	 * Use 32 bit DMA mask, because 64 bit address support is poor.
 	 */
 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 	if (rc)
-		goto err_out_regions;
+		return rc;
 	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 	if (rc)
-		goto err_out_regions;
-
-	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
-	if (probe_ent == NULL) {
-		rc = -ENOMEM;
-		goto err_out_regions;
-	}
-	memset(probe_ent, 0, sizeof(*probe_ent));
-	probe_ent->dev = pci_dev_to_dev(pdev);
-	INIT_LIST_HEAD(&probe_ent->node);
-
-	mmio_base = pci_iomap(pdev, 0, 0);
-	if (mmio_base == NULL) {
-		rc = -ENOMEM;
-		goto err_out_free_ent;
-	}
-	base = (unsigned long) mmio_base;
+		return rc;
 
 	/*
-	 * Due to a bug in the chip, the default cache line size can't be used
+	 * Due to a bug in the chip, the default cache line size can't be
+	 * used (unless the default is non-zero).
 	 */
-	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
+	if (cls == 0x00)
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
 
-	probe_ent->sht = &vsc_sata_sht;
-	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				ATA_FLAG_MMIO;
-	probe_ent->port_ops = &vsc_sata_ops;
-	probe_ent->n_ports = 4;
-	probe_ent->irq = pdev->irq;
-	probe_ent->irq_flags = IRQF_SHARED;
-	probe_ent->mmio_base = mmio_base;
-
-	/* We don't care much about the PIO/UDMA masks, but the core won't like us
-	 * if we don't fill these
-	 */
-	probe_ent->pio_mask = 0x1f;
-	probe_ent->mwdma_mask = 0x07;
-	probe_ent->udma_mask = 0x7f;
-
-	/* We have 4 ports per PCI function */
-	vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
-	vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
-	vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
-	vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
-
-	pci_set_master(pdev);
+	if (pci_enable_msi(pdev) == 0)
+		pci_intx(pdev, 0);
 
 	/*
 	 * Config offset 0x98 is "Extended Control and Status Register 0"
@@ -426,32 +440,20 @@
 	 */
 	pci_write_config_dword(pdev, 0x98, 0);
 
-	/* FIXME: check ata_device_add return value */
-	ata_device_add(probe_ent);
-	kfree(probe_ent);
-
-	return 0;
-
-err_out_free_ent:
-	kfree(probe_ent);
-err_out_regions:
-	pci_release_regions(pdev);
-err_out:
-	if (!pci_dev_busy)
-		pci_disable_device(pdev);
-	return rc;
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
+				 IRQF_SHARED, &vsc_sata_sht);
 }
 
-
 static const struct pci_device_id vsc_sata_pci_tbl[] = {
 	{ PCI_VENDOR_ID_VITESSE, 0x7174,
 	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
 	{ PCI_VENDOR_ID_INTEL, 0x3200,
 	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+
 	{ }	/* terminate list */
 };
 
-
 static struct pci_driver vsc_sata_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= vsc_sata_pci_tbl,
@@ -459,19 +461,16 @@
 	.remove			= ata_pci_remove_one,
 };
 
-
 static int __init vsc_sata_init(void)
 {
-	return pci_module_init(&vsc_sata_pci_driver);
+	return pci_register_driver(&vsc_sata_pci_driver);
 }
 
-
 static void __exit vsc_sata_exit(void)
 {
 	pci_unregister_driver(&vsc_sata_pci_driver);
 }
 
-
 MODULE_AUTHOR("Jeremy Higdon");
 MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
 MODULE_LICENSE("GPL");
diff -urN linux-2.6.18.x86_64.p3/drivers/ata/sis.h linux-2.6.18.x86_64.p4/drivers/ata/sis.h
--- linux-2.6.18.x86_64.p3/drivers/ata/sis.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/drivers/ata/sis.h	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,5 @@
+
+struct ata_port_info;
+
+/* pata_sis.c */
+extern const struct ata_port_info sis_info133;
diff -urN linux-2.6.18.x86_64.p3/drivers/Makefile linux-2.6.18.x86_64.p4/drivers/Makefile
--- linux-2.6.18.x86_64.p3/drivers/Makefile	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/Makefile	2007-06-06 10:08:00.000000000 -0400
@@ -35,7 +35,7 @@
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_FC4)		+= fc4/
 obj-$(CONFIG_SCSI)		+= scsi/
-obj-$(CONFIG_SCSI)		+= ata/
+obj-$(CONFIG_ATA)		+= ata/
 obj-$(CONFIG_FUSION)		+= message/
 obj-$(CONFIG_IEEE1394)		+= ieee1394/
 obj-y				+= cdrom/
diff -urN linux-2.6.18.x86_64.p3/drivers/pci/quirks.c linux-2.6.18.x86_64.p4/drivers/pci/quirks.c
--- linux-2.6.18.x86_64.p3/drivers/pci/quirks.c	2007-06-05 10:43:12.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/pci/quirks.c	2007-06-06 10:08:00.000000000 -0400
@@ -1277,6 +1277,8 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_EESSC,	quirk_alder_ioapic );
 #endif
 
+#ifdef CONFIG_ATA_INTEL_COMBINED
+
 enum ide_combined_type { COMBINED = 0, IDE = 1, LIBATA = 2 };
 /* Defaults to combined */
 static enum ide_combined_type combined_mode;
@@ -1294,7 +1296,6 @@
 }
 __setup("combined_mode=", combined_setup);
 
-#ifdef CONFIG_SCSI_SATA_INTEL_COMBINED
 static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
 {
 	u8 prog, comb, tmp;
@@ -1387,7 +1388,7 @@
 		request_region(0x170, 8, "libata");	/* port 1 */
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    PCI_ANY_ID,	  quirk_intel_ide_combined );
-#endif /* CONFIG_SCSI_SATA_INTEL_COMBINED */
+#endif /* CONFIG_ATA_INTEL_COMBINED */
 
 
 int pcie_mch_quirk;
diff -urN linux-2.6.18.x86_64.p3/drivers/scsi/ipr.c linux-2.6.18.x86_64.p4/drivers/scsi/ipr.c
--- linux-2.6.18.x86_64.p3/drivers/scsi/ipr.c	2007-06-05 10:43:13.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/scsi/ipr.c	2007-06-06 10:08:00.000000000 -0400
@@ -3469,7 +3469,7 @@
 		if (!sata_port)
 			return -ENOMEM;
 
-		ap = ata_sas_port_alloc(&ioa_cfg->ata_host_set, &sata_port_info, shost);
+		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
 		if (ap) {
 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 			sata_port->ioa_cfg = ioa_cfg;
@@ -3765,7 +3765,8 @@
  * Return value:
  *	0 on success / non-zero on failure
  **/
-static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
+static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes,
+			  unsigned long deadline)
 {
 	struct ipr_sata_port *sata_port = ap->private_data;
 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
@@ -5211,7 +5212,7 @@
 };
 
 static struct ata_port_info sata_port_info = {
-	.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
+	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
 	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
 	.pio_mask	= 0x10, /* pio4 */
 	.mwdma_mask = 0x07,
@@ -7262,8 +7263,8 @@
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-	ata_host_set_init(&ioa_cfg->ata_host_set, &pdev->dev,
-			  sata_port_info.host_flags, &ipr_sata_ops);
+	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
+			  sata_port_info.flags, &ipr_sata_ops);
 
 	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
 
diff -urN linux-2.6.18.x86_64.p3/drivers/scsi/ipr.h linux-2.6.18.x86_64.p4/drivers/scsi/ipr.h
--- linux-2.6.18.x86_64.p3/drivers/scsi/ipr.h	2007-06-05 10:43:13.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/scsi/ipr.h	2007-06-06 10:08:00.000000000 -0400
@@ -1153,7 +1153,7 @@
 	struct ipr_cmnd *reset_cmd;
 	int (*reset) (struct ipr_cmnd *);
 
-	struct ata_host_set ata_host_set;
+	struct ata_host ata_host;
 	char ipr_cmd_label[8];
 #define IPR_CMD_LABEL		"ipr_cmnd"
 	struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
diff -urN linux-2.6.18.x86_64.p3/drivers/scsi/Kconfig linux-2.6.18.x86_64.p4/drivers/scsi/Kconfig
--- linux-2.6.18.x86_64.p3/drivers/scsi/Kconfig	2007-06-06 10:07:07.000000000 -0400
+++ linux-2.6.18.x86_64.p4/drivers/scsi/Kconfig	2007-06-06 10:08:00.000000000 -0400
@@ -1000,9 +1000,8 @@
 
 config SCSI_IPR
 	tristate "IBM Power Linux RAID adapter support"
-	depends on PCI && SCSI
+	depends on PCI && SCSI && ATA
 	select FW_LOADER
-	select SCSI_SATA
 	---help---
 	  This driver supports the IBM Power Linux family RAID adapters.
 	  This includes IBM pSeries 5712, 5703, 5709, and 570A, as well
diff -urN linux-2.6.18.x86_64.p3/include/asm-generic/libata-portmap.h linux-2.6.18.x86_64.p4/include/asm-generic/libata-portmap.h
--- linux-2.6.18.x86_64.p3/include/asm-generic/libata-portmap.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/include/asm-generic/libata-portmap.h	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
+#define __ASM_GENERIC_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_CMD		0x1F0
+#define ATA_PRIMARY_CTL		0x3F6
+#define ATA_PRIMARY_IRQ(dev)	14
+
+#define ATA_SECONDARY_CMD	0x170
+#define ATA_SECONDARY_CTL	0x376
+#define ATA_SECONDARY_IRQ(dev)	15
+
+#endif
diff -urN linux-2.6.18.x86_64.p3/include/linux/ata.h linux-2.6.18.x86_64.p4/include/linux/ata.h
--- linux-2.6.18.x86_64.p3/include/linux/ata.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.x86_64.p4/include/linux/ata.h	2007-06-06 10:08:00.000000000 -0400
@@ -40,11 +40,14 @@
 	ATA_MAX_DEVICES		= 2,	/* per bus/port */
 	ATA_MAX_PRD		= 256,	/* we could make these 256/256 */
 	ATA_SECT_SIZE		= 512,
+	ATA_MAX_SECTORS_128	= 128,
+	ATA_MAX_SECTORS		= 256,
+	ATA_MAX_SECTORS_LBA48	= 65535,/* TODO: 65536? */
 
 	ATA_ID_WORDS		= 256,
-	ATA_ID_SERNO_OFS	= 10,
-	ATA_ID_FW_REV_OFS	= 23,
-	ATA_ID_PROD_OFS		= 27,
+	ATA_ID_SERNO		= 10,
+	ATA_ID_FW_REV		= 23,
+	ATA_ID_PROD		= 27,
 	ATA_ID_OLD_PIO_MODES	= 51,
 	ATA_ID_FIELD_VALID	= 53,
 	ATA_ID_MWDMA_MODES	= 63,
@@ -56,8 +59,11 @@
 	ATA_ID_MAJOR_VER	= 80,
 	ATA_ID_PIO4		= (1 << 1),
 
+	ATA_ID_SERNO_LEN	= 20,
+	ATA_ID_FW_REV_LEN	= 8,
+	ATA_ID_PROD_LEN		= 40,
+
 	ATA_PCI_CTL_OFS		= 2,
-	ATA_SERNO_LEN		= 20,
 	ATA_UDMA0		= (1 << 0),
 	ATA_UDMA1		= ATA_UDMA0 | (1 << 1),
 	ATA_UDMA2		= ATA_UDMA1 | (1 << 2),
@@ -153,11 +159,19 @@
 	ATA_CMD_INIT_DEV_PARAMS	= 0x91,
 	ATA_CMD_READ_NATIVE_MAX	= 0xF8,
 	ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
+	ATA_CMD_SET_MAX		= 0xF9,
+	ATA_CMD_SET_MAX_EXT	= 0x37,
 	ATA_CMD_READ_LOG_EXT	= 0x2f,
 
 	/* READ_LOG_EXT pages */
 	ATA_LOG_SATA_NCQ	= 0x10,
 
+	/* READ/WRITE LONG (obsolete) */
+	ATA_CMD_READ_LONG	= 0x22,
+	ATA_CMD_READ_LONG_ONCE	= 0x23,
+	ATA_CMD_WRITE_LONG	= 0x32,
+	ATA_CMD_WRITE_LONG_ONCE	= 0x33,
+
 	/* SETFEATURES stuff */
 	SETFEATURES_XFER	= 0x03,
 	XFER_UDMA_7		= 0x47,
@@ -168,12 +182,16 @@
 	XFER_UDMA_2		= 0x42,
 	XFER_UDMA_1		= 0x41,
 	XFER_UDMA_0		= 0x40,
+	XFER_MW_DMA_4		= 0x24,	/* CFA only */
+	XFER_MW_DMA_3		= 0x23,	/* CFA only */
 	XFER_MW_DMA_2		= 0x22,
 	XFER_MW_DMA_1		= 0x21,
 	XFER_MW_DMA_0		= 0x20,
 	XFER_SW_DMA_2		= 0x12,
 	XFER_SW_DMA_1		= 0x11,
 	XFER_SW_DMA_0		= 0x10,
+	XFER_PIO_6		= 0x0E,	/* CFA only */
+	XFER_PIO_5		= 0x0D,	/* CFA only */
 	XFER_PIO_4		= 0x0C,
 	XFER_PIO_3		= 0x0B,
 	XFER_PIO_2		= 0x0A,
@@ -184,6 +202,8 @@
 	SETFEATURES_WC_ON	= 0x02, /* Enable write cache */
 	SETFEATURES_WC_OFF	= 0x82, /* Disable write cache */
 
+	SETFEATURES_SPINUP	= 0x07, /* Spin-up drive */
+
 	/* ATAPI stuff */
 	ATAPI_PKT_DMA		= (1 << 0),
 	ATAPI_DMADIR		= (1 << 2),	/* ATAPI data dir:
@@ -194,8 +214,9 @@
 	ATA_CBL_NONE		= 0,
 	ATA_CBL_PATA40		= 1,
 	ATA_CBL_PATA80		= 2,
-	ATA_CBL_PATA_UNK	= 3,
-	ATA_CBL_SATA		= 4,
+	ATA_CBL_PATA40_SHORT	= 3,		/* 40 wire cable to high UDMA spec */
+	ATA_CBL_PATA_UNK	= 4,
+	ATA_CBL_SATA		= 5,
 
 	/* SATA Status and Control Registers */
 	SCR_STATUS		= 0,
@@ -272,8 +293,6 @@
 };
 
 #define ata_id_is_ata(id)	(((id)[0] & (1 << 15)) == 0)
-#define ata_id_is_cfa(id)	((id)[0] == 0x848A)
-#define ata_id_is_sata(id)	((id)[93] == 0)
 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
 #define ata_id_hpa_enabled(id)	((id)[85] & (1 << 10))
@@ -290,6 +309,8 @@
 #define ata_id_queue_depth(id)	(((id)[75] & 0x1f) + 1)
 #define ata_id_removeable(id)	((id)[0] & (1 << 7))
 #define ata_id_has_dword_io(id)	((id)[50] & (1 << 0))
+#define ata_id_iordy_disable(id) ((id)[49] & (1 << 10))
+#define ata_id_has_iordy(id) ((id)[49] & (1 << 9))
 #define ata_id_u32(id,n)	\
 	(((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)]))
 #define ata_id_u64(id,n)	\
@@ -304,16 +325,24 @@
 {
 	unsigned int mver;
 
+	if (id[ATA_ID_MAJOR_VER] == 0xFFFF)
+		return 0;
+
 	for (mver = 14; mver >= 1; mver--)
 		if (id[ATA_ID_MAJOR_VER] & (1 << mver))
 			break;
 	return mver;
 }
 
+static inline int ata_id_is_sata(const u16 *id)
+{
+	return ata_id_major_version(id) >= 5 && id[93] == 0;
+}
+
 static inline int ata_id_current_chs_valid(const u16 *id)
 {
-	/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 
-	   has not been issued to the device then the values of 
+	/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
+	   has not been issued to the device then the values of
 	   id[54] to id[56] are vendor specific. */
 	return (id[53] & 0x01) && /* Current translation valid */
 		id[54] &&  /* cylinders in current translation */
@@ -322,6 +351,27 @@
 		id[56];    /* sectors in current translation */
 }
 
+static inline int ata_id_is_cfa(const u16 *id)
+{
+	u16 v = id[0];
+	if (v == 0x848A)	/* Standard CF */
+		return 1;
+	/* Could be CF hiding as standard ATA */
+	if (ata_id_major_version(id) >= 3 &&  id[82] != 0xFFFF &&
+			(id[82] & ( 1 << 2)))
+		return 1;
+	return 0;
+}
+
+static inline int ata_drive_40wire(const u16 *dev_id)
+{
+	if (ata_id_is_sata(dev_id))
+		return 0;	/* SATA */
+	if ((dev_id[93] & 0xE000) == 0x6000)
+		return 0;	/* 80 wire */
+	return 1;
+}
+
 static inline int atapi_cdb_len(const u16 *dev_id)
 {
 	u16 tmp = dev_id[0] & 0x3;
diff -urN linux-2.6.18.x86_64.p3/include/linux/libata-compat.h linux-2.6.18.x86_64.p4/include/linux/libata-compat.h
--- linux-2.6.18.x86_64.p3/include/linux/libata-compat.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/include/linux/libata-compat.h	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,6 @@
+#ifndef __LIBATA_COMPAT_H__
+#define __LIBATA_COMPAT_H__
+
+typedef void (*work_func_t)(void *);
+
+#endif /* __LIBATA_COMPAT_H__ */
diff -urN linux-2.6.18.x86_64.p3/include/linux/libata.h linux-2.6.18.x86_64.p4/include/linux/libata.h
--- linux-2.6.18.x86_64.p3/include/linux/libata.h	2007-06-05 10:43:12.000000000 -0400
+++ linux-2.6.18.x86_64.p4/include/linux/libata.h	2007-06-06 10:08:00.000000000 -0400
@@ -31,10 +31,23 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <asm/scatterlist.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/ata.h>
 #include <linux/workqueue.h>
 #include <scsi/scsi_host.h>
+#include <linux/acpi.h>
+
+#include <linux/libata-compat.h>
+
+/*
+ * Define if arch has non-standard setup.  This is a _PCI_ standard
+ * not a legacy or ISA standard.
+ */
+#ifdef CONFIG_ATA_NONSTANDARD
+#include <asm/libata-portmap.h>
+#else
+#include <asm-generic/libata-portmap.h>
+#endif
 
 /*
  * compile-time options: to be removed as soon as all the drivers are
@@ -44,8 +57,6 @@
 #undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
 #undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
 #undef ATA_NDEBUG		/* define to disable quick runtime checks */
-#undef ATA_ENABLE_PATA		/* define to enable PATA support in some
-				 * low-level drivers */
 
 
 /* note: prints function name for you */
@@ -112,8 +123,6 @@
 	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
 	ATA_MAX_QUEUE		= 32,
 	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
-	ATA_MAX_SECTORS		= 200,	/* FIXME */
-	ATA_MAX_SECTORS_LBA48	= 65535,
 	ATA_MAX_BUS		= 2,
 	ATA_DEF_BUSY_WAIT	= 10000,
 	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
@@ -128,10 +137,12 @@
 	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
 	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
 	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
+	ATA_DFLAG_FLUSH_EXT	= (1 << 4), /* do FLUSH_EXT instead of FLUSH */
 	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
 
-	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
-	ATA_DFLAG_SUSPENDED	= (1 << 9), /* device suspended */
+	ATA_DFLAG_PIO		= (1 << 8), /* device limited to PIO mode */
+	ATA_DFLAG_NCQ_OFF	= (1 << 9), /* device limited to non-NCQ mode */
+	ATA_DFLAG_SPUNDOWN	= (1 << 10), /* XXX: for spindown_compat */
 	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,
 
 	ATA_DFLAG_DETACH	= (1 << 16),
@@ -162,6 +173,10 @@
 	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
 					      * Register FIS clearing BSY */
 	ATA_FLAG_DEBUGMSG	= (1 << 13),
+	ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */
+	ATA_FLAG_IGN_SIMPLEX	= (1 << 15), /* ignore SIMPLEX */
+	ATA_FLAG_NO_IORDY	= (1 << 16), /* controller lacks iordy */
+	ATA_FLAG_ACPI_SATA	= (1 << 17), /* need native SATA ACPI layout */
 
 	/* The following flag belongs to ap->pflags but is kept in
 	 * ap->flags because it's referenced in many LLDs and will be
@@ -179,6 +194,7 @@
 	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
 	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
 	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */
+	ATA_PFLAG_INITIALIZING	= (1 << 7), /* being initialized, don't touch */
 
 	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
 	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
@@ -197,8 +213,9 @@
 	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
 
 	/* host set flags */
-	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
-	
+	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host only */
+	ATA_HOST_STARTED	= (1 << 1),	/* Host started */
+
 	/* various lengths of time */
 	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
 	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
@@ -225,8 +242,8 @@
 	/* encoding various smaller bitmaps into a single
 	 * unsigned int bitmap
 	 */
-	ATA_BITS_PIO		= 5,
-	ATA_BITS_MWDMA		= 3,
+	ATA_BITS_PIO		= 7,
+	ATA_BITS_MWDMA		= 5,
 	ATA_BITS_UDMA		= 8,
 
 	ATA_SHIFT_PIO		= 0,
@@ -241,10 +258,6 @@
 	ATA_DMA_PAD_SZ		= 4,
 	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
 
-	/* masks for port functions */
-	ATA_PORT_PRIMARY	= (1 << 0),
-	ATA_PORT_SECONDARY	= (1 << 1),
-
 	/* ering size */
 	ATA_ERING_SIZE		= 32,
 
@@ -255,13 +268,9 @@
 	ATA_EH_REVALIDATE	= (1 << 0),
 	ATA_EH_SOFTRESET	= (1 << 1),
 	ATA_EH_HARDRESET	= (1 << 2),
-	ATA_EH_SUSPEND		= (1 << 3),
-	ATA_EH_RESUME		= (1 << 4),
-	ATA_EH_PM_FREEZE	= (1 << 5),
 
 	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
-	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
-				  ATA_EH_RESUME | ATA_EH_PM_FREEZE,
+	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE,
 
 	/* ata_eh_info->flags */
 	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
@@ -269,8 +278,13 @@
 	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
 	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
 
-	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */
+	ATA_EHI_DID_SOFTRESET	= (1 << 16), /* already soft-reset this port */
+	ATA_EHI_DID_HARDRESET	= (1 << 17), /* already hard-reset this port */
+	ATA_EHI_PRINTINFO	= (1 << 18), /* print configuration info */
+	ATA_EHI_SETMODE		= (1 << 19), /* configure transfer mode */
+	ATA_EHI_POST_SETMODE	= (1 << 20), /* revalidating after setmode */
 
+	ATA_EHI_DID_RESET	= ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
 	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
 
 	/* max repeat if error condition is still set after ->error_handler */
@@ -278,27 +292,25 @@
 
 	/* how hard are we gonna try to probe/recover devices */
 	ATA_PROBE_MAX_TRIES	= 3,
-	ATA_EH_RESET_TRIES	= 3,
 	ATA_EH_DEV_TRIES	= 3,
 
-	/* Drive spinup time (time from power-on to the first D2H FIS)
-	 * in msecs - 8s currently.  Failing to get ready in this time
-	 * isn't critical.  It will result in reset failure for
-	 * controllers which can't wait for the first D2H FIS.  libata
-	 * will retry, so it just has to be long enough to spin up
-	 * most devices.
-	 */
-	ATA_SPINUP_WAIT		= 8000,
+	/* Horkage types. May be set by libata or controller on drives
+	   (some horkage may be drive/controller pair dependent). */
+
+	ATA_HORKAGE_DIAGNOSTIC	= (1 << 0),	/* Failed boot diag */
+	ATA_HORKAGE_NODMA	= (1 << 1),	/* DMA problems */
+	ATA_HORKAGE_NONCQ	= (1 << 2),	/* Don't use NCQ */
+	ATA_HORKAGE_MAX_SEC_128	= (1 << 3),	/* Limit max sects to 128 */
+	ATA_HORKAGE_DMA_RW_ONLY	= (1 << 4),	/* ATAPI DMA for RW only */
 };
 
 enum hsm_task_states {
-	HSM_ST_UNKNOWN,		/* state unknown */
 	HSM_ST_IDLE,		/* no command on going */
+	HSM_ST_FIRST,		/* (waiting the device to)
+				   write CDB or first data block */
 	HSM_ST,			/* (waiting the device to) transfer data */
 	HSM_ST_LAST,		/* (waiting the device to) complete command */
 	HSM_ST_ERR,		/* error */
-	HSM_ST_FIRST,		/* (waiting the device to)
-				   write CDB or first data block */
 };
 
 enum ata_completion_errors {
@@ -311,6 +323,7 @@
 	AC_ERR_SYSTEM		= (1 << 6), /* system error */
 	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
 	AC_ERR_OTHER		= (1 << 8), /* unknown */
+	AC_ERR_NODEV_HINT	= (1 << 9), /* polling device detection hint */
 };
 
 /* forward declarations */
@@ -321,61 +334,40 @@
 
 /* typedefs */
 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
-typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
-typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
+typedef int (*ata_prereset_fn_t)(struct ata_port *ap, unsigned long deadline);
+typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes,
+			      unsigned long deadline);
 typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
 
 struct ata_ioports {
-	unsigned long		cmd_addr;
-	unsigned long		data_addr;
-	unsigned long		error_addr;
-	unsigned long		feature_addr;
-	unsigned long		nsect_addr;
-	unsigned long		lbal_addr;
-	unsigned long		lbam_addr;
-	unsigned long		lbah_addr;
-	unsigned long		device_addr;
-	unsigned long		status_addr;
-	unsigned long		command_addr;
-	unsigned long		altstatus_addr;
-	unsigned long		ctl_addr;
-	unsigned long		bmdma_addr;
-	unsigned long		scr_addr;
-};
-
-struct ata_probe_ent {
-	struct list_head	node;
-	struct device 		*dev;
-	const struct ata_port_operations *port_ops;
-	struct scsi_host_template *sht;
-	struct ata_ioports	port[ATA_MAX_PORTS];
-	unsigned int		n_ports;
-	unsigned int		hard_port_no;
-	unsigned int		pio_mask;
-	unsigned int		mwdma_mask;
-	unsigned int		udma_mask;
-	unsigned int		legacy_mode;
-	unsigned long		irq;
-	unsigned int		irq_flags;
-	unsigned long		host_flags;
-	unsigned long		port_flags[ATA_MAX_PORTS];	/* pata fix */
-	unsigned long		host_set_flags;
-	void __iomem		*mmio_base;
-	void			*private_data;
+	void __iomem		*cmd_addr;
+	void __iomem		*data_addr;
+	void __iomem		*error_addr;
+	void __iomem		*feature_addr;
+	void __iomem		*nsect_addr;
+	void __iomem		*lbal_addr;
+	void __iomem		*lbam_addr;
+	void __iomem		*lbah_addr;
+	void __iomem		*device_addr;
+	void __iomem		*status_addr;
+	void __iomem		*command_addr;
+	void __iomem		*altstatus_addr;
+	void __iomem		*ctl_addr;
+	void __iomem		*bmdma_addr;
+	void __iomem		*scr_addr;
 };
 
-struct ata_host_set {
+struct ata_host {
 	spinlock_t		lock;
 	struct device 		*dev;
 	unsigned long		irq;
-	void __iomem		*mmio_base;
+	unsigned long		irq2;
+	void __iomem * const	*iomap;
 	unsigned int		n_ports;
 	void			*private_data;
 	const struct ata_port_operations *ops;
 	unsigned long		flags;
-	int			simplex_claimed;	/* Keep seperate in case we
-							   ever need to do this locked */
-	struct ata_host_set	*next;		/* for legacy mode */
+	struct ata_port		*simplex_claimed;	/* channel owning the DMA */
 	struct ata_port		*ports[0];
 };
 
@@ -397,9 +389,7 @@
 	int			dma_dir;
 
 	unsigned int		pad_len;
-
-	unsigned int		nsect;
-	unsigned int		cursect;
+	unsigned int		sect_size;
 
 	unsigned int		nbytes;
 	unsigned int		curbytes;
@@ -421,7 +411,7 @@
 	void			*private_data;
 };
 
-struct ata_host_stats {
+struct ata_port_stats {
 	unsigned long		unhandled_irq;
 	unsigned long		idle_irq;
 	unsigned long		rw_reqbuf;
@@ -469,6 +459,12 @@
 
 	/* error history */
 	struct ata_ering	ering;
+	int			spdn_cnt;
+	unsigned int		horkage;	/* List of broken features */
+#ifdef CONFIG_ATA_ACPI
+	/* ACPI objects info */
+	acpi_handle obj_handle;
+#endif
 };
 
 /* Offset into struct ata_device.  Fields above it are maintained
@@ -484,7 +480,6 @@
 	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
 	unsigned int		flags;		/* ATA_EHI_* flags */
 
-	unsigned long		hotplug_timestamp;
 	unsigned int		probe_mask;
 
 	char			desc[ATA_EH_DESC_LEN];
@@ -499,14 +494,13 @@
 };
 
 struct ata_port {
-	struct Scsi_Host	*host;	/* our co-allocated scsi host */
+	struct Scsi_Host	*scsi_host; /* our co-allocated scsi host */
 	const struct ata_port_operations *ops;
 	spinlock_t		*lock;
 	unsigned long		flags;	/* ATA_FLAG_xxx */
 	unsigned int		pflags; /* ATA_PFLAG_xxx */
-	unsigned int		id;	/* unique id req'd by scsi midlyr */
-	unsigned int		port_no; /* unique port #; from zero */
-	unsigned int		hard_port_no;	/* hardware port #; from zero */
+	unsigned int		print_id; /* user visible unique port ID */
+	unsigned int		port_no; /* 0 based port no. inside the host */
 
 	struct ata_prd		*prd;	 /* our SG list */
 	dma_addr_t		prd_dma; /* and its DMA mapping */
@@ -525,7 +519,7 @@
 	unsigned int		hw_sata_spd_limit;
 	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */
 
-	/* record runtime error info, protected by host_set lock */
+	/* record runtime error info, protected by host lock */
 	struct ata_eh_info	eh_info;
 	/* EH context owned by EH */
 	struct ata_eh_context	eh_context;
@@ -539,10 +533,11 @@
 	unsigned int		active_tag;
 	u32			sactive;
 
-	struct ata_host_stats	stats;
-	struct ata_host_set	*host_set;
+	struct ata_port_stats	stats;
+	struct ata_host		*host;
 	struct device 		*dev;
 
+	void			*port_task_data;
 	struct work_struct	port_task;
 	struct work_struct	hotplug_task;
 	struct work_struct	scsi_rescan_task;
@@ -564,11 +559,11 @@
 struct ata_port_operations {
 	void (*port_disable) (struct ata_port *);
 
-	void (*dev_config) (struct ata_port *, struct ata_device *);
+	void (*dev_config) (struct ata_device *);
 
 	void (*set_piomode) (struct ata_port *, struct ata_device *);
 	void (*set_dmamode) (struct ata_port *, struct ata_device *);
-	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
+	unsigned long (*mode_filter) (struct ata_device *, unsigned long);
 
 	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
 	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
@@ -579,11 +574,11 @@
 	void (*dev_select)(struct ata_port *ap, unsigned int device);
 
 	void (*phy_reset) (struct ata_port *ap); /* obsolete */
-	void (*set_mode) (struct ata_port *ap);
+	int  (*set_mode) (struct ata_port *ap, struct ata_device **r_failed_dev);
 
-	void (*post_set_mode) (struct ata_port *ap);
+	int (*cable_detect) (struct ata_port *ap);
 
-	int (*check_atapi_dma) (struct ata_queued_cmd *qc);
+	int  (*check_atapi_dma) (struct ata_queued_cmd *qc);
 
 	void (*bmdma_setup) (struct ata_queued_cmd *qc);
 	void (*bmdma_start) (struct ata_queued_cmd *qc);
@@ -603,8 +598,10 @@
 	void (*error_handler) (struct ata_port *ap);
 	void (*post_internal_cmd) (struct ata_queued_cmd *qc);
 
-	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
+	irq_handler_t irq_handler;
 	void (*irq_clear) (struct ata_port *);
+	u8 (*irq_on) (struct ata_port *);
+	u8 (*irq_ack) (struct ata_port *ap, unsigned int chk_drq);
 
 	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
 	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
@@ -616,7 +613,7 @@
 	int (*port_start) (struct ata_port *ap);
 	void (*port_stop) (struct ata_port *ap);
 
-	void (*host_stop) (struct ata_host_set *host_set);
+	void (*host_stop) (struct ata_host *host);
 
 	void (*bmdma_stop) (struct ata_queued_cmd *qc);
 	u8   (*bmdma_status) (struct ata_port *ap);
@@ -624,11 +621,12 @@
 
 struct ata_port_info {
 	struct scsi_host_template	*sht;
-	unsigned long		host_flags;
+	unsigned long		flags;
 	unsigned long		pio_mask;
 	unsigned long		mwdma_mask;
 	unsigned long		udma_mask;
 	const struct ata_port_operations *port_ops;
+	irq_handler_t		irq_handler;
 	void 			*private_data;
 };
 
@@ -650,6 +648,9 @@
 extern const unsigned long sata_deb_timing_hotplug[];
 extern const unsigned long sata_deb_timing_long[];
 
+extern const struct ata_port_operations ata_dummy_port_ops;
+extern const struct ata_port_info ata_dummy_port_info;
+
 static inline const unsigned long *
 sata_ehc_deb_timing(struct ata_eh_context *ehc)
 {
@@ -659,41 +660,60 @@
 		return sata_deb_timing_normal;
 }
 
+static inline int ata_port_is_dummy(struct ata_port *ap)
+{
+	return ap->ops == &ata_dummy_port_ops;
+}
+
+extern void sata_print_link_status(struct ata_port *ap);
 extern void ata_port_probe(struct ata_port *);
 extern void __sata_phy_reset(struct ata_port *ap);
 extern void sata_phy_reset(struct ata_port *ap);
 extern void ata_bus_reset(struct ata_port *ap);
 extern int sata_set_spd(struct ata_port *ap);
-extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
-extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
-extern int ata_std_prereset(struct ata_port *ap);
-extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
-extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
+extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param,
+			     unsigned long deadline);
+extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param,
+			   unsigned long deadline);
+extern int ata_std_prereset(struct ata_port *ap, unsigned long deadline);
+extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
+			     unsigned long deadline);
+extern int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
+			       unsigned long deadline);
+extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
+			      unsigned long deadline);
 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
-extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
 extern void ata_port_disable(struct ata_port *);
 extern void ata_std_ports(struct ata_ioports *ioaddr);
 #ifdef CONFIG_PCI
-extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
-			     unsigned int n_ports);
+extern int ata_pci_init_one (struct pci_dev *pdev,
+			     const struct ata_port_info * const * ppi);
 extern void ata_pci_remove_one (struct pci_dev *pdev);
-extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
-extern void ata_pci_device_do_resume(struct pci_dev *pdev);
-extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
+extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 extern int ata_pci_device_resume(struct pci_dev *pdev);
+#endif
 extern int ata_pci_clear_simplex(struct pci_dev *pdev);
 #endif /* CONFIG_PCI */
-extern int ata_device_add(const struct ata_probe_ent *ent);
-extern void ata_port_detach(struct ata_port *ap);
-extern void ata_host_set_init(struct ata_host_set *, struct device *,
-			      unsigned long, const struct ata_port_operations *);
-extern void ata_host_set_remove(struct ata_host_set *host_set);
+extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
+extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
+			const struct ata_port_info * const * ppi, int n_ports);
+extern int ata_host_start(struct ata_host *host);
+extern int ata_host_register(struct ata_host *host,
+			     struct scsi_host_template *sht);
+extern int ata_host_activate(struct ata_host *host, int irq,
+			     irq_handler_t irq_handler, unsigned long irq_flags,
+			     struct scsi_host_template *sht);
+extern void ata_host_detach(struct ata_host *host);
+extern void ata_host_init(struct ata_host *, struct device *,
+			  unsigned long, const struct ata_port_operations *);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
-extern int ata_scsi_release(struct Scsi_Host *host);
 extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host_set *,
+extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
 					   struct ata_port_info *, struct Scsi_Host *);
 extern int ata_sas_port_init(struct ata_port *);
 extern int ata_sas_port_start(struct ata_port *ap);
@@ -708,20 +728,20 @@
 extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
 extern int ata_port_online(struct ata_port *ap);
 extern int ata_port_offline(struct ata_port *ap);
-extern int ata_scsi_device_resume(struct scsi_device *);
-extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
-extern int ata_host_set_suspend(struct ata_host_set *host_set,
-				pm_message_t mesg);
-extern void ata_host_set_resume(struct ata_host_set *host_set);
+#ifdef CONFIG_PM
+extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_resume(struct ata_host *host);
+#endif
 extern int ata_ratelimit(void);
-extern unsigned int ata_busy_sleep(struct ata_port *ap,
-				   unsigned long timeout_pat,
-				   unsigned long timeout);
-extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
+extern int ata_busy_sleep(struct ata_port *ap,
+			  unsigned long timeout_pat, unsigned long timeout);
+extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
+extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
 				void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 			     unsigned long interval_msec,
 			     unsigned long timeout_msec);
+extern unsigned int ata_dev_try_classify(struct ata_port *, unsigned int, u8 *);
 
 /*
  * Default driver ops implementations
@@ -736,15 +756,11 @@
 extern u8 ata_altstatus(struct ata_port *ap);
 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
 extern int ata_port_start (struct ata_port *ap);
-extern void ata_port_stop (struct ata_port *ap);
-extern void ata_host_stop (struct ata_host_set *host_set);
-extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
-extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
-			       unsigned int buflen, int write_data);
-extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
-			      unsigned int buflen, int write_data);
-extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
-			      unsigned int buflen, int write_data);
+extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs);
+extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
+			  unsigned int buflen, int write_data);
+extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
+				unsigned int buflen, int write_data);
 extern void ata_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -753,10 +769,13 @@
 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem);
 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern void ata_dev_disable(struct ata_device *adev);
 extern void ata_id_string(const u16 *id, unsigned char *s,
 			  unsigned int ofs, unsigned int len);
 extern void ata_id_c_string(const u16 *id, unsigned char *s,
 			    unsigned int ofs, unsigned int len);
+extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown);
+extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
@@ -785,6 +804,16 @@
 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
 				       int queue_depth);
 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
+extern int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern u8 ata_irq_on(struct ata_port *ap);
+extern u8 ata_dummy_irq_on(struct ata_port *ap);
+extern u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq);
+extern u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq);
+
+extern int ata_cable_40wire(struct ata_port *ap);
+extern int ata_cable_80wire(struct ata_port *ap);
+extern int ata_cable_sata(struct ata_port *ap);
+extern int ata_cable_unknown(struct ata_port *ap);
 
 /*
  * Timing helpers
@@ -823,11 +852,13 @@
 	unsigned long		val;
 };
 
-extern void ata_pci_host_stop (struct ata_host_set *host_set);
-extern struct ata_probe_ent *
-ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
+extern int ata_pci_init_native_host(struct ata_host *host);
+extern int ata_pci_init_bmdma(struct ata_host *host);
+extern int ata_pci_prepare_native_host(struct pci_dev *pdev,
+				const struct ata_port_info * const * ppi,
+				struct ata_host **r_host);
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
-extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
+extern unsigned long ata_pci_default_filter(struct ata_device *, unsigned long);
 #endif /* CONFIG_PCI */
 
 /*
@@ -853,10 +884,10 @@
  * printk helpers
  */
 #define ata_port_printk(ap, lv, fmt, args...) \
-	printk(lv"ata%u: "fmt, (ap)->id , ##args)
+	printk(lv"ata%u: "fmt, (ap)->print_id , ##args)
 
 #define ata_dev_printk(dev, lv, fmt, args...) \
-	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
+	printk(lv"ata%u.%02u: "fmt, (dev)->ap->print_id, (dev)->devno , ##args)
 
 /*
  * ata_eh_info helpers
@@ -874,12 +905,7 @@
 
 static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
 {
-	if (ehi->flags & ATA_EHI_HOTPLUGGED)
-		return;
-
 	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
-	ehi->hotplug_timestamp = jiffies;
-
 	ehi->action |= ATA_EH_SOFTRESET;
 	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
 }
@@ -973,11 +999,6 @@
 	return ata_class_absent(dev->class);
 }
 
-static inline unsigned int ata_dev_ready(const struct ata_device *dev)
-{
-	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
-}
-
 /*
  * port helpers
  */
@@ -994,6 +1015,21 @@
 	return ap->ops->check_status(ap);
 }
 
+/**
+ *	ata_ncq_enabled - Test whether NCQ is enabled
+ *	@dev: ATA device to test for
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	1 if NCQ is enabled for @dev, 0 otherwise.
+ */
+static inline int ata_ncq_enabled(struct ata_device *dev)
+{
+	return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
+			      ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
 
 /**
  *	ata_pause - Flush writes and pause 400 nanoseconds.
@@ -1013,6 +1049,8 @@
 /**
  *	ata_busy_wait - Wait for a port status register
  *	@ap: Port to wait for.
+ *	@bits: bits that must be clear
+ *	@max: number of 10uS waits to perform
  *
  *	Waits up to max*10 microseconds for the selected bits in the port's
  *	status register to be cleared.
@@ -1031,7 +1069,7 @@
 		udelay(10);
 		status = ata_chk_status(ap);
 		max--;
-	} while ((status & bits) && (max > 0));
+	} while (status != 0xff && (status & bits) && (max > 0));
 
 	return status;
 }
@@ -1052,11 +1090,10 @@
 {
 	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 
-	if (status & (ATA_BUSY | ATA_DRQ)) {
-		unsigned long l = ap->ioaddr.status_addr;
+	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) {
 		if (ata_msg_warn(ap))
-			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
-				status, l);
+			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%p\n",
+				status, ap->ioaddr.status_addr);
 	}
 
 	return status;
@@ -1103,12 +1140,15 @@
 
 static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
 {
+	qc->dma_dir = DMA_NONE;
 	qc->__sg = NULL;
 	qc->flags = 0;
-	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
-	qc->nsect = 0;
+	qc->cursg = qc->cursg_ofs = 0;
 	qc->nbytes = qc->curbytes = 0;
+	qc->n_elem = 0;
 	qc->err_mask = 0;
+	qc->pad_len = 0;
+	qc->sect_size = ATA_SECT_SIZE;
 
 	ata_tf_init(qc->dev, &qc->tf);
 
@@ -1117,82 +1157,6 @@
 	qc->result_tf.feature = 0;
 }
 
-/**
- *	ata_irq_on - Enable interrupts on a port.
- *	@ap: Port on which interrupts are enabled.
- *
- *	Enable interrupts on a legacy IDE device using MMIO or PIO,
- *	wait for idle, clear any pending interrupts.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-
-static inline u8 ata_irq_on(struct ata_port *ap)
-{
-	struct ata_ioports *ioaddr = &ap->ioaddr;
-	u8 tmp;
-
-	ap->ctl &= ~ATA_NIEN;
-	ap->last_ctl = ap->ctl;
-
-	if (ap->flags & ATA_FLAG_MMIO)
-		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
-	else
-		outb(ap->ctl, ioaddr->ctl_addr);
-	tmp = ata_wait_idle(ap);
-
-	ap->ops->irq_clear(ap);
-
-	return tmp;
-}
-
-
-/**
- *	ata_irq_ack - Acknowledge a device interrupt.
- *	@ap: Port on which interrupts are enabled.
- *
- *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
- *	or BUSY+DRQ clear).  Obtain dma status and port status from
- *	device.  Clear the interrupt.  Return port status.
- *
- *	LOCKING:
- */
-
-static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
-{
-	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
-	u8 host_stat, post_stat, status;
-
-	status = ata_busy_wait(ap, bits, 1000);
-	if (status & bits)
-		if (ata_msg_err(ap))
-			printk(KERN_ERR "abnormal status 0x%X\n", status);
-
-	/* get controller status; clear intr, err bits */
-	if (ap->flags & ATA_FLAG_MMIO) {
-		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
-		host_stat = readb(mmio + ATA_DMA_STATUS);
-		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
-		       mmio + ATA_DMA_STATUS);
-
-		post_stat = readb(mmio + ATA_DMA_STATUS);
-	} else {
-		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
-		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-
-		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-	}
-
-	if (ata_msg_intr(ap))
-		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
-			__FUNCTION__,
-			host_stat, post_stat, status);
-
-	return status;
-}
-
 static inline int ata_try_flush_cache(const struct ata_device *dev)
 {
 	return ata_id_wcache_enabled(dev->id) ||
@@ -1220,19 +1184,19 @@
 static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
 {
 	ap->pad_dma = 0;
-	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
-				     &ap->pad_dma, GFP_KERNEL);
+	ap->pad = dmam_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
+				      &ap->pad_dma, GFP_KERNEL);
 	return (ap->pad == NULL) ? -ENOMEM : 0;
 }
 
 static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
 {
-	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
+	dmam_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 }
 
 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
 {
-	return (struct ata_port *) &host->hostdata[0];
+	return *(struct ata_port **)&host->hostdata[0];
 }
 
 #endif /* __LINUX_LIBATA_H__ */
diff -urN linux-2.6.18.x86_64.p3/include/linux/pata_platform.h linux-2.6.18.x86_64.p4/include/linux/pata_platform.h
--- linux-2.6.18.x86_64.p3/include/linux/pata_platform.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.x86_64.p4/include/linux/pata_platform.h	2007-06-06 10:08:00.000000000 -0400
@@ -0,0 +1,13 @@
+#ifndef __LINUX_PATA_PLATFORM_H
+#define __LINUX_PATA_PLATFORM_H
+
+struct pata_platform_info {
+	/*
+	 * I/O port shift, for platforms with ports that are
+	 * constantly spaced and need larger than the 1-byte
+	 * spacing used by ata_std_ports().
+	 */
+	unsigned int ioport_shift;
+};
+
+#endif /* __LINUX_PATA_PLATFORM_H */


diff -urN linux-2.6.18.i686.orig/include/scsi/scsi_device.h linux-2.6.18.i686/include/scsi/scsi_device.h
--- linux-2.6.18.i686.orig/include/scsi/scsi_device.h	2007-06-13 13:13:21.000000000 -0400
+++ linux-2.6.18.i686/include/scsi/scsi_device.h	2007-06-13 13:14:58.000000000 -0400
@@ -124,8 +124,10 @@
 	unsigned fix_capacity:1;	/* READ_CAPACITY is too high by 1 */
 	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */
 
+#ifndef __GENKSYMS__
 	/* added at end for kABI */
 	unsigned manage_start_stop:1;   /* Let HLD (sd) manage start/stop */
+#endif
 
 	unsigned int device_blocked;	/* Device returned QUEUE_FULL. */
 
diff -ur linux-2.6.18.x86_64.orig/include/linux/libata.h linux-2.6.18.x86_64/include/linux/libata.h
--- linux-2.6.18.x86_64.orig/include/linux/libata.h	2007-06-15 10:50:30.000000000 -0400
+++ linux-2.6.18.x86_64/include/linux/libata.h	2007-06-15 10:52:41.000000000 -0400
@@ -35,7 +35,6 @@
 #include <linux/ata.h>
 #include <linux/workqueue.h>
 #include <scsi/scsi_host.h>
-#include <linux/acpi.h>
 
 #include <linux/libata-compat.h>
 
@@ -463,7 +462,7 @@
 	unsigned int		horkage;	/* List of broken features */
 #ifdef CONFIG_ATA_ACPI
 	/* ACPI objects info */
-	acpi_handle obj_handle;
+	void			*obj_handle;
 #endif
 };
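
For reviewers not yet familiar with the new libata registration interface
that these conversions target, below is a minimal illustrative sketch of a
probe routine (not part of the patch; the mydrv_* names, port ops, interrupt
handler and scsi_host_template are hypothetical placeholders).  It mirrors
what the vsc_sata conversion above now does: allocate the host from port
info, then start it, wire up the IRQ and register it in one call.

#include <linux/pci.h>
#include <linux/libata.h>

/* mydrv_ops, mydrv_sht and mydrv_interrupt are assumed to be defined
 * elsewhere in this (hypothetical) driver. */
static int mydrv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static const struct ata_port_info pi = {
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= 0x1f,		/* PIO0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mydrv_ops,
	};
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* allocate the host and its two ports, pre-initialized from ppi[] */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* ... iomap BARs and fill in host->ports[i]->ioaddr here ... */

	pci_set_master(pdev);

	/* start the host, request the IRQ and register with the SCSI layer */
	return ata_host_activate(host, pdev->irq, mydrv_interrupt,
				 IRQF_SHARED, &mydrv_sht);
}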