kernel-2.6.18-194.11.1.el5.src.rpm

From: Jeff Garzik <jgarzik@redhat.com>
Date: Wed, 19 Dec 2007 07:25:50 -0500
Subject: [sata] rhel5.2 driver update
Message-id: 20071219122550.GB25477@devserv.devel.redhat.com
O-Subject: [RHEL5.2 PATCH 2/2] SATA update - driver changes
Bugzilla: 184884 307911

This is the RHEL5.2 SATA update, which addresses bugzillas 184884 and 307911,
among others.  It is the latest upstream SATA code (2.6.24-rcX), backported.

Two Kconfig symbols that are relevant to our platforms are added, and the
following must be applied to the various RH kernel config files:

# CONFIG_PATA_ACPI is not set
# CONFIG_PATA_NS87415 is not set
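
For reference, both new symbols are tristates (see the Kconfig hunks below),
so a config that did want either driver built as a module would instead carry
lines of the form shown here; this is illustrative only, the RHEL configs
leave both disabled:

CONFIG_PATA_ACPI=m
CONFIG_PATA_NS87415=m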

 drivers/ata/Kconfig             |   85 +
 drivers/ata/Makefile            |   10
 drivers/ata/ahci.c              | 1236 ++++++++++++++------
 drivers/ata/ata_generic.c       |   24
 drivers/ata/ata_piix.c          |  459 ++++++-
 drivers/ata/libata-acpi.c       | 1238 +++++++++++---------
 drivers/ata/libata-core.c       | 2368 ++++++++++++++++++++++++++--------------
 drivers/ata/libata-eh.c         | 1407 ++++++++++++++++-------
 drivers/ata/libata-pmp.c        | 1191 ++++++++++++++++++++
 drivers/ata/libata-scsi.c       |  835 +++++++++-----
 drivers/ata/libata-sff.c        |  401 +-----
 drivers/ata/libata.h            |   69 -
 drivers/ata/pata_acpi.c         |  397 ++++++
 drivers/ata/pata_ali.c          |  114 +
 drivers/ata/pata_amd.c          |   94 -
 drivers/ata/pata_artop.c        |   45
 drivers/ata/pata_at32.c         |  446 +++++++
 drivers/ata/pata_atiixp.c       |   20
 drivers/ata/pata_bf54x.c        | 1631 +++++++++++++++++++++++++++
 drivers/ata/pata_cmd640.c       |    6
 drivers/ata/pata_cmd64x.c       |   63 -
 drivers/ata/pata_cs5520.c       |   52
 drivers/ata/pata_cs5530.c       |   18
 drivers/ata/pata_cs5535.c       |   10
 drivers/ata/pata_cs5536.c       |  344 +++++
 drivers/ata/pata_cypress.c      |    6
 drivers/ata/pata_efar.c         |   13
 drivers/ata/pata_hpt366.c       |    8
 drivers/ata/pata_hpt37x.c       |  133 +-
 drivers/ata/pata_hpt3x2n.c      |   23
 drivers/ata/pata_hpt3x3.c       |  103 +
 drivers/ata/pata_icside.c       |   89 -
 drivers/ata/pata_isapnp.c       |   24
 drivers/ata/pata_it8213.c       |   15
 drivers/ata/pata_it821x.c       |   51
 drivers/ata/pata_ixp4xx_cf.c    |   95 -
 drivers/ata/pata_jmicron.c      |   37
 drivers/ata/pata_legacy.c       |   27
 drivers/ata/pata_marvell.c      |   26
 drivers/ata/pata_mpc52xx.c      |   30
 drivers/ata/pata_mpiix.c        |   27
 drivers/ata/pata_netcell.c      |    9
 drivers/ata/pata_ns87410.c      |   15
 drivers/ata/pata_ns87415.c      |  469 +++++++
 drivers/ata/pata_oldpiix.c      |   15
 drivers/ata/pata_opti.c         |   13
 drivers/ata/pata_optidma.c      |   32
 drivers/ata/pata_pcmcia.c       |   33
 drivers/ata/pata_pdc2027x.c     |  145 +-
 drivers/ata/pata_pdc202xx_old.c |   33
 drivers/ata/pata_platform.c     |   52
 drivers/ata/pata_qdi.c          |   15
 drivers/ata/pata_radisys.c      |    8
 drivers/ata/pata_rz1000.c       |   15
 drivers/ata/pata_sc1200.c       |   14
 drivers/ata/pata_scc.c          |  142 +-
 drivers/ata/pata_serverworks.c  |   40
 drivers/ata/pata_sil680.c       |  119 +-
 drivers/ata/pata_sis.c          |   86 -
 drivers/ata/pata_sl82c105.c     |   24
 drivers/ata/pata_triflex.c      |   13
 drivers/ata/pata_via.c          |   55
 drivers/ata/pata_winbond.c      |   15
 drivers/ata/pdc_adma.c          |  146 +-
 drivers/ata/sata_fsl.c          | 1395 +++++++++++++++++++++++
 drivers/ata/sata_inic162x.c     |   62 -
 drivers/ata/sata_mv.c           | 1269 +++++++++++++--------
 drivers/ata/sata_nv.c           | 1149 +++++++++++++++++--
 drivers/ata/sata_promise.c      |  299 +++--
 drivers/ata/sata_qstor.c        |  156 +-
 drivers/ata/sata_sil.c          |  120 +-
 drivers/ata/sata_sil24.c        |  544 +++++++--
 drivers/ata/sata_sis.c          |  103 -
 drivers/ata/sata_svw.c          |   52
 drivers/ata/sata_sx4.c          |  370 +++---
 drivers/ata/sata_uli.c          |   48
 drivers/ata/sata_via.c          |  100 -
 drivers/ata/sata_vsc.c          |   48
 include/linux/ata.h             |  226 +++
 include/linux/libata-compat.h   |   44
 include/linux/libata.h          |  491 +++++---
 81 files changed, 16024 insertions(+), 5230 deletions(-)

diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index fa0692b..60d758a 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,6 +14,11 @@ menuconfig ATA
 	  that "speaks" the ATA protocol, also called ATA controller),
 	  because you will be asked for it.
 
+	  NOTE: ATA enables basic SCSI support; *however*,
+	  'SCSI disk support', 'SCSI tape support', or
+	  'SCSI CDROM support' may also be needed,
+	  depending on your hardware configuration.
+
 if ATA
 
 config ATA_NONSTANDARD
@@ -166,6 +171,24 @@ config SATA_INIC162X
 	help
 	  This option enables support for Initio 162x Serial ATA.
 
+config PATA_ACPI
+	tristate "ACPI firmware driver for PATA"
+	depends on ATA_ACPI
+	help
+	  This option enables an ACPI method driver which drives
+	  motherboard PATA controller interfaces through the ACPI
+	  firmware in the BIOS. This driver can sometimes handle
+	  otherwise unsupported hardware.
+
+config SATA_FSL
+	tristate "Freescale 3.0Gbps SATA support"
+	depends on PPC_MPC837x
+	help
+	  This option enables support for Freescale 3.0Gbps SATA controller.
+	  It can be found on MPC837x and MPC8315.
+
+	  If unsure, say N.
+
 config PATA_ALI
 	tristate "ALi PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
@@ -185,16 +208,25 @@ config PATA_AMD
 	  If unsure, say N.
 
 config PATA_ARTOP
-	tristate "ARTOP 6210/6260 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ARTOP 6210/6260 PATA support"
+	depends on PCI
 	help
 	  This option enables support for ARTOP PATA controllers.
 
 	  If unsure, say N.
 
+config PATA_AT32
+	tristate "Atmel AVR32 PATA support (Experimental)"
+	depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
+	help
+	  This option enables support for the IDE devices on the
+	  Atmel AT32AP platform.
+
+	  If unsure, say N.
+
 config PATA_ATIIXP
-	tristate "ATI PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ATI PATA support"
+	depends on PCI
 	help
 	  This option enables support for the ATI ATA interfaces
 	  found on the many ATI chipsets.
@@ -212,8 +244,8 @@ config PATA_CMD640_PCI
 	  If unsure, say N.
 
 config PATA_CMD64X
-	tristate "CMD64x PATA support (Very Experimental)"
-	depends on PCI&& EXPERIMENTAL
+	tristate "CMD64x PATA support"
+	depends on PCI
 	help
 	  This option enables support for the CMD64x series chips
 	  except for the CMD640.
@@ -247,6 +279,15 @@ config PATA_CS5535
 
 	  If unsure, say N.
 
+config PATA_CS5536
+	tristate "CS5536 PATA support (Experimental)"
+	depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+	help
+	  This option enables support for the AMD CS5536
+	  companion chip used with the Geode LX processor family.
+
+	  If unsure, say N.
+
 config PATA_CYPRESS
 	tristate "Cypress CY82C693 PATA support (Very Experimental)"
 	depends on PCI && EXPERIMENTAL
@@ -275,8 +316,8 @@ config ATA_GENERIC
 	  If unsure, say N.
 
 config PATA_HPT366
-	tristate "HPT 366/368 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "HPT 366/368 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the HPT 366 and 368
 	  PATA controllers via the new ATA layer.
@@ -302,7 +343,7 @@ config PATA_HPT3X2N
 	  If unsure, say N.
 
 config PATA_HPT3X3
-	tristate "HPT 343/363 PATA support (Experimental)"
+	tristate "HPT 343/363 PATA support"
 	depends on PCI
 	help
 	  This option enables support for the HPT 343/363
@@ -310,6 +351,14 @@ config PATA_HPT3X3
 
 	  If unsure, say N.
 
+config PATA_HPT3X3_DMA
+	bool "HPT 343/363 DMA support (Experimental)"
+	depends on PATA_HPT3X3
+	help
+	  This option enables DMA support for the HPT343/363
+	  controllers. Enable with care as there are still some
+	  problems with DMA on this chipset.
+
 config PATA_ISAPNP
 	tristate "ISA Plug and Play PATA support (Experimental)"
 	depends on EXPERIMENTAL && ISAPNP
@@ -417,6 +466,15 @@ config PATA_NS87410
 
 	  If unsure, say N.
 
+config PATA_NS87415
+	tristate "Nat Semi NS87415 PATA support (Experimental)"
+	depends on PCI && EXPERIMENTAL
+	help
+	  This option enables support for the National Semiconductor
+	  NS87415 PCI-IDE controller.
+
+	  If unsure, say N.
+
 config PATA_OPTI
 	tristate "OPTI621/6215 PATA support (Very Experimental)"
 	depends on PCI && EXPERIMENTAL
@@ -581,6 +639,15 @@ config PATA_SCC
 
 	  If unsure, say N.
 
+config PATA_BF54X
+	tristate "Blackfin 54x ATAPI support"
+	depends on BF542 || BF548 || BF549
+	help
+	  This option enables support for the built-in ATAPI controller on
+	  Blackfin 54x family chips.
+
+	  If unsure, say N.
+
 config ATA_INTEL_COMBINED
 	bool
 	depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8149c68..b13feb2 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -17,16 +17,19 @@ obj-$(CONFIG_SATA_ULI)		+= sata_uli.o
 obj-$(CONFIG_SATA_MV)		+= sata_mv.o
 obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
 obj-$(CONFIG_PDC_ADMA)		+= pdc_adma.o
+obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
 
 obj-$(CONFIG_PATA_ALI)		+= pata_ali.o
 obj-$(CONFIG_PATA_AMD)		+= pata_amd.o
 obj-$(CONFIG_PATA_ARTOP)	+= pata_artop.o
+obj-$(CONFIG_PATA_AT32)		+= pata_at32.o
 obj-$(CONFIG_PATA_ATIIXP)	+= pata_atiixp.o
 obj-$(CONFIG_PATA_CMD640_PCI)	+= pata_cmd640.o
 obj-$(CONFIG_PATA_CMD64X)	+= pata_cmd64x.o
 obj-$(CONFIG_PATA_CS5520)	+= pata_cs5520.o
 obj-$(CONFIG_PATA_CS5530)	+= pata_cs5530.o
 obj-$(CONFIG_PATA_CS5535)	+= pata_cs5535.o
+obj-$(CONFIG_PATA_CS5536)	+= pata_cs5536.o
 obj-$(CONFIG_PATA_CYPRESS)	+= pata_cypress.o
 obj-$(CONFIG_PATA_EFAR)		+= pata_efar.o
 obj-$(CONFIG_PATA_HPT366)	+= pata_hpt366.o
@@ -39,6 +42,7 @@ obj-$(CONFIG_PATA_IT8213)	+= pata_it8213.o
 obj-$(CONFIG_PATA_JMICRON)	+= pata_jmicron.o
 obj-$(CONFIG_PATA_NETCELL)	+= pata_netcell.o
 obj-$(CONFIG_PATA_NS87410)	+= pata_ns87410.o
+obj-$(CONFIG_PATA_NS87415)	+= pata_ns87415.o
 obj-$(CONFIG_PATA_OPTI)		+= pata_opti.o
 obj-$(CONFIG_PATA_OPTIDMA)	+= pata_optidma.o
 obj-$(CONFIG_PATA_MPC52xx)	+= pata_mpc52xx.o
@@ -61,12 +65,16 @@ obj-$(CONFIG_PATA_SIS)		+= pata_sis.o
 obj-$(CONFIG_PATA_TRIFLEX)	+= pata_triflex.o
 obj-$(CONFIG_PATA_IXP4XX_CF)	+= pata_ixp4xx_cf.o
 obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
+obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
+# Should be last but two libata driver
+obj-$(CONFIG_PATA_ACPI)		+= pata_acpi.o
 # Should be last but one libata driver
 obj-$(CONFIG_ATA_GENERIC)	+= ata_generic.o
 # Should be last libata driver
 obj-$(CONFIG_PATA_LEGACY)	+= pata_legacy.o
 
-libata-objs	:= libata-core.o libata-scsi.o libata-sff.o libata-eh.o
+libata-objs	:= libata-core.o libata-scsi.o libata-sff.o libata-eh.o \
+		   libata-pmp.o
 libata-$(CONFIG_ATA_ACPI)	+= libata-acpi.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 407d6c4..7171c2c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -41,20 +41,24 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
 
 #define DRV_NAME	"ahci"
-#define DRV_VERSION	"2.2"
+#define DRV_VERSION	"3.0"
 
+static int ahci_enable_alpm(struct ata_port *ap,
+		enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
 
 enum {
 	AHCI_PCI_BAR		= 5,
 	AHCI_MAX_PORTS		= 32,
 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
 	AHCI_DMA_BOUNDARY	= 0xffffffff,
-	AHCI_USE_CLUSTERING	= 0,
+	AHCI_USE_CLUSTERING	= 1,
 	AHCI_MAX_CMDS		= 32,
 	AHCI_CMD_SZ		= 32,
 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
@@ -77,10 +81,10 @@ enum {
 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
 
 	board_ahci		= 0,
-	board_ahci_pi		= 1,
-	board_ahci_vt8251	= 2,
-	board_ahci_ign_iferr	= 3,
-	board_ahci_sb600	= 4,
+	board_ahci_vt8251	= 1,
+	board_ahci_ign_iferr	= 2,
+	board_ahci_sb600	= 3,
+	board_ahci_mv		= 4,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -96,8 +100,11 @@ enum {
 
 	/* HOST_CAP bits */
 	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
+	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
+	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
+	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
 
@@ -112,11 +119,11 @@ enum {
 	PORT_TFDATA		= 0x20,	/* taskfile data */
 	PORT_SIG		= 0x24,	/* device TF signature */
 	PORT_CMD_ISSUE		= 0x38, /* command issue */
-	PORT_SCR		= 0x28, /* SATA phy register block */
 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
+	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
 
 	/* PORT_IRQ_{STAT,MASK} bits */
 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
@@ -142,7 +149,8 @@ enum {
 				  PORT_IRQ_IF_ERR |
 				  PORT_IRQ_CONNECT |
 				  PORT_IRQ_PHYRDY |
-				  PORT_IRQ_UNK_FIS,
+				  PORT_IRQ_UNK_FIS |
+				  PORT_IRQ_BAD_PMP,
 	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
 				  PORT_IRQ_TF_ERR |
 				  PORT_IRQ_HBUS_DATA_ERR,
@@ -151,7 +159,10 @@ enum {
 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
 
 	/* PORT_CMD bits */
+	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
+	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
+	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
@@ -165,17 +176,25 @@ enum {
 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
 
+	/* hpriv->flags bits */
+	AHCI_HFLAG_NO_NCQ		= (1 << 0),
+	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
+	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
+	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
+	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
+	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
+	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
+	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
+
 	/* ap->flags bits */
-	AHCI_FLAG_NO_NCQ		= (1 << 24),
-	AHCI_FLAG_IGN_IRQ_IF_ERR	= (1 << 25), /* ignore IRQ_IF_ERR */
-	AHCI_FLAG_HONOR_PI		= (1 << 26), /* honor PORTS_IMPL */
-	AHCI_FLAG_IGN_SERR_INTERNAL	= (1 << 27), /* ignore SERR_INTERNAL */
-	AHCI_FLAG_32BIT_ONLY		= (1 << 28), /* force 32bit */
 
 	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-					  ATA_FLAG_SKIP_D2H_BSY |
-					  ATA_FLAG_ACPI_SATA,
+					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+					  ATA_FLAG_IPM,
+	AHCI_LFLAG_COMMON		= ATA_LFLAG_SKIP_D2H_BSY,
+
+	ICH_MAP				= 0x90, /* ICH MAP register */
 };
 
 struct ahci_cmd_hdr {
@@ -194,6 +213,7 @@ struct ahci_sg {
 };
 
 struct ahci_host_priv {
+	unsigned int		flags;		/* AHCI_HFLAG_* */
 	u32			cap;		/* cap to use */
 	u32			port_map;	/* port map to use */
 	u32			saved_cap;	/* saved initial cap */
@@ -201,6 +221,7 @@ struct ahci_host_priv {
 };
 
 struct ahci_port_priv {
+	struct ata_link		*active_link;
 	struct ahci_cmd_hdr	*cmd_slot;
 	dma_addr_t		cmd_slot_dma;
 	void			*cmd_tbl;
@@ -211,11 +232,12 @@ struct ahci_port_priv {
 	unsigned int		ncq_saw_d2h:1;
 	unsigned int		ncq_saw_dmas:1;
 	unsigned int		ncq_saw_sdb:1;
+	u32 			intr_mask;	/* interrupts to enable */
 };
 
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static void ahci_irq_clear(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
@@ -225,16 +247,27 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
 static void ahci_vt8251_error_handler(struct ata_port *ap);
+static void ahci_p5wdh_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_resume(struct ata_port *ap);
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			       u32 opts);
 #ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
-static int ahci_port_resume(struct ata_port *ap);
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
 static int ahci_pci_device_resume(struct pci_dev *pdev);
 #endif
 
+static struct class_device_attribute *ahci_shost_attrs[] = {
+	&class_device_attr_link_power_management_policy,
+	NULL
+};
+
 static struct scsi_host_template ahci_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -252,23 +285,21 @@ static struct scsi_host_template ahci_sht = {
 	.slave_configure	= ata_scsi_slave_config,
 	.slave_destroy		= ata_scsi_slave_destroy,
 	.bios_param		= ata_std_bios_param,
+	.shost_attrs		= ahci_shost_attrs,
 };
 
 static const struct ata_port_operations ahci_ops = {
-	.port_disable		= ata_port_disable,
-
 	.check_status		= ahci_check_status,
 	.check_altstatus	= ahci_check_status,
 	.dev_select		= ata_noop_dev_select,
 
 	.tf_read		= ahci_tf_read,
 
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
 	.irq_clear		= ahci_irq_clear,
-	.irq_on			= ata_dummy_irq_on,
-	.irq_ack		= ata_dummy_irq_ack,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
@@ -279,30 +310,32 @@ static const struct ata_port_operations ahci_ops = {
 	.error_handler		= ahci_error_handler,
 	.post_internal_cmd	= ahci_post_internal_cmd,
 
+	.pmp_attach		= ahci_pmp_attach,
+	.pmp_detach		= ahci_pmp_detach,
+
 #ifdef CONFIG_PM
 	.port_suspend		= ahci_port_suspend,
 	.port_resume		= ahci_port_resume,
 #endif
+	.enable_pm		= ahci_enable_alpm,
+	.disable_pm		= ahci_disable_alpm,
 
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
 };
 
 static const struct ata_port_operations ahci_vt8251_ops = {
-	.port_disable		= ata_port_disable,
-
 	.check_status		= ahci_check_status,
 	.check_altstatus	= ahci_check_status,
 	.dev_select		= ata_noop_dev_select,
 
 	.tf_read		= ahci_tf_read,
 
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
 	.qc_prep		= ahci_qc_prep,
 	.qc_issue		= ahci_qc_issue,
 
 	.irq_clear		= ahci_irq_clear,
-	.irq_on			= ata_dummy_irq_on,
-	.irq_ack		= ata_dummy_irq_ack,
 
 	.scr_read		= ahci_scr_read,
 	.scr_write		= ahci_scr_write,
@@ -313,6 +346,9 @@ static const struct ata_port_operations ahci_vt8251_ops = {
 	.error_handler		= ahci_vt8251_error_handler,
 	.post_internal_cmd	= ahci_post_internal_cmd,
 
+	.pmp_attach		= ahci_pmp_attach,
+	.pmp_detach		= ahci_pmp_detach,
+
 #ifdef CONFIG_PM
 	.port_suspend		= ahci_port_suspend,
 	.port_resume		= ahci_port_resume,
@@ -322,43 +358,88 @@ static const struct ata_port_operations ahci_vt8251_ops = {
 	.port_stop		= ahci_port_stop,
 };
 
+static const struct ata_port_operations ahci_p5wdh_ops = {
+	.check_status		= ahci_check_status,
+	.check_altstatus	= ahci_check_status,
+	.dev_select		= ata_noop_dev_select,
+
+	.tf_read		= ahci_tf_read,
+
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
+	.qc_prep		= ahci_qc_prep,
+	.qc_issue		= ahci_qc_issue,
+
+	.irq_clear		= ahci_irq_clear,
+
+	.scr_read		= ahci_scr_read,
+	.scr_write		= ahci_scr_write,
+
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_p5wdh_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
+	.pmp_attach		= ahci_pmp_attach,
+	.pmp_detach		= ahci_pmp_detach,
+
+#ifdef CONFIG_PM
+	.port_suspend		= ahci_port_suspend,
+	.port_resume		= ahci_port_resume,
+#endif
+
+	.port_start		= ahci_port_start,
+	.port_stop		= ahci_port_stop,
+};
+
+#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
+
 static const struct ata_port_info ahci_port_info[] = {
 	/* board_ahci */
 	{
 		.flags		= AHCI_FLAG_COMMON,
+		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
-		.port_ops	= &ahci_ops,
-	},
-	/* board_ahci_pi */
-	{
-		.flags		= AHCI_FLAG_COMMON | AHCI_FLAG_HONOR_PI,
-		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
 	/* board_ahci_vt8251 */
 	{
-		.flags		= AHCI_FLAG_COMMON | ATA_FLAG_HRST_TO_RESUME |
-				  AHCI_FLAG_NO_NCQ,
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.link_flags	= AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_vt8251_ops,
 	},
 	/* board_ahci_ign_iferr */
 	{
-		.flags		= AHCI_FLAG_COMMON | AHCI_FLAG_IGN_IRQ_IF_ERR,
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
+		.flags		= AHCI_FLAG_COMMON,
+		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
 	/* board_ahci_sb600 */
 	{
-		.flags		= AHCI_FLAG_COMMON |
-				  AHCI_FLAG_IGN_SERR_INTERNAL |
-				  AHCI_FLAG_32BIT_ONLY,
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
+				 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.link_flags	= AHCI_LFLAG_COMMON,
 		.pio_mask	= 0x1f, /* pio0-4 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
+	/* board_ahci_mv */
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+				 AHCI_HFLAG_MV_PATA),
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+		.link_flags	= AHCI_LFLAG_COMMON,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
 };
@@ -375,23 +456,23 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
-	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci_pi }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_pi }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci_pi }, /* ICH8 */
-	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci_pi }, /* ICH8M */
-	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci_pi }, /* ICH8M */
-	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci_pi }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci_pi }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci_pi }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci_pi }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci_pi }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci_pi }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci_pi }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci_pi }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci_pi }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci_pi }, /* ICH9M */
-	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci_pi }, /* ICH9 */
-	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci_pi }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
+	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
+	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
+	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
+	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
 
@@ -433,12 +514,51 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },		/* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },		/* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },		/* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci },		/* MCP73 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
+	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
 
 	/* SiS */
 	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
 	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
 	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
 
+	/* Marvell */
+	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
+
 	/* Generic, PCI class code for AHCI */
 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -464,17 +584,22 @@ static inline int ahci_nr_ports(u32 cap)
 	return (cap & 0x1f) + 1;
 }
 
-static inline void __iomem *ahci_port_base(struct ata_port *ap)
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+					     unsigned int port_no)
 {
-	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 
-	return mmio + 0x100 + (ap->port_no * 0x80);
+	return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+	return __ahci_port_base(ap->host, ap->port_no);
 }
 
 /**
  *	ahci_save_initial_config - Save and fixup initial config values
  *	@pdev: target PCI device
- *	@pi: associated ATA port info
  *	@hpriv: host private area to store config values
  *
  *	Some registers containing configuration info might be setup by
@@ -488,7 +613,6 @@ static inline void __iomem *ahci_port_base(struct ata_port *ap)
  *	None.
  */
 static void ahci_save_initial_config(struct pci_dev *pdev,
-				     const struct ata_port_info *pi,
 				     struct ahci_host_priv *hpriv)
 {
 	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
@@ -501,25 +625,41 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
 
-	/* some chips lie about 64bit support */
-	if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) {
+	/* some chips have errata preventing 64bit use */
+	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "controller can't do 64bit DMA, forcing 32bit\n");
 		cap &= ~HOST_CAP_64;
 	}
 
-	/* fixup zero port_map */
-	if (!port_map) {
-		port_map = (1 << ahci_nr_ports(cap)) - 1;
-		dev_printk(KERN_WARNING, &pdev->dev,
-			   "PORTS_IMPL is zero, forcing 0x%x\n", port_map);
+	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "controller can't do NCQ, turning off CAP_NCQ\n");
+		cap &= ~HOST_CAP_NCQ;
+	}
 
-		/* write the fixed up value to the PI register */
-		hpriv->saved_port_map = port_map;
+	if ((cap && HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "controller can't do PMP, turning off CAP_PMP\n");
+		cap &= ~HOST_CAP_PMP;
+	}
+
+	/*
+	 * Temporary Marvell 6145 hack: PATA port presence
+	 * is asserted through the standard AHCI port
+	 * presence register, as bit 4 (counting from 0)
+	 */
+	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "MV_AHCI HACK: port_map %x -> %x\n",
+			   hpriv->port_map,
+			   hpriv->port_map & 0xf);
+
+		port_map &= 0xf;
 	}
 
 	/* cross check port_map and cap.n_ports */
-	if (pi->flags & AHCI_FLAG_HONOR_PI) {
+	if (port_map) {
 		u32 tmp_port_map = port_map;
 		int n_ports = ahci_nr_ports(cap);
 
@@ -530,17 +670,26 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 			}
 		}
 
-		/* Whine if inconsistent.  No need to update cap.
-		 * port_map is used to determine number of ports.
+		/* If n_ports and port_map are inconsistent, whine and
+		 * clear port_map and let it be generated from n_ports.
 		 */
-		if (n_ports || tmp_port_map)
+		if (n_ports || tmp_port_map) {
 			dev_printk(KERN_WARNING, &pdev->dev,
 				   "nr_ports (%u) and implemented port map "
-				   "(0x%x) don't match\n",
+				   "(0x%x) don't match, using nr_ports\n",
 				   ahci_nr_ports(cap), port_map);
-	} else {
-		/* fabricate port_map from cap.nr_ports */
+			port_map = 0;
+		}
+	}
+
+	/* fabricate port_map from cap.nr_ports */
+	if (!port_map) {
 		port_map = (1 << ahci_nr_ports(cap)) - 1;
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+		/* write the fixed up value to the PI register */
+		hpriv->saved_port_map = port_map;
 	}
 
 	/* record values to use during operation */
@@ -567,38 +716,45 @@ static void ahci_restore_initial_config(struct ata_host *host)
 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
 }
 
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
 {
-	unsigned int sc_reg;
-
-	switch (sc_reg_in) {
-	case SCR_STATUS:	sc_reg = 0; break;
-	case SCR_CONTROL:	sc_reg = 1; break;
-	case SCR_ERROR:		sc_reg = 2; break;
-	case SCR_ACTIVE:	sc_reg = 3; break;
-	default:
-		return 0xffffffffU;
-	}
+	static const int offset[] = {
+		[SCR_STATUS]		= PORT_SCR_STAT,
+		[SCR_CONTROL]		= PORT_SCR_CTL,
+		[SCR_ERROR]		= PORT_SCR_ERR,
+		[SCR_ACTIVE]		= PORT_SCR_ACT,
+		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
+	};
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 
-	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+	if (sc_reg < ARRAY_SIZE(offset) &&
+	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+		return offset[sc_reg];
+	return 0;
 }
 
-
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
-			       u32 val)
+static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
-	unsigned int sc_reg;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	int offset = ahci_scr_offset(ap, sc_reg);
 
-	switch (sc_reg_in) {
-	case SCR_STATUS:	sc_reg = 0; break;
-	case SCR_CONTROL:	sc_reg = 1; break;
-	case SCR_ERROR:		sc_reg = 2; break;
-	case SCR_ACTIVE:	sc_reg = 3; break;
-	default:
-		return;
+	if (offset) {
+		*val = readl(port_mmio + offset);
+		return 0;
 	}
+	return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	int offset = ahci_scr_offset(ap, sc_reg);
 
-	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+	if (offset) {
+		writel(val, port_mmio + offset);
+		return 0;
+	}
+	return -EINVAL;
 }
 
 static void ahci_start_engine(struct ata_port *ap)
@@ -630,7 +786,7 @@ static int ahci_stop_engine(struct ata_port *ap)
 
 	/* wait for engine to stop. This could be as long as 500 msec */
 	tmp = ata_wait_register(port_mmio + PORT_CMD,
-			        PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
 	if (tmp & PORT_CMD_LIST_ON)
 		return -EIO;
 
@@ -701,6 +857,130 @@ static void ahci_power_up(struct ata_port *ap)
 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
 }
 
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 cmd;
+	struct ahci_port_priv *pp = ap->private_data;
+
+	/* IPM bits should be disabled by libata-core */
+	/* get the existing command bits */
+	cmd = readl(port_mmio + PORT_CMD);
+
+	/* disable ALPM and ASP */
+	cmd &= ~PORT_CMD_ASP;
+	cmd &= ~PORT_CMD_ALPE;
+
+	/* force the interface back to active */
+	cmd |= PORT_CMD_ICC_ACTIVE;
+
+	/* write out new cmd value */
+	writel(cmd, port_mmio + PORT_CMD);
+	cmd = readl(port_mmio + PORT_CMD);
+
+	/* wait 10ms to be sure we've come out of any low power state */
+	msleep(10);
+
+	/* clear out any PhyRdy stuff from interrupt status */
+	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+	/* go ahead and clean out PhyRdy Change from Serror too */
+	ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+	/*
+ 	 * Clear flag to indicate that we should ignore all PhyRdy
+ 	 * state changes
+ 	 */
+	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+	/*
+ 	 * Enable interrupts on Phy Ready.
+ 	 */
+	pp->intr_mask |= PORT_IRQ_PHYRDY;
+	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+	/*
+ 	 * don't change the link pm policy - we can be called
+ 	 * just to turn of link pm temporarily
+ 	 */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+	enum link_pm policy)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 cmd;
+	struct ahci_port_priv *pp = ap->private_data;
+	u32 asp;
+
+	/* Make sure the host is capable of link power management */
+	if (!(hpriv->cap & HOST_CAP_ALPM))
+		return -EINVAL;
+
+	switch (policy) {
+	case MAX_PERFORMANCE:
+	case NOT_AVAILABLE:
+		/*
+ 		 * if we came here with NOT_AVAILABLE,
+ 		 * it just means this is the first time we
+ 		 * have tried to enable - default to max performance,
+ 		 * and let the user go to lower power modes on request.
+ 		 */
+		ahci_disable_alpm(ap);
+		return 0;
+	case MIN_POWER:
+		/* configure HBA to enter SLUMBER */
+		asp = PORT_CMD_ASP;
+		break;
+	case MEDIUM_POWER:
+		/* configure HBA to enter PARTIAL */
+		asp = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+ 	 * Disable interrupts on Phy Ready. This keeps us from
+ 	 * getting woken up due to spurious phy ready interrupts
+	 * TBD - Hot plug should be done via polling now, is
+	 * that even supported?
+ 	 */
+	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+	/*
+ 	 * Set a flag to indicate that we should ignore all PhyRdy
+ 	 * state changes since these can happen now whenever we
+ 	 * change link state
+ 	 */
+	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+	/* get the existing command bits */
+	cmd = readl(port_mmio + PORT_CMD);
+
+	/*
+ 	 * Set ASP based on Policy
+ 	 */
+	cmd |= asp;
+
+	/*
+ 	 * Setting this bit will instruct the HBA to aggressively
+ 	 * enter a lower power link state when it's appropriate and
+ 	 * based on the value set above for ASP
+ 	 */
+	cmd |= PORT_CMD_ALPE;
+
+	/* write out new cmd value */
+	writel(cmd, port_mmio + PORT_CMD);
+	cmd = readl(port_mmio + PORT_CMD);
+
+	/* IPM bits should be set by libata-core */
+	return 0;
+}
+
 #ifdef CONFIG_PM
 static void ahci_power_down(struct ata_port *ap)
 {
@@ -723,7 +1003,7 @@ static void ahci_power_down(struct ata_port *ap)
 }
 #endif
 
-static void ahci_init_port(struct ata_port *ap)
+static void ahci_start_port(struct ata_port *ap)
 {
 	/* enable FIS reception */
 	ahci_start_fis_rx(ap);
@@ -759,8 +1039,16 @@ static int ahci_reset_controller(struct ata_host *host)
 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 tmp;
 
-	/* global controller reset */
+	/* we must be in AHCI mode, before using anything
+	 * AHCI-specific, such as HOST_RESET.
+	 */
 	tmp = readl(mmio + HOST_CTL);
+	if (!(tmp & HOST_AHCI_EN)) {
+		tmp |= HOST_AHCI_EN;
+		writel(tmp, mmio + HOST_CTL);
+	}
+
+	/* global controller reset */
 	if ((tmp & HOST_RESET) == 0) {
 		writel(tmp | HOST_RESET, mmio + HOST_CTL);
 		readl(mmio + HOST_CTL); /* flush */
@@ -797,39 +1085,63 @@ static int ahci_reset_controller(struct ata_host *host)
 	return 0;
 }
 
+static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
+			   int port_no, void __iomem *mmio,
+			   void __iomem *port_mmio)
+{
+	const char *emsg = NULL;
+	int rc;
+	u32 tmp;
+
+	/* make sure port is not active */
+	rc = ahci_deinit_port(ap, &emsg);
+	if (rc)
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "%s (%d)\n", emsg, rc);
+
+	/* clear SError */
+	tmp = readl(port_mmio + PORT_SCR_ERR);
+	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+	writel(tmp, port_mmio + PORT_SCR_ERR);
+
+	/* clear port IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+	if (tmp)
+		writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+	writel(1 << port_no, mmio + HOST_IRQ_STAT);
+}
+
 static void ahci_init_controller(struct ata_host *host)
 {
+	struct ahci_host_priv *hpriv = host->private_data;
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-	int i, rc;
+	int i;
+	void __iomem *port_mmio;
 	u32 tmp;
 
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap = host->ports[i];
-		void __iomem *port_mmio = ahci_port_base(ap);
-		const char *emsg = NULL;
-
-		if (ata_port_is_dummy(ap))
-			continue;
-
-		/* make sure port is not active */
-		rc = ahci_deinit_port(ap, &emsg);
-		if (rc)
-			dev_printk(KERN_WARNING, &pdev->dev,
-				   "%s (%d)\n", emsg, rc);
+	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
+		port_mmio = __ahci_port_base(host, 4);
 
-		/* clear SError */
-		tmp = readl(port_mmio + PORT_SCR_ERR);
-		VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
-		writel(tmp, port_mmio + PORT_SCR_ERR);
+		writel(0, port_mmio + PORT_IRQ_MASK);
 
 		/* clear port IRQ */
 		tmp = readl(port_mmio + PORT_IRQ_STAT);
 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
 		if (tmp)
 			writel(tmp, port_mmio + PORT_IRQ_STAT);
+	}
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
-		writel(1 << i, mmio + HOST_IRQ_STAT);
+		port_mmio = ahci_port_base(ap);
+		if (ata_port_is_dummy(ap))
+			continue;
+
+		ahci_port_init(pdev, ap, i, mmio, port_mmio);
 	}
 
 	tmp = readl(mmio + HOST_CTL);
@@ -867,85 +1179,115 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
 
-static int ahci_clo(struct ata_port *ap)
+static int ahci_kick_engine(struct ata_port *ap, int force_restart)
 {
 	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
 	struct ahci_host_priv *hpriv = ap->host->private_data;
 	u32 tmp;
+	int busy, rc;
 
-	if (!(hpriv->cap & HOST_CAP_CLO))
-		return -EOPNOTSUPP;
+	/* do we need to kick the port? */
+	busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
+	if (!busy && !force_restart)
+		return 0;
+
+	/* stop engine */
+	rc = ahci_stop_engine(ap);
+	if (rc)
+		goto out_restart;
 
+	/* need to do CLO? */
+	if (!busy) {
+		rc = 0;
+		goto out_restart;
+	}
+
+	if (!(hpriv->cap & HOST_CAP_CLO)) {
+		rc = -EOPNOTSUPP;
+		goto out_restart;
+	}
+
+	/* perform CLO */
 	tmp = readl(port_mmio + PORT_CMD);
 	tmp |= PORT_CMD_CLO;
 	writel(tmp, port_mmio + PORT_CMD);
 
+	rc = 0;
 	tmp = ata_wait_register(port_mmio + PORT_CMD,
 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
 	if (tmp & PORT_CMD_CLO)
-		return -EIO;
+		rc = -EIO;
 
-	return 0;
+	/* restart engine */
+ out_restart:
+	ahci_start_engine(ap);
+	return rc;
 }
 
-static int ahci_softreset(struct ata_port *ap, unsigned int *class,
-			  unsigned long deadline)
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+				struct ata_taskfile *tf, int is_cmd, u16 flags,
+				unsigned long timeout_msec)
 {
+	const u32 cmd_fis_len = 5; /* five dwords */
 	struct ahci_port_priv *pp = ap->private_data;
 	void __iomem *port_mmio = ahci_port_base(ap);
-	const u32 cmd_fis_len = 5; /* five dwords */
+	u8 *fis = pp->cmd_tbl;
+	u32 tmp;
+
+	/* prep the command */
+	ata_tf_to_fis(tf, pmp, is_cmd, fis);
+	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+	/* issue & wait */
+	writel(1, port_mmio + PORT_CMD_ISSUE);
+
+	if (timeout_msec) {
+		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+					1, timeout_msec);
+		if (tmp & 0x1) {
+			ahci_kick_engine(ap, 1);
+			return -EBUSY;
+		}
+	} else
+		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
+
+	return 0;
+}
+
+static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+			     int pmp, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
 	const char *reason = NULL;
+	unsigned long now, msecs;
 	struct ata_taskfile tf;
-	u32 tmp;
-	u8 *fis;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
-	if (ata_port_offline(ap)) {
+	if (ata_link_offline(link)) {
 		DPRINTK("PHY reports no device\n");
 		*class = ATA_DEV_NONE;
 		return 0;
 	}
 
 	/* prepare for SRST (AHCI-1.1 10.4.1) */
-	rc = ahci_stop_engine(ap);
-	if (rc) {
-		reason = "failed to stop engine";
-		goto fail_restart;
-	}
-
-	/* check BUSY/DRQ, perform Command List Override if necessary */
-	if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
-		rc = ahci_clo(ap);
-
-		if (rc == -EOPNOTSUPP) {
-			reason = "port busy but CLO unavailable";
-			goto fail_restart;
-		} else if (rc) {
-			reason = "port busy but CLO failed";
-			goto fail_restart;
-		}
-	}
-
-	/* restart engine */
-	ahci_start_engine(ap);
+	rc = ahci_kick_engine(ap, 1);
+	if (rc && rc != -EOPNOTSUPP)
+		ata_link_printk(link, KERN_WARNING,
+				"failed to reset engine (errno=%d)\n", rc);
 
-	ata_tf_init(ap->device, &tf);
-	fis = pp->cmd_tbl;
+	ata_tf_init(link->device, &tf);
 
 	/* issue the first D2H Register FIS */
-	ahci_fill_cmd_slot(pp, 0,
-			   cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+	msecs = 0;
+	now = jiffies;
+	if (time_after(now, deadline))
+		msecs = jiffies_to_msecs(deadline - now);
 
 	tf.ctl |= ATA_SRST;
-	ata_tf_to_fis(&tf, fis, 0);
-	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
-
-	writel(1, port_mmio + PORT_CMD_ISSUE);
-
-	tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
-	if (tmp & 0x1) {
+	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
 		rc = -EIO;
 		reason = "1st FIS failed";
 		goto fail;
@@ -955,24 +1297,11 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
 	msleep(1);
 
 	/* issue the second D2H Register FIS */
-	ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
-
 	tf.ctl &= ~ATA_SRST;
-	ata_tf_to_fis(&tf, fis, 0);
-	fis[1] &= ~(1 << 7);	/* turn off Command FIS bit */
-
-	writel(1, port_mmio + PORT_CMD_ISSUE);
-	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
+	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
 
-	/* spec mandates ">= 2ms" before checking status.
-	 * We wait 150ms, because that was the magic delay used for
-	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
-	 * between when the ATA command register is written, and then
-	 * status is checked.  Because waiting for "a while" before
-	 * checking status is fine, post SRST, we perform this magic
-	 * delay here as well.
-	 */
-	msleep(150);
+	/* wait a while before checking status */
+	ata_wait_after_reset(ap, deadline);
 
 	rc = ata_wait_ready(ap, deadline);
 	/* link occupied, -ENODEV too is an error */
@@ -985,16 +1314,26 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
 
- fail_restart:
-	ahci_start_engine(ap);
  fail:
-	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
+	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
 	return rc;
 }
 
-static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	int pmp = 0;
+
+	if (link->ap->flags & ATA_FLAG_PMP)
+		pmp = SATA_PMP_CTRL_PORT;
+
+	return ahci_do_softreset(link, class, pmp, deadline);
+}
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct ahci_port_priv *pp = ap->private_data;
 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
 	struct ata_taskfile tf;
@@ -1005,37 +1344,40 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
 	ahci_stop_engine(ap);
 
 	/* clear D2H reception area to properly wait for D2H FIS */
-	ata_tf_init(ap->device, &tf);
+	ata_tf_init(link->device, &tf);
 	tf.command = 0x80;
-	ata_tf_to_fis(&tf, d2h_fis, 0);
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
-	rc = sata_std_hardreset(ap, class, deadline);
+	rc = sata_std_hardreset(link, class, deadline);
 
 	ahci_start_engine(ap);
 
-	if (rc == 0 && ata_port_online(ap))
+	if (rc == 0 && ata_link_online(link))
 		*class = ahci_dev_classify(ap);
-	if (*class == ATA_DEV_UNKNOWN)
+	if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
 
 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
 	return rc;
 }
 
-static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
 				 unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
+	u32 serror;
 	int rc;
 
 	DPRINTK("ENTER\n");
 
 	ahci_stop_engine(ap);
 
-	rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context),
+	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
 				 deadline);
 
 	/* vt8251 needs SError cleared for the port to operate */
-	ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
+	ahci_scr_read(ap, SCR_ERROR, &serror);
+	ahci_scr_write(ap, SCR_ERROR, serror);
 
 	ahci_start_engine(ap);
 
@@ -1047,12 +1389,60 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
 	return rc ?: -EAGAIN;
 }
 
-static void ahci_postreset(struct ata_port *ap, unsigned int *class)
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
+	int rc;
+
+	ahci_stop_engine(ap);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = 0x80;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+				 deadline);
+
+	ahci_start_engine(ap);
+
+	if (rc || ata_link_offline(link))
+		return rc;
+
+	/* spec mandates ">= 2ms" before checking status */
+	msleep(150);
+
+	/* The pseudo configuration device on SIMG4726 attached to
+	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
+	 * hardreset if no device is attached to the first downstream
+	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
+	 * work around this, wait for !BSY only briefly.  If BSY isn't
+	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
+	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
+	 *
+	 * Wait for two seconds.  Devices attached to downstream port
+	 * which can't process the following IDENTIFY after this will
+	 * have to be reset again.  For most cases, this should
+	 * suffice while making probing snappish enough.
+	 */
+	rc = ata_wait_ready(ap, jiffies + 2 * HZ);
+	if (rc)
+		ahci_kick_engine(ap, 0);
+
+	return 0;
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+	struct ata_port *ap = link->ap;
 	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 new_tmp, tmp;
 
-	ata_std_postreset(ap, class);
+	ata_std_postreset(link, class);
 
 	/* Make sure port's ATAPI bit is set appropriately */
 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
@@ -1066,6 +1456,12 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 	}
 }
 
+static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	return ahci_do_softreset(link, class, link->pmp, deadline);
+}
+
 static u8 ahci_check_status(struct ata_port *ap)
 {
 	void __iomem *mmio = ap->ioaddr.cmd_addr;
@@ -1124,7 +1520,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 */
 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
 
-	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
 	if (is_atapi) {
 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
@@ -1137,7 +1533,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	/*
 	 * Fill in command slot information.
 	 */
-	opts = cmd_fis_len | n_elem << 16;
+	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
 	if (qc->tf.flags & ATA_TFLAG_WRITE)
 		opts |= AHCI_CMD_WRITE;
 	if (is_atapi)
@@ -1148,66 +1544,87 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 
 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 {
+	struct ahci_host_priv *hpriv = ap->host->private_data;
 	struct ahci_port_priv *pp = ap->private_data;
-	struct ata_eh_info *ehi = &ap->eh_info;
-	unsigned int err_mask = 0, action = 0;
-	struct ata_queued_cmd *qc;
+	struct ata_eh_info *host_ehi = &ap->link.eh_info;
+	struct ata_link *link = NULL;
+	struct ata_queued_cmd *active_qc;
+	struct ata_eh_info *active_ehi;
 	u32 serror;
 
-	ata_ehi_clear_desc(ehi);
+	/* determine active link */
+	ata_port_for_each_link(link, ap)
+		if (ata_link_active(link))
+			break;
+	if (!link)
+		link = &ap->link;
+
+	active_qc = ata_qc_from_tag(ap, link->active_tag);
+	active_ehi = &link->eh_info;
+
+	/* record irq stat */
+	ata_ehi_clear_desc(host_ehi);
+	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
 
 	/* AHCI needs SError cleared; otherwise, it might lock up */
-	serror = ahci_scr_read(ap, SCR_ERROR);
+	ahci_scr_read(ap, SCR_ERROR, &serror);
 	ahci_scr_write(ap, SCR_ERROR, serror);
-
-	/* analyze @irq_stat */
-	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+	host_ehi->serror |= serror;
 
 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
-	if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
+	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
 		irq_stat &= ~PORT_IRQ_IF_ERR;
 
 	if (irq_stat & PORT_IRQ_TF_ERR) {
-		err_mask |= AC_ERR_DEV;
-		if (ap->flags & AHCI_FLAG_IGN_SERR_INTERNAL)
-			serror &= ~SERR_INTERNAL;
+		/* If qc is active, charge it; otherwise, the active
+		 * link.  There's no active qc on NCQ errors.  It will
+		 * be determined by EH by reading log page 10h.
+		 */
+		if (active_qc)
+			active_qc->err_mask |= AC_ERR_DEV;
+		else
+			active_ehi->err_mask |= AC_ERR_DEV;
+
+		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+			host_ehi->serror &= ~SERR_INTERNAL;
+	}
+
+	if (irq_stat & PORT_IRQ_UNK_FIS) {
+		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+		active_ehi->err_mask |= AC_ERR_HSM;
+		active_ehi->action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(active_ehi,
+				  "unknown FIS %08x %08x %08x %08x" ,
+				  unk[0], unk[1], unk[2], unk[3]);
+	}
+
+	if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
+		active_ehi->err_mask |= AC_ERR_HSM;
+		active_ehi->action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(active_ehi, "incorrect PMP");
 	}
 
 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
-		err_mask |= AC_ERR_HOST_BUS;
-		action |= ATA_EH_SOFTRESET;
+		host_ehi->err_mask |= AC_ERR_HOST_BUS;
+		host_ehi->action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(host_ehi, "host bus error");
 	}
 
 	if (irq_stat & PORT_IRQ_IF_ERR) {
-		err_mask |= AC_ERR_ATA_BUS;
-		action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi, ", interface fatal error");
+		host_ehi->err_mask |= AC_ERR_ATA_BUS;
+		host_ehi->action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(host_ehi, "interface fatal error");
 	}
 
 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
-		ata_ehi_hotplugged(ehi);
-		ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+		ata_ehi_hotplugged(host_ehi);
+		ata_ehi_push_desc(host_ehi, "%s",
+			irq_stat & PORT_IRQ_CONNECT ?
 			"connection status changed" : "PHY RDY changed");
 	}
 
-	if (irq_stat & PORT_IRQ_UNK_FIS) {
-		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
-
-		err_mask |= AC_ERR_HSM;
-		action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
-				  unk[0], unk[1], unk[2], unk[3]);
-	}
-
 	/* okay, let's hand over to EH */
-	ehi->serror |= serror;
-	ehi->action |= action;
-
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (qc)
-		qc->err_mask |= err_mask;
-	else
-		ehi->err_mask |= err_mask;
 
 	if (irq_stat & PORT_IRQ_FREEZE)
 		ata_port_freeze(ap);
@@ -1215,96 +1632,78 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 		ata_port_abort(ap);
 }
 
-static void ahci_host_intr(struct ata_port *ap)
+static void ahci_port_intr(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
-	struct ata_eh_info *ehi = &ap->eh_info;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	struct ahci_port_priv *pp = ap->private_data;
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
 	u32 status, qc_active;
-	int rc, known_irq = 0;
+	int rc;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
+	/* ignore BAD_PMP while resetting */
+	if (unlikely(resetting))
+		status &= ~PORT_IRQ_BAD_PMP;
+
+	/* If we are getting PhyRdy, this is
+ 	 * just a power state change, we should
+ 	 * clear out this, plus the PhyRdy/Comm
+ 	 * Wake bits from Serror
+ 	 */
+	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+		(status & PORT_IRQ_PHYRDY)) {
+		status &= ~PORT_IRQ_PHYRDY;
+		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+	}
+
 	if (unlikely(status & PORT_IRQ_ERROR)) {
 		ahci_error_intr(ap, status);
 		return;
 	}
 
-	if (ap->sactive)
+	if (status & PORT_IRQ_SDB_FIS) {
+		/* If SNotification is available, leave notification
+		 * handling to sata_async_notification().  If not,
+		 * emulate it by snooping SDB FIS RX area.
+		 *
+		 * Snooping FIS RX area is probably cheaper than
+		 * poking SNotification but some controllers which
+		 * implement SNotification, ICH9 for example, don't
+		 * store the AN SDB FIS into the receive area.
+		 */
+		if (hpriv->cap & HOST_CAP_SNTF)
+			sata_async_notification(ap);
+		else {
+			/* If the 'N' bit in word 0 of the FIS is set,
+			 * we just received asynchronous notification.
+			 * Tell libata about it.
+			 */
+			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+			u32 f0 = le32_to_cpu(f[0]);
+
+			if (f0 & (1 << 15))
+				sata_async_notification(ap);
+		}
+	}
+
+	/* pp->active_link is valid iff any command is in flight */
+	if (ap->qc_active && pp->active_link->sactive)
 		qc_active = readl(port_mmio + PORT_SCR_ACT);
 	else
 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
 
 	rc = ata_qc_complete_multiple(ap, qc_active, NULL);
-	if (rc > 0)
-		return;
-	if (rc < 0) {
+
+	/* while resetting, invalid completions are expected */
+	if (unlikely(rc < 0 && !resetting)) {
 		ehi->err_mask |= AC_ERR_HSM;
 		ehi->action |= ATA_EH_SOFTRESET;
 		ata_port_freeze(ap);
-		return;
 	}
-
-	/* hmmm... a spurious interupt */
-
-	/* if !NCQ, ignore.  No modern ATA device has broken HSM
-	 * implementation for non-NCQ commands.
-	 */
-	if (!ap->sactive)
-		return;
-
-	if (status & PORT_IRQ_D2H_REG_FIS) {
-		if (!pp->ncq_saw_d2h)
-			ata_port_printk(ap, KERN_INFO,
-				"D2H reg with I during NCQ, "
-				"this message won't be printed again\n");
-		pp->ncq_saw_d2h = 1;
-		known_irq = 1;
-	}
-
-	if (status & PORT_IRQ_DMAS_FIS) {
-		if (!pp->ncq_saw_dmas)
-			ata_port_printk(ap, KERN_INFO,
-				"DMAS FIS during NCQ, "
-				"this message won't be printed again\n");
-		pp->ncq_saw_dmas = 1;
-		known_irq = 1;
-	}
-
-	if (status & PORT_IRQ_SDB_FIS) {
-		const __le32 *f = pp->rx_fis + RX_FIS_SDB;
-
-		if (le32_to_cpu(f[1])) {
-			/* SDB FIS containing spurious completions
-			 * might be dangerous, whine and fail commands
-			 * with HSM violation.  EH will turn off NCQ
-			 * after several such failures.
-			 */
-			ata_ehi_push_desc(ehi,
-				"spurious completions during NCQ "
-				"issue=0x%x SAct=0x%x FIS=%08x:%08x",
-				readl(port_mmio + PORT_CMD_ISSUE),
-				readl(port_mmio + PORT_SCR_ACT),
-				le32_to_cpu(f[0]), le32_to_cpu(f[1]));
-			ehi->err_mask |= AC_ERR_HSM;
-			ehi->action |= ATA_EH_SOFTRESET;
-			ata_port_freeze(ap);
-		} else {
-			if (!pp->ncq_saw_sdb)
-				ata_port_printk(ap, KERN_INFO,
-					"spurious SDB FIS %08x:%08x during NCQ, "
-					"this message won't be printed again\n",
-					le32_to_cpu(f[0]), le32_to_cpu(f[1]));
-			pp->ncq_saw_sdb = 1;
-		}
-		known_irq = 1;
-	}
-
-	if (!known_irq)
-		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
-				"(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
-				status, ap->active_tag, ap->sactive);
 }
 
 static void ahci_irq_clear(struct ata_port *ap)
@@ -1331,9 +1730,9 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *p
 	if (!irq_stat)
 		return IRQ_NONE;
 
-        spin_lock(&host->lock);
+	spin_lock(&host->lock);
 
-        for (i = 0; i < host->n_ports; i++) {
+	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap;
 
 		if (!(irq_stat & (1 << i)))
@@ -1341,7 +1740,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *p
 
 		ap = host->ports[i];
 		if (ap) {
-			ahci_host_intr(ap);
+			ahci_port_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -1369,6 +1768,13 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_port_priv *pp = ap->private_data;
+
+	/* Keep track of the currently active link.  It will be used
+	 * in completion path to determine whether NCQ phase is in
+	 * progress.
+	 */
+	pp->active_link = qc->dev->link;
 
 	if (qc->tf.protocol == ATA_PROT_NCQ)
 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
@@ -1391,6 +1797,7 @@ static void ahci_thaw(struct ata_port *ap)
 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
 	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp;
+	struct ahci_port_priv *pp = ap->private_data;
 
 	/* clear IRQ */
 	tmp = readl(port_mmio + PORT_IRQ_STAT);
@@ -1398,7 +1805,7 @@ static void ahci_thaw(struct ata_port *ap)
 	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
 
 	/* turn IRQ back on */
-	writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
+	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
 }
 
 static void ahci_error_handler(struct ata_port *ap)
@@ -1410,8 +1817,10 @@ static void ahci_error_handler(struct ata_port *ap)
 	}
 
 	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
-		  ahci_postreset);
+	sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
+		       ahci_hardreset, ahci_postreset,
+		       sata_pmp_std_prereset, ahci_pmp_softreset,
+		       sata_pmp_std_hardreset, sata_pmp_std_postreset);
 }
 
 static void ahci_vt8251_error_handler(struct ata_port *ap)
@@ -1427,15 +1836,67 @@ static void ahci_vt8251_error_handler(struct ata_port *ap)
 		  ahci_postreset);
 }
 
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+static void ahci_p5wdh_error_handler(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
-
-	if (qc->flags & ATA_QCFLAG_FAILED) {
-		/* make DMA engine forget about the failed command */
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+		/* restart engine */
 		ahci_stop_engine(ap);
 		ahci_start_engine(ap);
 	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
+		  ahci_postreset);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	/* make DMA engine forget about the failed command */
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		ahci_kick_engine(ap, 1);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_port_priv *pp = ap->private_data;
+	u32 cmd;
+
+	cmd = readl(port_mmio + PORT_CMD);
+	cmd |= PORT_CMD_PMP;
+	writel(cmd, port_mmio + PORT_CMD);
+
+	pp->intr_mask |= PORT_IRQ_BAD_PMP;
+	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+	void __iomem *port_mmio = ahci_port_base(ap);
+	struct ahci_port_priv *pp = ap->private_data;
+	u32 cmd;
+
+	cmd = readl(port_mmio + PORT_CMD);
+	cmd &= ~PORT_CMD_PMP;
+	writel(cmd, port_mmio + PORT_CMD);
+
+	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+	ahci_power_up(ap);
+	ahci_start_port(ap);
+
+	if (ap->nr_pmp_links)
+		ahci_pmp_attach(ap);
+	else
+		ahci_pmp_detach(ap);
+
+	return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1449,20 +1910,12 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 		ahci_power_down(ap);
 	else {
 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
-		ahci_init_port(ap);
+		ahci_start_port(ap);
 	}
 
 	return rc;
 }
 
-static int ahci_port_resume(struct ata_port *ap)
-{
-	ahci_power_up(ap);
-	ahci_init_port(ap);
-
-	return 0;
-}
-
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
@@ -1554,15 +2007,16 @@ static int ahci_port_start(struct ata_port *ap)
 	pp->cmd_tbl = mem;
 	pp->cmd_tbl_dma = mem_dma;
 
-	ap->private_data = pp;
-
-	/* power up port */
-	ahci_power_up(ap);
+	/*
+	 * Save off initial list of interrupts to be enabled.
+	 * This could be changed later
+	 */
+	pp->intr_mask = DEF_PORT_IRQ;
 
-	/* initialize port */
-	ahci_init_port(ap);
+	ap->private_data = pp;
 
-	return 0;
+	/* engage engines, captain */
+	return ahci_port_resume(ap);
 }
 
 static void ahci_port_stop(struct ata_port *ap)
@@ -1643,12 +2097,12 @@ static void ahci_print_info(struct ata_host *host)
 	dev_printk(KERN_INFO, &pdev->dev,
 		"AHCI %02x%02x.%02x%02x "
 		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
-	       	,
+		,
 
-	       	(vers >> 24) & 0xff,
-	       	(vers >> 16) & 0xff,
-	       	(vers >> 8) & 0xff,
-	       	vers & 0xff,
+		(vers >> 24) & 0xff,
+		(vers >> 16) & 0xff,
+		(vers >> 8) & 0xff,
+		vers & 0xff,
 
 		((cap >> 8) & 0x1f) + 1,
 		(cap & 0x1f) + 1,
@@ -1658,12 +2112,13 @@ static void ahci_print_info(struct ata_host *host)
 
 	dev_printk(KERN_INFO, &pdev->dev,
 		"flags: "
-	       	"%s%s%s%s%s%s"
-	       	"%s%s%s%s%s%s%s\n"
-	       	,
+		"%s%s%s%s%s%s%s"
+		"%s%s%s%s%s%s%s\n"
+		,
 
 		cap & (1 << 31) ? "64bit " : "",
 		cap & (1 << 30) ? "ncq " : "",
+		cap & (1 << 29) ? "sntf " : "",
 		cap & (1 << 28) ? "ilck " : "",
 		cap & (1 << 27) ? "stag " : "",
 		cap & (1 << 26) ? "pm " : "",
@@ -1679,6 +2134,51 @@ static void ahci_print_info(struct ata_host *host)
 		);
 }
 
+/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
+ * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
+ * support PMP and the 4726 either directly exports the device
+ * attached to the first downstream port or acts as a hardware storage
+ * controller and emulates a single ATA device (can be RAID 0/1 or some
+ * other configuration).
+ *
+ * When there's no device attached to the first downstream port of the
+ * 4726, "Config Disk" appears, which is a pseudo ATA device to
+ * configure the 4726.  However, ATA emulation of the device is very
+ * lame.  It doesn't send signature D2H Reg FIS after the initial
+ * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
+ *
+ * The following function works around the problem by always using
+ * hardreset on the port and not depending on receiving signature FIS
+ * afterward.  If signature FIS isn't received soon, ATA class is
+ * assumed without follow-up softreset.
+ */
+static void ahci_p5wdh_workaround(struct ata_host *host)
+{
+	static struct dmi_system_id sysids[] = {
+		{
+			.ident = "P5W DH Deluxe",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR,
+					  "ASUSTEK COMPUTER INC"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+			},
+		},
+		{ }
+	};
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+
+	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
+	    dmi_check_system(sysids)) {
+		struct ata_port *ap = host->ports[1];
+
+		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
+			   "Deluxe on-board SIMG4726 workaround\n");
+
+		ap->ops = &ahci_p5wdh_ops;
+		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
+	}
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -1707,20 +2207,40 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	if (pci_enable_msi(pdev))
-		pci_intx(pdev, 1);
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
+		u8 map;
+
+		/* ICH6s share the same PCI ID for both piix and ahci
+		 * modes.  Enabling ahci mode while MAP indicates
+		 * combined mode is a bad idea.  Yield to ata_piix.
+		 */
+		pci_read_config_byte(pdev, ICH_MAP, &map);
+		if (map & 0x3) {
+			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
+				   "combined mode, can't enable AHCI mode\n");
+			return -ENODEV;
+		}
+	}
 
 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
 		return -ENOMEM;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
+		pci_intx(pdev, 1);
 
 	/* save initial config */
-	ahci_save_initial_config(pdev, &pi, hpriv);
+	ahci_save_initial_config(pdev, hpriv);
 
 	/* prepare host */
-	if (!(pi.flags & AHCI_FLAG_NO_NCQ) && (hpriv->cap & HOST_CAP_NCQ))
+	if (hpriv->cap & HOST_CAP_NCQ)
 		pi.flags |= ATA_FLAG_NCQ;
 
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
 	if (!host)
 		return -ENOMEM;
@@ -1728,16 +2248,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->private_data = hpriv;
 
 	for (i = 0; i < host->n_ports; i++) {
-		if (hpriv->port_map & (1 << i)) {
-			struct ata_port *ap = host->ports[i];
-			void __iomem *port_mmio = ahci_port_base(ap);
+		struct ata_port *ap = host->ports[i];
+		void __iomem *port_mmio = ahci_port_base(ap);
+
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+				   0x100 + ap->port_no * 0x80, "port");
 
+		/* set initial link pm policy */
+		ap->pm_policy = NOT_AVAILABLE;
+
+		/* standard SATA port setup */
+		if (hpriv->port_map & (1 << i))
 			ap->ioaddr.cmd_addr = port_mmio;
-			ap->ioaddr.scr_addr = port_mmio + PORT_SCR;
-		} else
-			host->ports[i]->ops = &ata_dummy_port_ops;
+
+		/* disabled/not-implemented port */
+		else
+			ap->ops = &ata_dummy_port_ops;
 	}
 
+	/* apply workaround for ASUS P5W DH Deluxe mainboard */
+	ahci_p5wdh_workaround(host);
+
 	/* initialize adapter */
 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
 	if (rc)
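
As an aside for readers following the ahci_port_intr() hunk above: when the
controller lacks SNotification (HOST_CAP_SNTF), the new code snoops the
received Set Device Bits FIS and treats bit 15 of its first dword as the 'N'
(asynchronous notification) flag.  A minimal stand-alone sketch of that bit
test -- plain user-space C, not taken from the patch, with a made-up example
value and helper name:

#include <stdio.h>
#include <stdint.h>

/* First dword of a Set Device Bits FIS: the low byte is the FIS type
 * (0xA1) and bit 15 is the 'N' (asynchronous notification) flag. */
#define SDB_FIS_TYPE	0xA1
#define SDB_FIS_NOTIFY	(1u << 15)

static int sdb_fis_has_notification(uint32_t fis_dword0)
{
	if ((fis_dword0 & 0xff) != SDB_FIS_TYPE)
		return 0;	/* not an SDB FIS */
	return !!(fis_dword0 & SDB_FIS_NOTIFY);
}

int main(void)
{
	uint32_t example = SDB_FIS_TYPE | SDB_FIS_NOTIFY;	/* hypothetical */

	printf("notification: %d\n", sdb_fis_has_notification(example));
	return 0;
}

Controllers that do implement SNotification skip the snooping and let
sata_async_notification() look at the SNotification register instead, as the
comment in the hunk explains.
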
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 7565f02..9032998 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "ata_generic"
-#define DRV_VERSION "0.2.12"
+#define DRV_VERSION "0.2.13"
 
 /*
  *	A generic parallel ATA driver using libata
@@ -34,7 +34,7 @@
 
 /**
  *	generic_set_mode	-	mode setting
- *	@ap: interface to set up
+ *	@link: link to set up
  *	@unused: returned device on error
  *
  *	Use a non standard set_mode function. We don't want to be tuned.
@@ -43,24 +43,24 @@
  *	and respect them.
  */
 
-static int generic_set_mode(struct ata_port *ap, struct ata_device **unused)
+static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
 {
+	struct ata_port *ap = link->ap;
 	int dma_enabled = 0;
-	int i;
+	struct ata_device *dev;
 
 	/* Bits 5 and 6 indicate if DMA is active on master/slave */
 	if (ap->ioaddr.bmdma_addr)
-		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 			/* We don't really care */
 			dev->pio_mode = XFER_PIO_0;
 			dev->dma_mode = XFER_MW_DMA_0;
 			/* We do need the right mode information for DMA or PIO
 			   and this comes from the current configuration flags */
-			if (dma_enabled & (1 << (5 + i))) {
+			if (dma_enabled & (1 << (5 + dev->devno))) {
 				ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
 				dev->flags &= ~ATA_DFLAG_PIO;
 			} else {
@@ -95,7 +95,6 @@ static struct scsi_host_template generic_sht = {
 static struct ata_port_operations generic_port_ops = {
 	.set_mode	= generic_set_mode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -121,9 +120,8 @@ static struct ata_port_operations generic_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int all_generic_ide;		/* Set to claim all devices */
@@ -143,10 +141,10 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
 	u16 command;
 	static const struct ata_port_info info = {
 		.sht = &generic_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &generic_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
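
A brief aside on the generic_set_mode() conversion above: the driver now
samples the BMDMA status register (rather than the command register) and, per
the comment kept in the hunk, bits 5 and 6 report whether firmware left DMA
enabled for the master and slave device.  A stand-alone sketch of that
per-device bit test -- user-space C, not taken from the patch, with a made-up
status value and helper name:

#include <stdio.h>
#include <stdint.h>

/* BMDMA status register: bit 5 covers drive 0 (master), bit 6 covers
 * drive 1 (slave); devno is 0 for master, 1 for slave. */
static int firmware_enabled_dma(uint8_t bmdma_status, int devno)
{
	return !!(bmdma_status & (1u << (5 + devno)));
}

int main(void)
{
	uint8_t status = 1u << 5;	/* hypothetical: master DMA enabled */

	printf("master=%d slave=%d\n",
	       firmware_enabled_dma(status, 0),
	       firmware_enabled_dma(status, 1));
	return 0;
}
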
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 53e025b..f7a243a 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -91,9 +91,10 @@
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
+#include <linux/dmi.h>
 
 #define DRV_NAME	"ata_piix"
-#define DRV_VERSION	"2.11"
+#define DRV_VERSION	"2.12"
 
 enum {
 	PIIX_IOCFG		= 0x54, /* IDE I/O configuration register */
@@ -118,18 +119,19 @@ enum {
 	PIIX_80C_SEC		= (1 << 7) | (1 << 6),
 
 	/* controller IDs */
-	piix_pata_33		= 0,	/* PIIX4 at 33Mhz */
-	ich_pata_33		= 1,	/* ICH up to UDMA 33 only */
-	ich_pata_66		= 2,	/* ICH up to 66 Mhz */
-	ich_pata_100		= 3,	/* ICH up to UDMA 100 */
-	ich_pata_133		= 4,	/* ICH up to UDMA 133 */
-	ich5_sata		= 5,
-	ich6_sata		= 6,
-	ich6_sata_ahci		= 7,
-	ich6m_sata_ahci		= 8,
-	ich8_sata_ahci		= 9,
-	piix_pata_mwdma		= 10,	/* PIIX3 MWDMA only */
-	tolapai_sata_ahci	= 11,
+	piix_pata_mwdma		= 0,	/* PIIX3 MWDMA only */
+	piix_pata_33,			/* PIIX4 at 33Mhz */
+	ich_pata_33,			/* ICH up to UDMA 33 only */
+	ich_pata_66,			/* ICH up to 66 Mhz */
+	ich_pata_100,			/* ICH up to UDMA 100 */
+	ich5_sata,
+	ich6_sata,
+	ich6_sata_ahci,
+	ich6m_sata_ahci,
+	ich8_sata_ahci,
+	ich8_2port_sata,
+	ich8m_apple_sata_ahci,		/* locks up on second port enable */
+	tolapai_sata_ahci,
 
 	/* constants for mapping table */
 	P0			= 0,  /* port 0 */
@@ -141,6 +143,9 @@ enum {
 	RV			= -3, /* reserved */
 
 	PIIX_AHCI_DEVICE	= 6,
+
+	/* host->flags bits */
+	PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
 };
 
 struct piix_map_db {
@@ -153,13 +158,17 @@ struct piix_host_priv {
 	const int *map;
 };
 
-static int piix_init_one (struct pci_dev *pdev,
-				    const struct pci_device_id *ent);
+static int piix_init_one(struct pci_dev *pdev,
+			 const struct pci_device_id *ent);
 static void piix_pata_error_handler(struct ata_port *ap);
-static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
-static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
-static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static int ich_pata_cable_detect(struct ata_port *ap);
+#ifdef CONFIG_PM
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int piix_pci_device_resume(struct pci_dev *pdev);
+#endif
 
 static unsigned int in_module_init = 1;
 
@@ -191,7 +200,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	/* Intel ICH5 */
-	{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
+	{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	/* C-ICH (i810E2) */
 	{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	/* ESB (855GME/875P + 6300ESB) UDMA 100  */
@@ -199,8 +208,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* ICH6 (and 6) (i915) UDMA 100 */
 	{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	/* ICH7/7-R (i945, i975) UDMA 100*/
-	{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
+	{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 	{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+	/* ICH8 Mobile PATA Controller */
+	{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 
 	/* NOTE: The following PCI ids must be kept in sync with the
 	 * list in drivers/pci/quirks.c.
@@ -229,19 +240,21 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* SATA Controller 1 IDE (ICH8) */
 	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller 2 IDE (ICH8) */
-	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* Mobile SATA Controller IDE (ICH8M) */
 	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	/* Mobile SATA Controller IDE (ICH8M), Apple */
+	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata_ahci },
 	/* SATA Controller IDE (ICH9) */
 	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller IDE (ICH9) */
-	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9) */
-	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9M) */
-	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9M) */
-	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
+	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
 	/* SATA Controller IDE (ICH9M) */
 	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
 	/* SATA Controller IDE (Tolapai) */
@@ -256,8 +269,8 @@ static struct pci_driver piix_pci_driver = {
 	.probe			= piix_init_one,
 	.remove			= ata_pci_remove_one,
 #ifdef CONFIG_PM
-	.suspend		= ata_pci_device_suspend,
-	.resume			= ata_pci_device_resume,
+	.suspend		= piix_pci_device_suspend,
+	.resume			= piix_pci_device_resume,
 #endif
 };
 
@@ -280,7 +293,6 @@ static struct scsi_host_template piix_sht = {
 };
 
 static const struct ata_port_operations piix_pata_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -308,13 +320,11 @@ static const struct ata_port_operations piix_pata_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_operations ich_pata_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= ich_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -342,14 +352,11 @@ static const struct ata_port_operations ich_pata_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_operations piix_sata_ops = {
-	.port_disable		= ata_port_disable,
-
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -372,7 +379,6 @@ static const struct ata_port_operations piix_sata_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
 };
@@ -415,7 +421,7 @@ static const struct piix_map_db ich6m_map_db = {
 	 */
 	.map = {
 		/* PM   PS   SM   SS       MAP */
-		{  P0,  P2,  RV,  RV }, /* 00b */
+		{  P0,  P2,  NA,  NA }, /* 00b */
 		{ IDE, IDE,  P1,  P3 }, /* 01b */
 		{  P0,  P2, IDE, IDE }, /* 10b */
 		{  RV,  RV,  RV,  RV },
@@ -424,12 +430,36 @@ static const struct piix_map_db ich6m_map_db = {
 
 static const struct piix_map_db ich8_map_db = {
 	.mask = 0x3,
-	.port_enable = 0x3,
+	.port_enable = 0xf,
 	.map = {
 		/* PM   PS   SM   SS       MAP */
 		{  P0,  P2,  P1,  P3 }, /* 00b (hardwired when in AHCI) */
 		{  RV,  RV,  RV,  RV },
-		{  IDE,  IDE,  NA,  NA }, /* 10b (IDE mode) */
+		{  P0,  P2, IDE, IDE }, /* 10b (IDE mode) */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich8_2port_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x3,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  NA,  P1,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV }, /* 01b */
+		{  RV,  RV,  RV,  RV }, /* 10b */
+		{  RV,  RV,  RV,  RV },
+	},
+};
+
+static const struct piix_map_db ich8m_apple_map_db = {
+	.mask = 0x3,
+	.port_enable = 0x1,
+	.map = {
+		/* PM   PS   SM   SS       MAP */
+		{  P0,  NA,  NA,  NA }, /* 00b */
+		{  RV,  RV,  RV,  RV },
+		{  P0,  P2, IDE, IDE }, /* 10b */
 		{  RV,  RV,  RV,  RV },
 	},
 };
@@ -452,11 +482,22 @@ static const struct piix_map_db *piix_map_db_table[] = {
 	[ich6_sata_ahci]	= &ich6_map_db,
 	[ich6m_sata_ahci]	= &ich6m_map_db,
 	[ich8_sata_ahci]	= &ich8_map_db,
+	[ich8_2port_sata]	= &ich8_2port_map_db,
+	[ich8m_apple_sata_ahci]	= &ich8m_apple_map_db,
 	[tolapai_sata_ahci]	= &tolapai_map_db,
 };
 
 static struct ata_port_info piix_port_info[] = {
-	/* piix_pata_33: 0:  PIIX4 at 33MHz */
+	[piix_pata_mwdma] = 	/* PIIX3 MWDMA only */
+	{
+		.sht		= &piix_sht,
+		.flags		= PIIX_PATA_FLAGS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+		.port_ops	= &piix_pata_ops,
+	},
+
+	[piix_pata_33] =	/* PIIX4 at 33MHz */
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
@@ -466,7 +507,7 @@ static struct ata_port_info piix_port_info[] = {
 		.port_ops	= &piix_pata_ops,
 	},
 
-	/* ich_pata_33: 1 	ICH0 - ICH at 33Mhz*/
+	[ich_pata_33] = 	/* ICH0 - ICH at 33Mhz*/
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
@@ -475,7 +516,8 @@ static struct ata_port_info piix_port_info[] = {
 		.udma_mask	= ATA_UDMA2, /* UDMA33 */
 		.port_ops	= &ich_pata_ops,
 	},
-	/* ich_pata_66: 2 	ICH controllers up to 66MHz */
+
+	[ich_pata_66] = 	/* ICH controllers up to 66MHz */
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS,
@@ -485,7 +527,7 @@ static struct ata_port_info piix_port_info[] = {
 		.port_ops	= &ich_pata_ops,
 	},
 
-	/* ich_pata_100: 3 */
+	[ich_pata_100] =
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
@@ -495,79 +537,82 @@ static struct ata_port_info piix_port_info[] = {
 		.port_ops	= &ich_pata_ops,
 	},
 
-	/* ich_pata_133: 4 	ICH with full UDMA6 */
+	[ich5_sata] =
 	{
 		.sht		= &piix_sht,
-		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
-		.pio_mask 	= 0x1f,	/* pio 0-4 */
-		.mwdma_mask	= 0x06, /* Check: maybe 0x07  */
-		.udma_mask	= ATA_UDMA6, /* UDMA133 */
-		.port_ops	= &ich_pata_ops,
+		.flags		= PIIX_SATA_FLAGS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich5_sata: 5 */
+	[ich6_sata] =
 	{
 		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6_sata: 6 */
+	[ich6_sata_ahci] =
 	{
 		.sht		= &piix_sht,
-		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6_sata_ahci: 7 */
+	[ich6m_sata_ahci] =
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich6m_sata_ahci: 8 */
+	[ich8_sata_ahci] =
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* ich8_sata_ahci: 9 */
+	[ich8_2port_sata] =
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
 				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
 
-	/* piix_pata_mwdma: 10:  PIIX3 MWDMA only */
+	[tolapai_sata_ahci] =
 	{
 		.sht		= &piix_sht,
-		.flags		= PIIX_PATA_FLAGS,
+		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
+				  PIIX_FLAG_AHCI,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.mwdma_mask	= 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
-		.port_ops	= &piix_pata_ops,
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &piix_sata_ops,
 	},
 
-	/* tolapai_sata_ahci: 11: */
+	[ich8m_apple_sata_ahci] =
 	{
 		.sht		= &piix_sht,
 		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SCR |
@@ -577,6 +622,7 @@ static struct ata_port_info piix_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &piix_sata_ops,
 	},
+
 };
 
 static struct pci_bits piix_enable_bits[] = {
@@ -603,8 +649,10 @@ struct ich_laptop {
 static const struct ich_laptop ich_laptop[] = {
 	/* devid, subvendor, subdev */
 	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
+	{ 0x27DF, 0x1025, 0x0102 },	/* ICH7 on Acer 5602aWLMi */
 	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
+	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
 	/* end marker */
 	{ 0, }
@@ -631,9 +679,9 @@ static int ich_pata_cable_detect(struct ata_port *ap)
 	while (lap->device) {
 		if (lap->device == pdev->device &&
 		    lap->subvendor == pdev->subsystem_vendor &&
-		    lap->subdevice == pdev->subsystem_device) {
+		    lap->subdevice == pdev->subsystem_device)
 			return ATA_CBL_PATA40_SHORT;
-		}
+
 		lap++;
 	}
 
@@ -647,19 +695,20 @@ static int ich_pata_cable_detect(struct ata_port *ap)
 
 /**
  *	piix_pata_prereset - prereset for PATA host controller
- *	@ap: Target port
+ *	@link: Target link
  *	@deadline: deadline jiffies for the operation
  *
  *	LOCKING:
  *	None (inherited from caller).
  */
-static int piix_pata_prereset(struct ata_port *ap, unsigned long deadline)
+static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
 		return -ENOENT;
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 static void piix_pata_error_handler(struct ata_port *ap)
@@ -679,7 +728,7 @@ static void piix_pata_error_handler(struct ata_port *ap)
  *	None (inherited from caller).
  */
 
-static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 {
 	unsigned int pio	= adev->pio_mode - XFER_PIO_0;
 	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
@@ -712,8 +761,14 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
 	if (adev->class == ATA_DEV_ATA)
 		control |= 4;	/* PPE enable */
 
+	/* PIO configuration clears DTE unconditionally.  It will be
+	 * programmed in set_dmamode which is guaranteed to be called
+	 * after set_piomode if any DMA mode is available.
+	 */
 	pci_read_config_word(dev, master_port, &master_data);
 	if (is_slave) {
+		/* clear TIME1|IE1|PPE1|DTE1 */
+		master_data &= 0xff0f;
 		/* Enable SITRE (separate slave timing register) */
 		master_data |= 0x4000;
 		/* enable PPE1, IE1 and TIME1 as needed */
@@ -721,12 +776,14 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
 		pci_read_config_byte(dev, slave_port, &slave_data);
 		slave_data &= (ap->port_no ? 0x0f : 0xf0);
 		/* Load the timing nibble for this slave */
-		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+						<< (ap->port_no ? 4 : 0);
 	} else {
-		/* Master keeps the bits in a different format */
-		master_data &= 0xccf8;
+		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+		master_data &= 0xccf0;
 		/* Enable PPE, IE and TIME as appropriate */
 		master_data |= control;
+		/* load ISP and RCT */
 		master_data |=
 			(timings[pio][0] << 12) |
 			(timings[pio][1] << 8);
@@ -758,7 +815,7 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
  *	None (inherited from caller).
  */
 
-static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich)
+static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
 {
 	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
 	u8 master_port		= ap->port_no ? 0x42 : 0x40;
@@ -785,7 +842,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i
 		int u_clock, u_speed;
 
 		/*
-	 	 * UDMA is handled by a combination of clock switching and
+		 * UDMA is handled by a combination of clock switching and
 		 * selection of dividers
 		 *
 		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
@@ -843,7 +900,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i
 			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
 			master_data |= control << 4;
 			pci_read_config_byte(dev, 0x44, &slave_data);
-			slave_data &= (0x0F + 0xE1 * ap->port_no);
+			slave_data &= (ap->port_no ? 0x0f : 0xf0);
 			/* Load the matching timing */
 			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
 			pci_write_config_byte(dev, 0x44, slave_data);
@@ -855,8 +912,11 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i
 				(timings[pio][0] << 12) |
 				(timings[pio][1] << 8);
 		}
-		udma_enable &= ~(1 << devid);
-		pci_write_config_word(dev, master_port, master_data);
+
+		if (ap->udma_mask) {
+			udma_enable &= ~(1 << devid);
+			pci_write_config_word(dev, master_port, master_data);
+		}
 	}
 	/* Don't scribble on 0x48 if the controller does not support UDMA */
 	if (ap->udma_mask)
@@ -874,7 +934,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i
  *	None (inherited from caller).
  */
 
-static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 {
 	do_pata_set_dmamode(ap, adev, 0);
 }
@@ -890,11 +950,191 @@ static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
  *	None (inherited from caller).
  */
 
-static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 {
 	do_pata_set_dmamode(ap, adev, 1);
 }
 
+#ifdef CONFIG_PM
+static int piix_broken_suspend(void)
+{
+	static struct dmi_system_id sysids[] = {
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+			},
+		},
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
+			},
+		},
+		{
+			.ident = "TECRA M4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
+			},
+		},
+		{
+			.ident = "TECRA M5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+			},
+		},
+		{
+			.ident = "TECRA M7",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
+			},
+		},
+		{
+			.ident = "TECRA A8",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
+			},
+		},
+		{
+			.ident = "Satellite R25",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
+			},
+		},
+		{
+			.ident = "Satellite U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+			},
+		},
+		{
+			.ident = "Satellite U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
+			},
+		},
+		{
+			.ident = "Satellite Pro U200",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
+			},
+		},
+		{
+			.ident = "Satellite U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+			},
+		},
+		{
+			.ident = "SATELLITE U205",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
+			},
+		},
+		{
+			.ident = "Portege M500",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+			},
+		},
+
+		{ }	/* terminate list */
+	};
+	static const char *oemstrs[] = {
+		"Tecra M3,",
+	};
+	int i;
+
+	if (dmi_check_system(sysids))
+		return 1;
+
+	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
+		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
+			return 1;
+
+	return 0;
+}
+
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	unsigned long flags;
+	int rc = 0;
+
+	rc = ata_host_suspend(host, mesg);
+	if (rc)
+		return rc;
+
+	/* Some braindamaged ACPI suspend implementations expect the
+	 * controller to be awake on entry; otherwise, it burns cpu
+	 * cycles and power trying to do something to the sleeping
+	 * beauty.
+	 */
+	if (piix_broken_suspend() && mesg.event == PM_EVENT_SUSPEND) {
+		pci_save_state(pdev);
+
+		/* mark its power state as "unknown", since we don't
+		 * know if e.g. the BIOS will change its device state
+		 * when we suspend.
+		 */
+		if (pdev->current_state == PCI_D0)
+			pdev->current_state = PCI_UNKNOWN;
+
+		/* tell resume that it's waking up from broken suspend */
+		spin_lock_irqsave(&host->lock, flags);
+		host->flags |= PIIX_HOST_BROKEN_SUSPEND;
+		spin_unlock_irqrestore(&host->lock, flags);
+	} else
+		ata_pci_device_do_suspend(pdev, mesg);
+
+	return 0;
+}
+
+static int piix_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	unsigned long flags;
+	int rc;
+
+	if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
+		spin_lock_irqsave(&host->lock, flags);
+		host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		pci_set_power_state(pdev, PCI_D0);
+		pci_restore_state(pdev);
+
+		/* PCI device wasn't disabled during suspend.  Use
+		 * pci_reenable_device() to avoid affecting the enable
+		 * count.
+		 */
+		rc = pci_reenable_device(pdev);
+		if (rc)
+			dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
+				   "device after resume (%d)\n", rc);
+	} else
+		rc = ata_pci_device_do_resume(pdev);
+
+	if (rc == 0)
+		ata_host_resume(host);
+
+	return rc;
+}
+#endif
+
 #define AHCI_PCI_BAR 5
 #define AHCI_GLOBAL_CTL 0x04
 #define AHCI_ENABLE (1 << 31)
@@ -916,12 +1156,12 @@ static int piix_disable_ahci(struct pci_dev *pdev)
 	if (!mmio)
 		return -ENOMEM;
 
-	tmp = readl(mmio + AHCI_GLOBAL_CTL);
+	tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
 	if (tmp & AHCI_ENABLE) {
 		tmp &= ~AHCI_ENABLE;
-		writel(tmp, mmio + AHCI_GLOBAL_CTL);
+		iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);
 
-		tmp = readl(mmio + AHCI_GLOBAL_CTL);
+		tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
 		if (tmp & AHCI_ENABLE)
 			rc = -EIO;
 	}
@@ -942,20 +1182,19 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
 {
 	struct pci_dev *pdev = NULL;
 	u16 cfg;
-	u8 rev;
 	int no_piix_dma = 0;
+	u8 pdev_revision;
 
-	while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
-	{
+	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
 		/* Look for 450NX PXB. Check for problem configurations
 		   A PCI quirk checks bit 6 already */
-		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
 		pci_read_config_word(pdev, 0x41, &cfg);
+		pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
 		/* Only on the original revision: IDE DMA can hang */
-		if (rev == 0x00)
+		if (pdev_revision == 0x00)
 			no_piix_dma = 1;
 		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
-		else if (cfg & (1<<14) && rev < 5)
+		else if (cfg & (1<<14) && pdev_revision < 5)
 			no_piix_dma = 2;
 	}
 	if (no_piix_dma)
@@ -987,7 +1226,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
 					 const struct piix_map_db *map_db)
 {
 	struct piix_host_priv *hpriv = pinfo[0].private_data;
-	const unsigned int *map;
+	const int *map;
 	int i, invalid_map = 0;
 	u8 map_value;
 
@@ -1031,6 +1270,41 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
 	hpriv->map = map;
 }
 
+static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+{
+	static struct dmi_system_id sysids[] = {
+		{
+			/* Clevo M570U sets IOCFG bit 18 if the cdrom
+			 * isn't used to boot the system, which
+			 * disables the channel.
+			 */
+			.ident = "M570U",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
+				DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
+			},
+		},
+
+		{ }	/* terminate list */
+	};
+	u32 iocfg;
+
+	if (!dmi_check_system(sysids))
+		return;
+
+	/* The datasheet says that bit 18 is NOOP but certain systems
+	 * seem to use it to disable a channel.  Clear the bit on the
+	 * affected systems.
+	 */
+	pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg);
+	if (iocfg & (1 << 18)) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "applying IOCFG bit18 quirk\n");
+		iocfg &= ~(1 << 18);
+		pci_write_config_dword(pdev, PIIX_IOCFG, iocfg);
+	}
+}
+
 /**
  *	piix_init_one - Register PIIX ATA PCI device with kernel services
  *	@pdev: PCI device to register
@@ -1046,7 +1320,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
  *	Zero on success, or -ERRNO value.
  */
 
-static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	struct device *dev = &pdev->dev;
@@ -1092,6 +1366,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			      piix_map_db_table[ent->driver_data]);
 	}
 
+	/* apply IOCFG bit18 quirk */
+	piix_iocfg_bit18_quirk(pdev);
+
 	/* On ICH5, some BIOSen disable the interrupt using the
 	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
 	 * On ICH6, this bit has the same effect, but only when
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 0223673..7bf4bef 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2006 Randy Dunlap
  */
 
+#include <linux/module.h>
 #include <linux/ata.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -14,6 +15,7 @@
 #include <linux/acpi.h>
 #include <linux/libata.h>
 #include <linux/pci.h>
+#include <scsi/scsi_device.h>
 #include "libata.h"
 
 #include <acpi/acpi_bus.h>
@@ -24,15 +26,25 @@
 #include <acpi/acmacros.h>
 #include <acpi/actypes.h>
 
-#define SATA_ROOT_PORT(x)	(((x) >> 16) & 0xffff)
-#define SATA_PORT_NUMBER(x)	((x) & 0xffff)	/* or NO_PORT_MULT */
+enum {
+	ATA_ACPI_FILTER_SETXFER	= 1 << 0,
+	ATA_ACPI_FILTER_LOCK	= 1 << 1,
+
+	ATA_ACPI_FILTER_DEFAULT	= ATA_ACPI_FILTER_SETXFER |
+				  ATA_ACPI_FILTER_LOCK,
+};
+
+static unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
+module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
+MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock)");
+
 #define NO_PORT_MULT		0xffff
-#define SATA_ADR_RSVD		0xffffffff
+#define SATA_ADR(root, pmp)	(((root) << 16) | (pmp))
 
 #define REGS_PER_GTF		7
-struct taskfile_array {
-	u8	tfa[REGS_PER_GTF];	/* regs. 0x1f1 - 0x1f7 */
-};
+struct ata_acpi_gtf {
+	u8	tf[REGS_PER_GTF];	/* regs. 0x1f1 - 0x1f7 */
+} __packed;
 
 /*
  *	Helper - belongs in the PCI layer somewhere eventually
@@ -42,237 +54,298 @@ static int is_pci_dev(struct device *dev)
 	return (dev->bus == &pci_bus_type);
 }
 
+static void ata_acpi_clear_gtf(struct ata_device *dev)
+{
+	kfree(dev->gtf_cache);
+	dev->gtf_cache = NULL;
+}
+
 /**
- * sata_get_dev_handle - finds acpi_handle and PCI device.function
- * @dev: device to locate
- * @handle: returned acpi_handle for @dev
- * @pcidevfn: return PCI device.func for @dev
+ * ata_acpi_associate_sata_port - associate SATA port with ACPI objects
+ * @ap: target SATA port
+ *
+ * Look up ACPI objects associated with @ap and initialize acpi_handle
+ * fields of @ap, the port and devices accordingly.
  *
- * This function is somewhat SATA-specific.  Or at least the
- * PATA & SATA versions of this function are different,
- * so it's not entirely generic code.
+ * LOCKING:
+ * EH context.
  *
- * Returns 0 on success, <0 on error.
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
-static int sata_get_dev_handle(struct device *dev, acpi_handle *handle,
-					acpi_integer *pcidevfn)
+void ata_acpi_associate_sata_port(struct ata_port *ap)
 {
-	struct pci_dev	*pci_dev;
-	acpi_integer	addr;
-
-	if (!is_pci_dev(dev))
-		return -ENODEV;
-
-	pci_dev = to_pci_dev(dev);	/* NOTE: PCI-specific */
-	/* Please refer to the ACPI spec for the syntax of _ADR. */
-	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
-	*pcidevfn = addr;
-	*handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
-	if (!*handle)
-		return -ENODEV;
-	return 0;
+	WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA));
+
+	if (!ap->nr_pmp_links) {
+		acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
+
+		ap->link.device->acpi_handle =
+			acpi_get_child(ap->host->acpi_handle, adr);
+	} else {
+		struct ata_link *link;
+
+		ap->link.device->acpi_handle = NULL;
+
+		ata_port_for_each_link(link, ap) {
+			acpi_integer adr = SATA_ADR(ap->port_no, link->pmp);
+
+			link->device->acpi_handle =
+				acpi_get_child(ap->host->acpi_handle, adr);
+		}
+	}
+}
+
+static void ata_acpi_associate_ide_port(struct ata_port *ap)
+{
+	int max_devices, i;
+
+	ap->acpi_handle = acpi_get_child(ap->host->acpi_handle, ap->port_no);
+	if (!ap->acpi_handle)
+		return;
+
+	max_devices = 1;
+	if (ap->flags & ATA_FLAG_SLAVE_POSS)
+		max_devices++;
+
+	for (i = 0; i < max_devices; i++) {
+		struct ata_device *dev = &ap->link.device[i];
+
+		dev->acpi_handle = acpi_get_child(ap->acpi_handle, i);
+	}
+
+	if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+		ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
+}
+
+static void ata_acpi_handle_hotplug(struct ata_port *ap, struct kobject *kobj,
+				    u32 event)
+{
+	char event_string[12];
+	char *envp[] = { event_string, NULL };
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+
+	if (event == 0 || event == 1) {
+		unsigned long flags;
+		spin_lock_irqsave(ap->lock, flags);
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "ACPI event");
+		ata_ehi_hotplugged(ehi);
+		ata_port_freeze(ap);
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+
+	if (kobj) {
+		sprintf(event_string, "BAY_EVENT=%d", event);
+		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
+	}
+}
+
+static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct ata_device *dev = data;
+	struct kobject *kobj = NULL;
+
+	if (dev->sdev)
+		kobj = &dev->sdev->sdev_gendev.kobj;
+
+	ata_acpi_handle_hotplug(dev->link->ap, kobj, event);
+}
+
+static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct ata_port *ap = data;
+
+	ata_acpi_handle_hotplug(ap, &ap->dev->kobj, event);
 }
 
 /**
- * pata_get_dev_handle - finds acpi_handle and PCI device.function
- * @dev: device to locate
- * @handle: returned acpi_handle for @dev
- * @pcidevfn: return PCI device.func for @dev
+ * ata_acpi_associate - associate ATA host with ACPI objects
+ * @host: target ATA host
  *
- * The PATA and SATA versions of this function are different.
+ * Look up ACPI objects associated with @host and initialize
+ * acpi_handle fields of @host, its ports and devices accordingly.
  *
- * Returns 0 on success, <0 on error.
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
-static int pata_get_dev_handle(struct device *dev, acpi_handle *handle,
-				acpi_integer *pcidevfn)
+void ata_acpi_associate(struct ata_host *host)
 {
-	unsigned int bus, devnum, func;
-	acpi_integer addr;
-	acpi_handle dev_handle, parent_handle;
-	struct acpi_buffer buffer = {.length = ACPI_ALLOCATE_BUFFER,
-					.pointer = NULL};
-	acpi_status status;
-	struct acpi_device_info	*dinfo = NULL;
-	int ret = -ENODEV;
-	struct pci_dev *pdev;
+	int i, j;
+
+	if (!is_pci_dev(host->dev) || libata_noacpi)
+		return;
 
-	if (!is_pci_dev(dev))
-		return -ENODEV;
+	host->acpi_handle = DEVICE_ACPI_HANDLE(host->dev);
+	if (!host->acpi_handle)
+		return;
 
-	pdev = to_pci_dev(dev);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
 
-	bus = pdev->bus->number;
-	devnum = PCI_SLOT(pdev->devfn);
-	func = PCI_FUNC(pdev->devfn);
+		if (host->ports[0]->flags & ATA_FLAG_ACPI_SATA)
+			ata_acpi_associate_sata_port(ap);
+		else
+			ata_acpi_associate_ide_port(ap);
 
-	dev_handle = DEVICE_ACPI_HANDLE(dev);
-	parent_handle = DEVICE_ACPI_HANDLE(dev->parent);
+		if (ap->acpi_handle)
+			acpi_install_notify_handler (ap->acpi_handle,
+						     ACPI_SYSTEM_NOTIFY,
+						     ata_acpi_ap_notify,
+						     ap);
 
-	status = acpi_get_object_info(parent_handle, &buffer);
-	if (ACPI_FAILURE(status))
-		goto err;
+		for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
+			struct ata_device *dev = &ap->link.device[j];
 
-	dinfo = buffer.pointer;
-	if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
-	    dinfo->address == bus) {
-		/* ACPI spec for _ADR for PCI bus: */
-		addr = (acpi_integer)(devnum << 16 | func);
-		*pcidevfn = addr;
-		*handle = dev_handle;
-	} else {
-		goto err;
+			if (dev->acpi_handle)
+				acpi_install_notify_handler (dev->acpi_handle,
+							     ACPI_SYSTEM_NOTIFY,
+							     ata_acpi_dev_notify,
+							     dev);
+		}
 	}
-
-	if (!*handle)
-		goto err;
-	ret = 0;
-err:
-	kfree(dinfo);
-	return ret;
 }
 
-struct walk_info {		/* can be trimmed some */
-	struct device	*dev;
-	struct acpi_device *adev;
-	acpi_handle	handle;
-	acpi_integer	pcidevfn;
-	unsigned int	drivenum;
-	acpi_handle	obj_handle;
-	struct ata_port *ataport;
-	struct ata_device *atadev;
-	u32		sata_adr;
-	int		status;
-	char		basepath[ACPI_PATHNAME_MAX];
-	int		basepath_len;
-};
+/**
+ * ata_acpi_dissociate - dissociate ATA host from ACPI objects
+ * @host: target ATA host
+ *
+ * This function is called during driver detach after the whole host
+ * is shut down.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_dissociate(struct ata_host *host)
+{
+	int i;
+
+	/* Restore initial _GTM values so that driver which attaches
+	 * afterward can use them too.
+	 */
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
 
-static acpi_status get_devices(acpi_handle handle,
-				u32 level, void *context, void **return_value)
+		if (ap->acpi_handle && gtm)
+			ata_acpi_stm(ap, gtm);
+	}
+}
+
+/**
+ * ata_acpi_gtm - execute _GTM
+ * @ap: target ATA port
+ * @gtm: out parameter for _GTM result
+ *
+ * Evaluate _GTM and store the result in @gtm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
+ */
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
 {
-	acpi_status		status;
-	struct walk_info	*winfo = context;
-	struct acpi_buffer	namebuf = {ACPI_ALLOCATE_BUFFER, NULL};
-	char			*pathname;
-	struct acpi_buffer	buffer;
-	struct acpi_device_info	*dinfo;
-
-	status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &namebuf);
-	if (status)
-		goto ret;
-	pathname = namebuf.pointer;
-
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
-	status = acpi_get_object_info(handle, &buffer);
-	if (ACPI_FAILURE(status))
-		goto out2;
-
-	dinfo = buffer.pointer;
-
-	/* find full device path name for pcidevfn */
-	if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
-	    dinfo->address == winfo->pcidevfn) {
-		if (ata_msg_probe(winfo->ataport))
-			ata_dev_printk(winfo->atadev, KERN_DEBUG,
-				":%s: matches pcidevfn (0x%llx)\n",
-				pathname, winfo->pcidevfn);
-		strlcpy(winfo->basepath, pathname,
-			sizeof(winfo->basepath));
-		winfo->basepath_len = strlen(pathname);
-		goto out;
+	struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
+	union acpi_object *out_obj;
+	acpi_status status;
+	int rc = 0;
+
+	status = acpi_evaluate_object(ap->acpi_handle, "_GTM", NULL, &output);
+
+	rc = -ENOENT;
+	if (status == AE_NOT_FOUND)
+		goto out_free;
+
+	rc = -EINVAL;
+	if (ACPI_FAILURE(status)) {
+		ata_port_printk(ap, KERN_ERR,
+				"ACPI get timing mode failed (AE 0x%x)\n",
+				status);
+		goto out_free;
 	}
 
-	/* if basepath is not yet known, ignore this object */
-	if (!winfo->basepath_len)
-		goto out;
-
-	/* if this object is in scope of basepath, maybe use it */
-	if (strncmp(pathname, winfo->basepath,
-	    winfo->basepath_len) == 0) {
-		if (!(dinfo->valid & ACPI_VALID_ADR))
-			goto out;
-		if (ata_msg_probe(winfo->ataport))
-			ata_dev_printk(winfo->atadev, KERN_DEBUG,
-				"GOT ONE: (%s) root_port = 0x%llx,"
-				" port_num = 0x%llx\n", pathname,
-				SATA_ROOT_PORT(dinfo->address),
-				SATA_PORT_NUMBER(dinfo->address));
-		/* heuristics: */
-		if (SATA_PORT_NUMBER(dinfo->address) != NO_PORT_MULT)
-			if (ata_msg_probe(winfo->ataport))
-				ata_dev_printk(winfo->atadev,
-					KERN_DEBUG, "warning: don't"
-					" know how to handle SATA port"
-					" multiplier\n");
-		if (SATA_ROOT_PORT(dinfo->address) ==
-			winfo->ataport->port_no &&
-		    SATA_PORT_NUMBER(dinfo->address) == NO_PORT_MULT) {
-			if (ata_msg_probe(winfo->ataport))
-				ata_dev_printk(winfo->atadev,
-					KERN_DEBUG,
-					"THIS ^^^^^ is the requested"
-					" SATA drive (handle = 0x%p)\n",
-					handle);
-			winfo->sata_adr = dinfo->address;
-			winfo->obj_handle = handle;
-		}
+	out_obj = output.pointer;
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		ata_port_printk(ap, KERN_WARNING,
+				"_GTM returned unexpected object type 0x%x\n",
+				out_obj->type);
+
+		goto out_free;
+	}
+
+	if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) {
+		ata_port_printk(ap, KERN_ERR,
+				"_GTM returned invalid length %d\n",
+				out_obj->buffer.length);
+		goto out_free;
 	}
-out:
-	kfree(dinfo);
-out2:
-	kfree(pathname);
 
-ret:
-	return status;
+	memcpy(gtm, out_obj->buffer.pointer, sizeof(struct ata_acpi_gtm));
+	rc = 0;
+ out_free:
+	kfree(output.pointer);
+	return rc;
 }
 
-/* Get the SATA drive _ADR object. */
-static int get_sata_adr(struct device *dev, acpi_handle handle,
-			acpi_integer pcidevfn, unsigned int drive,
-			struct ata_port *ap,
-			struct ata_device *atadev, u32 *dev_adr)
+EXPORT_SYMBOL_GPL(ata_acpi_gtm);
+
+/**
+ * ata_acpi_stm - execute _STM
+ * @ap: target ATA port
+ * @stm: timing parameter to _STM
+ *
+ * Evaluate _STM with timing parameter @stm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure.
+ */
+int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
 {
-	acpi_status	status;
-	struct walk_info *winfo;
-	int		err = -ENOMEM;
-
-	winfo = kzalloc(sizeof(struct walk_info), GFP_KERNEL);
-	if (!winfo)
-		goto out;
-
-	winfo->dev = dev;
-	winfo->atadev = atadev;
-	winfo->ataport = ap;
-	if (acpi_bus_get_device(handle, &winfo->adev) < 0)
-		if (ata_msg_probe(ap))
-			ata_dev_printk(winfo->atadev, KERN_DEBUG,
-				"acpi_bus_get_device failed\n");
-	winfo->handle = handle;
-	winfo->pcidevfn = pcidevfn;
-	winfo->drivenum = drive;
+	acpi_status status;
+	struct ata_acpi_gtm		stm_buf = *stm;
+	struct acpi_object_list         input;
+	union acpi_object               in_params[3];
 
-	status = acpi_get_devices(NULL, get_devices, winfo, NULL);
+	in_params[0].type = ACPI_TYPE_BUFFER;
+	in_params[0].buffer.length = sizeof(struct ata_acpi_gtm);
+	in_params[0].buffer.pointer = (u8 *)&stm_buf;
+	/* Buffers for id may need byteswapping ? */
+	in_params[1].type = ACPI_TYPE_BUFFER;
+	in_params[1].buffer.length = 512;
+	in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id;
+	in_params[2].type = ACPI_TYPE_BUFFER;
+	in_params[2].buffer.length = 512;
+	in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id;
+
+	input.count = 3;
+	input.pointer = in_params;
+
+	status = acpi_evaluate_object(ap->acpi_handle, "_STM", &input, NULL);
+
+	if (status == AE_NOT_FOUND)
+		return -ENOENT;
 	if (ACPI_FAILURE(status)) {
-		if (ata_msg_probe(ap))
-			ata_dev_printk(winfo->atadev, KERN_DEBUG,
-				"%s: acpi_get_devices failed\n",
-				__FUNCTION__);
-		err = -ENODEV;
-	} else {
-		*dev_adr = winfo->sata_adr;
-		atadev->obj_handle = winfo->obj_handle;
-		err = 0;
+		ata_port_printk(ap, KERN_ERR,
+			"ACPI set timing mode failed (status=0x%x)\n", status);
+		return -EINVAL;
 	}
-	kfree(winfo);
-out:
-	return err;
+	return 0;
 }
 
+EXPORT_SYMBOL_GPL(ata_acpi_stm);
+
 /**
- * do_drive_get_GTF - get the drive bootup default taskfile settings
+ * ata_dev_get_GTF - get the drive bootup default taskfile settings
  * @dev: target ATA device
- * @gtf_length: number of bytes of _GTF data returned at @gtf_address
- * @gtf_address: buffer containing _GTF taskfile arrays
+ * @gtf: output parameter for buffer containing _GTF taskfile arrays
  *
  * This applies to both PATA and SATA drives.
  *
@@ -282,121 +355,47 @@ out:
  * The <variable number> is not known in advance, so have ACPI-CA
  * allocate the buffer as needed and return it, then free it later.
  *
- * The returned @gtf_length and @gtf_address are only valid if the
- * function return value is 0.
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Number of taskfiles on success, 0 if _GTF doesn't exist.  -EINVAL
+ * if _GTF is invalid.
  */
-static int do_drive_get_GTF(struct ata_device *dev, unsigned int *gtf_length,
-			    unsigned long *gtf_address, unsigned long *obj_loc)
+static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	acpi_status status;
-	acpi_handle dev_handle = NULL;
-	acpi_handle chan_handle, drive_handle;
-	acpi_integer pcidevfn = 0;
-	u32 dev_adr;
 	struct acpi_buffer output;
 	union acpi_object *out_obj;
-	struct device *gdev = ap->host->dev;
-	int err = -ENODEV;
+	int rc = 0;
 
-	*gtf_length = 0;
-	*gtf_address = 0UL;
-	*obj_loc = 0UL;
+	/* if _GTF is cached, use the cached value */
+	if (dev->gtf_cache) {
+		out_obj = dev->gtf_cache;
+		goto done;
+	}
 
-	if (libata_noacpi)
-		return 0;
+	/* set up output buffer */
+	output.length = ACPI_ALLOCATE_BUFFER;
+	output.pointer = NULL;	/* ACPI-CA sets this; save/free it later */
 
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
 			       __FUNCTION__, ap->port_no);
 
-	if (!ata_dev_enabled(dev) || (ap->flags & ATA_FLAG_DISABLED)) {
-		if (ata_msg_probe(ap))
-			ata_dev_printk(dev, KERN_DEBUG, "%s: ERR: "
-				"ata_dev_present: %d, PORT_DISABLED: %lu\n",
-				__FUNCTION__, ata_dev_enabled(dev),
-				ap->flags & ATA_FLAG_DISABLED);
-		goto out;
-	}
-
-	/* Don't continue if device has no _ADR method.
-	 * _GTF is intended for known motherboard devices. */
-	if (!(ap->flags & ATA_FLAG_ACPI_SATA)) {
-		err = pata_get_dev_handle(gdev, &dev_handle, &pcidevfn);
-		if (err < 0) {
-			if (ata_msg_probe(ap))
-				ata_dev_printk(dev, KERN_DEBUG,
-					"%s: pata_get_dev_handle failed (%d)\n",
-					__FUNCTION__, err);
-			goto out;
-		}
-	} else {
-		err = sata_get_dev_handle(gdev, &dev_handle, &pcidevfn);
-		if (err < 0) {
-			if (ata_msg_probe(ap))
-				ata_dev_printk(dev, KERN_DEBUG,
-					"%s: sata_get_dev_handle failed (%d\n",
-					__FUNCTION__, err);
-			goto out;
-		}
-	}
-
-	/* Get this drive's _ADR info. if not already known. */
-	if (!dev->obj_handle) {
-		if (!(ap->flags & ATA_FLAG_ACPI_SATA)) {
-			/* get child objects of dev_handle == channel objects,
-	 		 * + _their_ children == drive objects */
-			/* channel is ap->port_no */
-			chan_handle = acpi_get_child(dev_handle,
-						ap->port_no);
-			if (ata_msg_probe(ap))
-				ata_dev_printk(dev, KERN_DEBUG,
-					"%s: chan adr=%d: chan_handle=0x%p\n",
-					__FUNCTION__, ap->port_no,
-					chan_handle);
-			if (!chan_handle) {
-				err = -ENODEV;
-				goto out;
-			}
-			/* TBD: could also check ACPI object VALID bits */
-			drive_handle = acpi_get_child(chan_handle, dev->devno);
-			if (!drive_handle) {
-				err = -ENODEV;
-				goto out;
-			}
-			dev_adr = dev->devno;
-			dev->obj_handle = drive_handle;
-		} else {	/* for SATA mode */
-			dev_adr = SATA_ADR_RSVD;
-			err = get_sata_adr(gdev, dev_handle, pcidevfn, 0,
-					ap, dev, &dev_adr);
-		}
-		if (err < 0 || dev_adr == SATA_ADR_RSVD ||
-		    !dev->obj_handle) {
-			if (ata_msg_probe(ap))
-				ata_dev_printk(dev, KERN_DEBUG,
-					"%s: get_sata/pata_adr failed: "
-					"err=%d, dev_adr=%u, obj_handle=0x%p\n",
-					__FUNCTION__, err, dev_adr,
-					dev->obj_handle);
-			goto out;
-		}
-	}
-
-	/* Setting up output buffer */
-	output.length = ACPI_ALLOCATE_BUFFER;
-	output.pointer = NULL;	/* ACPI-CA sets this; save/free it later */
-
 	/* _GTF has no input parameters */
-	err = -EIO;
-	status = acpi_evaluate_object(dev->obj_handle, "_GTF",
-					NULL, &output);
+	status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output);
+	out_obj = dev->gtf_cache = output.pointer;
+
 	if (ACPI_FAILURE(status)) {
-		if (ata_msg_probe(ap))
-			ata_dev_printk(dev, KERN_DEBUG,
-				"%s: Run _GTF error: status = 0x%x\n",
-				__FUNCTION__, status);
-		goto out;
+		if (status != AE_NOT_FOUND) {
+			ata_dev_printk(dev, KERN_WARNING,
+				       "_GTF evaluation failed (AE 0x%x)\n",
+				       status);
+			rc = -EINVAL;
+		}
+		goto out_free;
 	}
 
 	if (!output.length || !output.pointer) {
@@ -406,47 +405,135 @@ static int do_drive_get_GTF(struct ata_device *dev, unsigned int *gtf_length,
 				__FUNCTION__,
 				(unsigned long long)output.length,
 				output.pointer);
-		kfree(output.pointer);
-		goto out;
+		rc = -EINVAL;
+		goto out_free;
 	}
 
-	out_obj = output.pointer;
 	if (out_obj->type != ACPI_TYPE_BUFFER) {
-		kfree(output.pointer);
+		ata_dev_printk(dev, KERN_WARNING,
+			       "_GTF unexpected object type 0x%x\n",
+			       out_obj->type);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (out_obj->buffer.length % REGS_PER_GTF) {
+		ata_dev_printk(dev, KERN_WARNING,
+			       "unexpected _GTF length (%d)\n",
+			       out_obj->buffer.length);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+ done:
+	rc = out_obj->buffer.length / REGS_PER_GTF;
+	if (gtf) {
+		*gtf = (void *)out_obj->buffer.pointer;
 		if (ata_msg_probe(ap))
-			ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
-				"error: expected object type of "
-				" ACPI_TYPE_BUFFER, got 0x%x\n",
-				__FUNCTION__, out_obj->type);
-		err = -ENOENT;
-		goto out;
+			ata_dev_printk(dev, KERN_DEBUG,
+				       "%s: returning gtf=%p, gtf_count=%d\n",
+				       __FUNCTION__, *gtf, rc);
 	}
+	return rc;
+
+ out_free:
+	ata_acpi_clear_gtf(dev);
+	return rc;
+}
+
+/**
+ * ata_acpi_cbl_80wire		-	Check for 80 wire cable
+ * @ap: Port to check
+ *
+ * Return 1 if the ACPI mode data for this port indicates the BIOS selected
+ * an 80wire mode.
+ */
 
-	if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
-	    out_obj->buffer.length % REGS_PER_GTF) {
-		if (ata_msg_drv(ap))
-			ata_dev_printk(dev, KERN_ERR,
-				"%s: unexpected GTF length (%d) or addr (0x%p)\n",
-				__FUNCTION__, out_obj->buffer.length,
-				out_obj->buffer.pointer);
-		err = -ENOENT;
-		goto out;
+int ata_acpi_cbl_80wire(struct ata_port *ap)
+{
+	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+	int valid = 0;
+
+	if (!gtm)
+		return 0;
+
+	/* Split timing, DMA enabled */
+	if ((gtm->flags & 0x11) == 0x11 && gtm->drive[0].dma < 55)
+		valid |= 1;
+	if ((gtm->flags & 0x14) == 0x14 && gtm->drive[1].dma < 55)
+		valid |= 2;
+	/* Shared timing, DMA enabled */
+	if ((gtm->flags & 0x11) == 0x01 && gtm->drive[0].dma < 55)
+		valid |= 1;
+	if ((gtm->flags & 0x14) == 0x04 && gtm->drive[0].dma < 55)
+		valid |= 2;
+
+	/* Drive check */
+	if ((valid & 1) && ata_dev_enabled(&ap->link.device[0]))
+		return 1;
+	if ((valid & 2) && ata_dev_enabled(&ap->link.device[1]))
+		return 1;
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
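
For reference, the 80-wire heuristic above reduces to: for each drive whose timing is DMA-enabled according to the _GTM flags, a DMA cycle time below 55 ns is taken to mean a mode faster than UDMA2, which needs an 80-wire cable.  A minimal user-space sketch of that decode (field layout assumed to match struct ata_acpi_gtm, and omitting the final per-device enable check) might look like:

/* Hedged sketch of the _GTM decode used by ata_acpi_cbl_80wire();
 * types and bit meanings are assumptions, not part of the patch. */
#include <stdio.h>

struct gtm_sketch {
	struct { unsigned int pio, dma; } drive[2];
	unsigned int flags;	/* bit 0/2: drive 0/1 DMA on, bit 4: split timing */
};

static int looks_80wire(const struct gtm_sketch *gtm)
{
	int valid = 0;

	/* Split timing: each drive has its own DMA cycle time */
	if ((gtm->flags & 0x11) == 0x11 && gtm->drive[0].dma < 55)
		valid |= 1;
	if ((gtm->flags & 0x14) == 0x14 && gtm->drive[1].dma < 55)
		valid |= 2;
	/* Shared timing: both drives use drive[0]'s cycle time */
	if ((gtm->flags & 0x11) == 0x01 && gtm->drive[0].dma < 55)
		valid |= 1;
	if ((gtm->flags & 0x14) == 0x04 && gtm->drive[0].dma < 55)
		valid |= 2;

	return valid != 0;
}

int main(void)
{
	/* drive 0: 20 ns DMA cycle, DMA enabled, split timing */
	struct gtm_sketch gtm = { { { 120, 20 }, { 120, 120 } }, 0x11 };

	printf("80-wire capable: %d\n", looks_80wire(&gtm));
	return 0;
}
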
+
+static void ata_acpi_gtf_to_tf(struct ata_device *dev,
+			       const struct ata_acpi_gtf *gtf,
+			       struct ata_taskfile *tf)
+{
+	ata_tf_init(dev, tf);
+
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->protocol = ATA_PROT_NODATA;
+	tf->feature = gtf->tf[0];	/* 0x1f1 */
+	tf->nsect   = gtf->tf[1];	/* 0x1f2 */
+	tf->lbal    = gtf->tf[2];	/* 0x1f3 */
+	tf->lbam    = gtf->tf[3];	/* 0x1f4 */
+	tf->lbah    = gtf->tf[4];	/* 0x1f5 */
+	tf->device  = gtf->tf[5];	/* 0x1f6 */
+	tf->command = gtf->tf[6];	/* 0x1f7 */
+}
+
+static int ata_acpi_filter_tf(const struct ata_taskfile *tf,
+			      const struct ata_taskfile *ptf)
+{
+	if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_SETXFER) {
+		/* libata doesn't use ACPI to configure transfer mode.
+		 * It will only confuse device configuration.  Skip.
+		 */
+		if (tf->command == ATA_CMD_SET_FEATURES &&
+		    tf->feature == SETFEATURES_XFER)
+			return 1;
 	}
 
-	*gtf_length = out_obj->buffer.length;
-	*gtf_address = (unsigned long)out_obj->buffer.pointer;
-	*obj_loc = (unsigned long)out_obj;
-	if (ata_msg_probe(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: returning "
-			"gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n",
-			__FUNCTION__, *gtf_length, *gtf_address, *obj_loc);
-	err = 0;
-out:
-	return err;
+	if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_LOCK) {
+		/* BIOS writers, sorry but we don't wanna lock
+		 * features unless the user explicitly said so.
+		 */
+
+		/* DEVICE CONFIGURATION FREEZE LOCK */
+		if (tf->command == ATA_CMD_CONF_OVERLAY &&
+		    tf->feature == ATA_DCO_FREEZE_LOCK)
+			return 1;
+
+		/* SECURITY FREEZE LOCK */
+		if (tf->command == ATA_CMD_SEC_FREEZE_LOCK)
+			return 1;
+
+		/* SET MAX LOCK and SET MAX FREEZE LOCK */
+		if ((!ptf || ptf->command != ATA_CMD_READ_NATIVE_MAX) &&
+		    tf->command == ATA_CMD_SET_MAX &&
+		    (tf->feature == ATA_SET_MAX_LOCK ||
+		     tf->feature == ATA_SET_MAX_FREEZE_LOCK))
+			return 1;
+	}
+
+	return 0;
 }
 
 /**
- * taskfile_load_raw - send taskfile registers to host controller
+ * ata_acpi_run_tf - send taskfile registers to host controller
  * @dev: target ATA device
  * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
  *
@@ -461,154 +548,118 @@ out:
  * function also waits for idle after writing control and before
  * writing the remaining registers.
  *
- * LOCKING: TBD:
- * Inherited from caller.
- */
-static void taskfile_load_raw(struct ata_device *dev,
-			      const struct taskfile_array *gtf)
-{
-	struct ata_port *ap = dev->ap;
-	struct ata_taskfile tf;
-	unsigned int err;
-
-	if (ata_msg_probe(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: (0x1f1-1f7): hex: "
-			"%02x %02x %02x %02x %02x %02x %02x\n",
-			__FUNCTION__,
-			gtf->tfa[0], gtf->tfa[1], gtf->tfa[2],
-			gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
-
-	if ((gtf->tfa[0] == 0) && (gtf->tfa[1] == 0) && (gtf->tfa[2] == 0)
-	    && (gtf->tfa[3] == 0) && (gtf->tfa[4] == 0) && (gtf->tfa[5] == 0)
-	    && (gtf->tfa[6] == 0))
-		return;
-
-	ata_tf_init(dev, &tf);
-
-	/* convert gtf to tf */
-	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
-	tf.protocol = ATA_PROT_NODATA;
-	tf.feature = gtf->tfa[0];	/* 0x1f1 */
-	tf.nsect   = gtf->tfa[1];	/* 0x1f2 */
-	tf.lbal    = gtf->tfa[2];	/* 0x1f3 */
-	tf.lbam    = gtf->tfa[3];	/* 0x1f4 */
-	tf.lbah    = gtf->tfa[4];	/* 0x1f5 */
-	tf.device  = gtf->tfa[5];	/* 0x1f6 */
-	tf.command = gtf->tfa[6];	/* 0x1f7 */
-
-	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
-	if (err && ata_msg_probe(ap))
-		ata_dev_printk(dev, KERN_ERR,
-			"%s: ata_exec_internal failed: %u\n",
-			__FUNCTION__, err);
-}
-
-/**
- * do_drive_set_taskfiles - write the drive taskfile settings from _GTF
- * @dev: target ATA device
- * @gtf_length: total number of bytes of _GTF taskfiles
- * @gtf_address: location of _GTF taskfile arrays
- *
- * This applies to both PATA and SATA drives.
+ * LOCKING:
+ * EH context.
  *
- * Write {gtf_address, length gtf_length} in groups of
- * REGS_PER_GTF bytes.
+ * RETURNS:
+ * 1 if command is executed successfully.  0 if ignored, rejected or
+ * filtered out, -errno on other errors.
  */
-static int do_drive_set_taskfiles(struct ata_device *dev,
-				  unsigned int gtf_length,
-				  unsigned long gtf_address)
+static int ata_acpi_run_tf(struct ata_device *dev,
+			   const struct ata_acpi_gtf *gtf,
+			   const struct ata_acpi_gtf *prev_gtf)
 {
-	struct ata_port *ap = dev->ap;
-	int err = -ENODEV;
-	int gtf_count = gtf_length / REGS_PER_GTF;
-	int ix;
-	struct taskfile_array	*gtf;
-
-	if (ata_msg_probe(ap))
-		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
-			       __FUNCTION__, ap->port_no);
-
-	if (libata_noacpi || !(ap->flags & ATA_FLAG_ACPI_SATA))
+	struct ata_taskfile *pptf = NULL;
+	struct ata_taskfile tf, ptf, rtf;
+	unsigned int err_mask;
+	const char *level;
+	char msg[60];
+	int rc;
+
+	if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
+	    && (gtf->tf[3] == 0) && (gtf->tf[4] == 0) && (gtf->tf[5] == 0)
+	    && (gtf->tf[6] == 0))
 		return 0;
 
-	if (!ata_dev_enabled(dev) || (ap->flags & ATA_FLAG_DISABLED))
-		goto out;
-	if (!gtf_count)		/* shouldn't be here */
-		goto out;
-
-	if (gtf_length % REGS_PER_GTF) {
-		if (ata_msg_drv(ap))
-			ata_dev_printk(dev, KERN_ERR,
-				"%s: unexpected GTF length (%d)\n",
-				__FUNCTION__, gtf_length);
-		goto out;
+	ata_acpi_gtf_to_tf(dev, gtf, &tf);
+	if (prev_gtf) {
+		ata_acpi_gtf_to_tf(dev, prev_gtf, &ptf);
+		pptf = &ptf;
 	}
 
-	for (ix = 0; ix < gtf_count; ix++) {
-		gtf = (struct taskfile_array *)
-			(gtf_address + ix * REGS_PER_GTF);
+	if (!ata_acpi_filter_tf(&tf, pptf)) {
+		rtf = tf;
+		err_mask = ata_exec_internal(dev, &rtf, NULL,
+					     DMA_NONE, NULL, 0, 0);
 
-		/* send all TaskFile registers (0x1f1-0x1f7) *in*that*order* */
-		taskfile_load_raw(dev, gtf);
+		switch (err_mask) {
+		case 0:
+			level = KERN_DEBUG;
+			snprintf(msg, sizeof(msg), "succeeded");
+			rc = 1;
+			break;
+
+		case AC_ERR_DEV:
+			level = KERN_INFO;
+			snprintf(msg, sizeof(msg),
+				 "rejected by device (Stat=0x%02x Err=0x%02x)",
+				 rtf.command, rtf.feature);
+			rc = 0;
+			break;
+
+		default:
+			level = KERN_ERR;
+			snprintf(msg, sizeof(msg),
+				 "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
+				 err_mask, rtf.command, rtf.feature);
+			rc = -EIO;
+			break;
+		}
+	} else {
+		level = KERN_INFO;
+		snprintf(msg, sizeof(msg), "filtered out");
+		rc = 0;
 	}
 
-	err = 0;
-out:
-	return err;
+	ata_dev_printk(dev, level,
+		       "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x %s\n",
+		       tf.command, tf.feature, tf.nsect, tf.lbal,
+		       tf.lbam, tf.lbah, tf.device, msg);
+
+	return rc;
 }
 
 /**
  * ata_acpi_exec_tfs - get then write drive taskfile settings
- * @ap: the ata_port for the drive
+ * @dev: target ATA device
+ * @nr_executed: out parameter for the number of executed commands
  *
- * This applies to both PATA and SATA drives.
+ * Evaluate _GTF and execute the returned taskfiles.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Number of executed taskfiles on success, 0 if _GTF doesn't exist.
+ * -errno on other errors.
  */
-int ata_acpi_exec_tfs(struct ata_port *ap)
+static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed)
 {
-	int ix;
-	int ret = 0;
-	unsigned int gtf_length;
-	unsigned long gtf_address;
-	unsigned long obj_loc;
-
-	if (libata_noacpi)
-		return 0;
-	/*
-	 * TBD - implement PATA support.  For now,
-	 * we should not run GTF on PATA devices since some
-	 * PATA require execution of GTM/STM before GTF.
-	 */
-	if (!(ap->flags & ATA_FLAG_ACPI_SATA))
-		return 0;
-
-	for (ix = 0; ix < ATA_MAX_DEVICES; ix++) {
-		struct ata_device *dev = &ap->device[ix];
-
-		if (!ata_dev_enabled(dev))
-			continue;
-
-		ret = do_drive_get_GTF(dev, &gtf_length, &gtf_address,
-				       &obj_loc);
-		if (ret < 0) {
-			if (ata_msg_probe(ap))
-				ata_port_printk(ap, KERN_DEBUG,
-					"%s: get_GTF error (%d)\n",
-					__FUNCTION__, ret);
-			break;
-		}
-
-		ret = do_drive_set_taskfiles(dev, gtf_length, gtf_address);
-		kfree((void *)obj_loc);
-		if (ret < 0) {
-			if (ata_msg_probe(ap))
-				ata_port_printk(ap, KERN_DEBUG,
-					"%s: set_taskfiles error (%d)\n",
-					__FUNCTION__, ret);
+	struct ata_acpi_gtf *gtf = NULL, *pgtf = NULL;
+	int gtf_count, i, rc;
+
+	/* get taskfiles */
+	rc = ata_dev_get_GTF(dev, &gtf);
+	if (rc < 0)
+		return rc;
+	gtf_count = rc;
+
+	/* execute them */
+	for (i = 0; i < gtf_count; i++, gtf++) {
+		rc = ata_acpi_run_tf(dev, gtf, pgtf);
+		if (rc < 0)
 			break;
+		if (rc) {
+			(*nr_executed)++;
+			pgtf = gtf;
 		}
 	}
 
-	return ret;
+	ata_acpi_clear_gtf(dev);
+
+	if (rc < 0)
+		return rc;
+	return 0;
 }
 
 /**
@@ -620,62 +671,25 @@ int ata_acpi_exec_tfs(struct ata_port *ap)
  * ATM this function never returns a failure.  It is an optional
  * method and if it fails for whatever reason, we should still
  * just keep going.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
-int ata_acpi_push_id(struct ata_device *dev)
+static int ata_acpi_push_id(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
-	acpi_handle handle;
-	acpi_integer pcidevfn;
+	struct ata_port *ap = dev->link->ap;
 	int err;
-	struct device *gdev = ap->host->dev;
-	u32 dev_adr;
 	acpi_status status;
 	struct acpi_object_list input;
 	union acpi_object in_params[1];
 
-	if (libata_noacpi)
-		return 0;
-
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
 			       __FUNCTION__, dev->devno, ap->port_no);
 
-	/* Don't continue if not a SATA device. */
-	if (!(ap->flags & ATA_FLAG_ACPI_SATA)) {
-		if (ata_msg_probe(ap))
-			ata_dev_printk(dev, KERN_DEBUG,
-				"%s: Not a SATA device\n", __FUNCTION__);
-		goto out;
-	}
-
-	/* Don't continue if device has no _ADR method.
-	 * _SDD is intended for known motherboard devices. */
-	err = sata_get_dev_handle(gdev, &handle, &pcidevfn);
-	if (err < 0) {
-		if (ata_msg_probe(ap))
-			ata_dev_printk(dev, KERN_DEBUG,
-				"%s: sata_get_dev_handle failed (%d\n",
-				__FUNCTION__, err);
-		goto out;
-	}
-
-	/* Get this drive's _ADR info, if not already known */
-	if (!dev->obj_handle) {
-		dev_adr = SATA_ADR_RSVD;
-		err = get_sata_adr(gdev, handle, pcidevfn, dev->devno, ap, dev,
-					&dev_adr);
-		if (err < 0 || dev_adr == SATA_ADR_RSVD ||
-			!dev->obj_handle) {
-			if (ata_msg_probe(ap))
-				ata_dev_printk(dev, KERN_DEBUG,
-					"%s: get_sata_adr failed: "
-					"err=%d, dev_adr=%u, obj_handle=0x%p\n",
-					__FUNCTION__, err, dev_adr,
-					dev->obj_handle);
-			goto out;
-		}
-	}
-
 	/* Give the drive Identify data to the drive via the _SDD method */
 	/* _SDD: set up input parameters */
 	input.count = 1;
@@ -687,20 +701,170 @@ int ata_acpi_push_id(struct ata_device *dev)
 
 	/* It's OK for _SDD to be missing too. */
 	swap_buf_le16(dev->id, ATA_ID_WORDS);
-	status = acpi_evaluate_object(dev->obj_handle, "_SDD", &input, NULL);
+	status = acpi_evaluate_object(dev->acpi_handle, "_SDD", &input, NULL);
 	swap_buf_le16(dev->id, ATA_ID_WORDS);
 
 	err = ACPI_FAILURE(status) ? -EIO : 0;
-	if (err < 0) {
-		if (ata_msg_probe(ap))
-			ata_dev_printk(dev, KERN_DEBUG,
-				       "%s _SDD error: status = 0x%x\n",
-				       __FUNCTION__, status);
-	}
+	if (err < 0)
+		ata_dev_printk(dev, KERN_WARNING,
+			       "ACPI _SDD failed (AE 0x%x)\n", status);
 
-	/* always return success */
-out:
+	return err;
+}
+
+/**
+ * ata_acpi_on_suspend - ATA ACPI hook called on suspend
+ * @ap: target ATA port
+ *
+ * This function is called when @ap is about to be suspended.  All
+ * devices are already put to sleep but the port_suspend() callback
+ * hasn't been executed yet.  Error return from this function aborts
+ * suspend.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_acpi_on_suspend(struct ata_port *ap)
+{
+	/* nada */
 	return 0;
 }
 
+/**
+ * ata_acpi_on_resume - ATA ACPI hook called on resume
+ * @ap: target ATA port
+ *
+ * This function is called when @ap is resumed - right after port
+ * itself is resumed but before any EH action is taken.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_on_resume(struct ata_port *ap)
+{
+	const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+	struct ata_device *dev;
+
+	if (ap->acpi_handle && gtm) {
+		/* _GTM valid */
+
+		/* restore timing parameters */
+		ata_acpi_stm(ap, gtm);
+
+		/* _GTF should immediately follow _STM so that it can
+		 * use values set by _STM.  Cache _GTF result and
+		 * schedule _GTF.
+		 */
+		ata_link_for_each_dev(dev, &ap->link) {
+			ata_acpi_clear_gtf(dev);
+			if (ata_dev_get_GTF(dev, NULL) >= 0)
+				dev->flags |= ATA_DFLAG_ACPI_PENDING;
+		}
+	} else {
+		/* SATA _GTF needs to be evaluated after _SDD and
+		 * there's no reason to evaluate IDE _GTF early
+		 * without _STM.  Clear cache and schedule _GTF.
+		 */
+		ata_link_for_each_dev(dev, &ap->link) {
+			ata_acpi_clear_gtf(dev);
+			dev->flags |= ATA_DFLAG_ACPI_PENDING;
+		}
+	}
+}
+
+/**
+ * ata_acpi_on_devcfg - ATA ACPI hook called on device configuration
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be configured.
+ * IDENTIFY data might have been modified after this hook is run.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Positive number if IDENTIFY data needs to be refreshed, 0 if not,
+ * -errno on failure.
+ */
+int ata_acpi_on_devcfg(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	struct ata_eh_context *ehc = &ap->link.eh_context;
+	int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA;
+	int nr_executed = 0;
+	int rc;
+
+	if (!dev->acpi_handle)
+		return 0;
+
+	/* do we need to do _GTF? */
+	if (!(dev->flags & ATA_DFLAG_ACPI_PENDING) &&
+	    !(acpi_sata && (ehc->i.flags & ATA_EHI_DID_HARDRESET)))
+		return 0;
+
+	/* do _SDD if SATA */
+	if (acpi_sata) {
+		rc = ata_acpi_push_id(dev);
+		if (rc)
+			goto acpi_err;
+	}
 
+	/* do _GTF */
+	rc = ata_acpi_exec_tfs(dev, &nr_executed);
+	if (rc)
+		goto acpi_err;
+
+	dev->flags &= ~ATA_DFLAG_ACPI_PENDING;
+
+	/* refresh IDENTIFY page if any _GTF command has been executed */
+	if (nr_executed) {
+		rc = ata_dev_reread_id(dev, 0);
+		if (rc < 0) {
+			ata_dev_printk(dev, KERN_ERR, "failed to IDENTIFY "
+				       "after ACPI commands\n");
+			return rc;
+		}
+	}
+
+	return 0;
+
+ acpi_err:
+	/* ignore evaluation failure if we can continue safely */
+	if (rc == -EINVAL && !nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN))
+		return 0;
+
+	/* fail and let EH retry once more for unknown IO errors */
+	if (!(dev->flags & ATA_DFLAG_ACPI_FAILED)) {
+		dev->flags |= ATA_DFLAG_ACPI_FAILED;
+		return rc;
+	}
+
+	ata_dev_printk(dev, KERN_WARNING,
+		       "ACPI: failed the second time, disabled\n");
+	dev->acpi_handle = NULL;
+
+	/* We can safely continue if no _GTF command has been executed
+	 * and port is not frozen.
+	 */
+	if (!nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN))
+		return 0;
+
+	return rc;
+}
+
+/**
+ * ata_acpi_on_disable - ATA ACPI hook called when a device is disabled
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be disabled.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_on_disable(struct ata_device *dev)
+{
+	ata_acpi_clear_gtf(dev);
+}
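
The _GTF plumbing above treats the ACPI buffer as an array of 7-byte register blocks that map one-to-one onto the taskfile shadow registers 0x1f1-0x1f7, skipping all-zero entries.  A standalone sketch of that decode, with illustrative stand-in types rather than the kernel's, could look like:

/* Hedged sketch of the decode done by ata_acpi_gtf_to_tf() and the
 * all-zero skip in ata_acpi_run_tf(); not part of the patch. */
#include <stdio.h>
#include <string.h>

#define REGS_PER_GTF 7

struct tf_sketch {
	unsigned char feature, nsect, lbal, lbam, lbah, device, command;
};

/* Map one 7-byte _GTF entry onto the shadow register block 0x1f1-0x1f7. */
static int gtf_to_tf(const unsigned char *gtf, struct tf_sketch *tf)
{
	static const unsigned char zeroes[REGS_PER_GTF];

	/* an all-zero entry is a no-op and is skipped, as in the patch */
	if (!memcmp(gtf, zeroes, REGS_PER_GTF))
		return 0;

	tf->feature = gtf[0];	/* 0x1f1 */
	tf->nsect   = gtf[1];	/* 0x1f2 */
	tf->lbal    = gtf[2];	/* 0x1f3 */
	tf->lbam    = gtf[3];	/* 0x1f4 */
	tf->lbah    = gtf[4];	/* 0x1f5 */
	tf->device  = gtf[5];	/* 0x1f6 */
	tf->command = gtf[6];	/* 0x1f7 */
	return 1;
}

int main(void)
{
	/* e.g. SET FEATURES / enable write cache: feature 0x02, command 0xef */
	unsigned char entry[REGS_PER_GTF] = { 0x02, 0, 0, 0, 0, 0, 0xef };
	struct tf_sketch tf;

	if (gtf_to_tf(entry, &tf))
		printf("ACPI cmd %02x/%02x\n", tf.command, tf.feature);
	return 0;
}
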
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9b51430..f3cecc6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -30,6 +30,14 @@
  *  Hardware documentation available from http://www.t13.org/ and
  *  http://www.sata-io.org/
  *
+ *  Standards documents from:
+ *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
+ *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
+ *	http://www.sata-io.org (SATA)
+ *	http://www.compactflash.org (CF)
+ *	http://www.qic.org (QIC157 - Tape and DSC)
+ *	http://www.ce-ata.org (CE-ATA: not supported)
+ *
  */
 
 #include <linux/kernel.h>
@@ -49,18 +57,17 @@
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/scatterlist.h>
+#include <linux/io.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
-#include <asm/io.h>
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>
+#include <linux/cdrom.h>
 
 #include "libata.h"
 
-#define DRV_VERSION	"2.21"	/* must be exactly four chars */
-
 
 /* debounce timing parameters in msecs { interval, duration, timeout } */
 const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
@@ -70,10 +77,13 @@ const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
 static unsigned int ata_dev_init_params(struct ata_device *dev,
 					u16 heads, u16 sectors);
 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static unsigned int ata_dev_set_feature(struct ata_device *dev,
+					u8 enable, u8 feature);
 static void ata_dev_xfermask(struct ata_device *dev);
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
 unsigned int ata_print_id = 1;
-static struct workqueue_struct *ata_wq;
+struct workqueue_struct *ata_wq = NULL;
 
 struct workqueue_struct *ata_aux_wq;
 
@@ -85,21 +95,29 @@ int atapi_dmadir = 0;
 module_param(atapi_dmadir, int, 0444);
 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
 
+int atapi_passthru16 = 1;
+module_param(atapi_passthru16, int, 0444);
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
+
 int libata_fua = 0;
 module_param_named(fua, libata_fua, int, 0444);
 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
 
-static int ata_ignore_hpa = 0;
+static int ata_ignore_hpa;
 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
 
+static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
+module_param_named(dma, libata_dma_mask, int, 0444);
+MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
+
 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
 module_param(ata_probe_timeout, int, 0444);
 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
 
 int libata_noacpi = 1;
 module_param_named(noacpi, libata_noacpi, int, 0444);
-MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
+MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
 
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
@@ -110,8 +128,9 @@ MODULE_VERSION(DRV_VERSION);
 /**
  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
  *	@tf: Taskfile to convert
- *	@fis: Buffer into which data will output
  *	@pmp: Port multiplier port
+ *	@is_cmd: This FIS is for a command
+ *	@fis: Buffer into which data will be output
  *
  *	Converts a standard ATA taskfile to a Serial ATA
  *	FIS structure (Register - Host to Device).
@@ -119,12 +138,13 @@ MODULE_VERSION(DRV_VERSION);
  *	LOCKING:
  *	Inherited from caller.
  */
-
-void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
 {
-	fis[0] = 0x27;	/* Register - Host to Device FIS */
-	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
-					    bit 7 indicates Command FIS */
+	fis[0] = 0x27;			/* Register - Host to Device FIS */
+	fis[1] = pmp & 0xf;		/* Port multiplier number */
+	if (is_cmd)
+		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
+
 	fis[2] = tf->command;
 	fis[3] = tf->feature;
 
@@ -232,7 +252,7 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
 	if (dev->flags & ATA_DFLAG_PIO) {
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
-	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
+	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
 		/* Unable to use DMA due to host limitation */
 		tf->protocol = ATA_PROT_PIO;
 		index = dev->multi_count ? 0 : 8;
@@ -601,14 +621,192 @@ static const char *sata_spd_string(unsigned int spd)
 void ata_dev_disable(struct ata_device *dev)
 {
 	if (ata_dev_enabled(dev)) {
-		if (ata_msg_drv(dev->ap))
+		if (ata_msg_drv(dev->link->ap))
 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+		ata_acpi_on_disable(dev);
 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
 					     ATA_DNXFER_QUIET);
 		dev->class++;
 	}
 }
 
+static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	u32 scontrol;
+	unsigned int err_mask;
+	int rc;
+
+	/*
+	 * disallow DIPM for drivers which haven't set
+	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
+	 * phy ready will be set in the interrupt status on
+	 * state changes, which will cause some drivers to
+	 * think there are errors - additionally drivers will
+	 * need to disable hot plug.
+	 */
+	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
+		ap->pm_policy = NOT_AVAILABLE;
+		return -EINVAL;
+	}
+
+	/*
+	 * For DIPM, we will only enable it for the
+	 * min_power setting.
+	 *
+	 * Why?  Because Disks are too stupid to know that
+	 * Why?  Because disks are too stupid to know that
+	 * if the host rejects a request to go to SLUMBER
+	 * they should retry at PARTIAL, and instead they
+	 * just give up.  So, for medium_power to work at
+	 * all, we need to only allow HIPM.
+	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+	if (rc)
+		return rc;
+
+	switch (policy) {
+	case MIN_POWER:
+		/* no restrictions on IPM transitions */
+		scontrol &= ~(0x3 << 8);
+		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+		if (rc)
+			return rc;
+
+		/* enable DIPM */
+		if (dev->flags & ATA_DFLAG_DIPM)
+			err_mask = ata_dev_set_feature(dev,
+					SETFEATURES_SATA_ENABLE, SATA_DIPM);
+		break;
+	case MEDIUM_POWER:
+		/* allow IPM to PARTIAL */
+		scontrol &= ~(0x1 << 8);
+		scontrol |= (0x2 << 8);
+		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+		if (rc)
+			return rc;
+
+		/*
+		 * we don't have to disable DIPM since the IPM flags
+		 * disallow transitions to SLUMBER, which effectively
+		 * disables DIPM if the device does not support PARTIAL
+		 */
+		break;
+	case NOT_AVAILABLE:
+	case MAX_PERFORMANCE:
+		/* disable all IPM transitions */
+		scontrol |= (0x3 << 8);
+		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+		if (rc)
+			return rc;
+
+		/*
+		 * we don't have to disable DIPM since the IPM flags
+		 * disallow all transitions, which effectively
+		 * disables DIPM anyway.
+		 */
+		break;
+	}
+
+	/* FIXME: handle SET FEATURES failure */
+	(void) err_mask;
+
+	return 0;
+}
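
The DIPM policy switch above works by rewriting the IPM field of SControl (bits 8-9): clearing both bits allows PARTIAL and SLUMBER, setting only bit 9 blocks SLUMBER (medium_power), and setting both blocks all interface power-management transitions (max_performance).  A small sketch of that bit manipulation, with a hypothetical enum standing in for link_pm, might be:

/* Hedged sketch of the SControl IPM update done in ata_dev_set_dipm();
 * the enum and helper are illustrative only. */
#include <stdint.h>
#include <stdio.h>

enum lpm_policy { MIN_POWER, MEDIUM_POWER, MAX_PERFORMANCE };

/* SControl bits 8-9: bit 8 disallows PARTIAL, bit 9 disallows SLUMBER. */
static uint32_t apply_ipm_policy(uint32_t scontrol, enum lpm_policy policy)
{
	switch (policy) {
	case MIN_POWER:
		scontrol &= ~(0x3u << 8);	/* allow PARTIAL and SLUMBER */
		break;
	case MEDIUM_POWER:
		scontrol &= ~(0x1u << 8);	/* allow PARTIAL ...         */
		scontrol |= (0x2u << 8);	/* ... but disallow SLUMBER  */
		break;
	case MAX_PERFORMANCE:
		scontrol |= (0x3u << 8);	/* disallow all transitions  */
		break;
	}
	return scontrol;
}

int main(void)
{
	printf("0x%03x\n", apply_ipm_policy(0x300, MIN_POWER));       /* 0x000 */
	printf("0x%03x\n", apply_ipm_policy(0x000, MEDIUM_POWER));    /* 0x200 */
	printf("0x%03x\n", apply_ipm_policy(0x000, MAX_PERFORMANCE)); /* 0x300 */
	return 0;
}
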
+
+/**
+ *	ata_dev_enable_pm - enable SATA interface power management
+ *	@dev:  device to enable power management
+ *	@policy: the link power management policy
+ *
+ *	Enable SATA Interface power management.  This will enable
+ *	Device Interface Power Management (DIPM) for min_power
+ * 	policy, and then call driver specific callbacks for
+ *	enabling Host Initiated Power management.
+ *
+ *	Locking: Caller.
+ *	Returns: void (on failure, @ap->pm_policy reverts to MAX_PERFORMANCE).
+ */
+void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
+{
+	int rc = 0;
+	struct ata_port *ap = dev->link->ap;
+
+	/* set HIPM first, then DIPM */
+	if (ap->ops->enable_pm)
+		rc = ap->ops->enable_pm(ap, policy);
+	if (rc)
+		goto enable_pm_out;
+	rc = ata_dev_set_dipm(dev, policy);
+
+enable_pm_out:
+	if (rc)
+		ap->pm_policy = MAX_PERFORMANCE;
+	else
+		ap->pm_policy = policy;
+	return /* rc */;	/* hopefully we can use 'rc' eventually */
+}
+
+#ifdef CONFIG_PM
+/**
+ *	ata_dev_disable_pm - disable SATA interface power management
+ *	@dev: device to disable power management
+ *
+ *	Disable SATA Interface power management.  This will disable
+ *	Device Interface Power Management (DIPM) without changing
+ *	the policy, and call driver specific callbacks for disabling
+ *	Host Initiated Power management.
+ *
+ *	Locking: Caller.
+ *	Returns: void
+ */
+static void ata_dev_disable_pm(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+
+	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
+	if (ap->ops->disable_pm)
+		ap->ops->disable_pm(ap);
+}
+#endif	/* CONFIG_PM */
+
+void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
+{
+	ap->pm_policy = policy;
+	ap->link.eh_info.action |= ATA_EHI_LPM;
+	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
+	ata_port_schedule_eh(ap);
+}
+
+#ifdef CONFIG_PM
+static void ata_lpm_enable(struct ata_host *host)
+{
+	struct ata_link *link;
+	struct ata_port *ap;
+	struct ata_device *dev;
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		ap = host->ports[i];
+		ata_port_for_each_link(link, ap) {
+			ata_link_for_each_dev(dev, link)
+				ata_dev_disable_pm(dev);
+		}
+	}
+}
+
+static void ata_lpm_disable(struct ata_host *host)
+{
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		ata_lpm_schedule(ap, ap->pm_policy);
+	}
+}
+#endif	/* CONFIG_PM */
+
+
 /**
  *	ata_devchk - PATA device presence detection
  *	@ap: ATA channel to examine
@@ -664,37 +862,57 @@ static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
  *	None.
  *
  *	RETURNS:
- *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
- *	the event of failure.
+ *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
+ *	%ATA_DEV_UNKNOWN in the event of failure.
  */
-
 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
 {
 	/* Apple's open source Darwin code hints that some devices only
 	 * put a proper signature into the LBA mid/high registers,
 	 * So, we only check those.  It's sufficient for uniqueness.
+	 *
+	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
+	 * signatures for ATA and ATAPI devices attached on SerialATA,
+	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
+	 * spec never mentions using different signatures for ATA and
+	 * ATAPI devices.  Then, the Serial ATA II: Port Multiplier
+	 * specification began to use 0x69/0x96 to identify port
+	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
+	 * ATA/ATAPI-7 shortly afterwards dropped the descriptions of
+	 * 0x3c/0xc3 and 0x69/0x96 and described them as reserved for
+	 * SerialATA.
+	 * SerialATA.
+	 *
+	 * We follow the current spec and consider that 0x69/0x96
+	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
 	 */
-
-	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
-	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
+	if ((tf->lbam == 0) && (tf->lbah == 0)) {
 		DPRINTK("found ATA device by sig\n");
 		return ATA_DEV_ATA;
 	}
 
-	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
-	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
+	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
 		DPRINTK("found ATAPI device by sig\n");
 		return ATA_DEV_ATAPI;
 	}
 
+	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
+		DPRINTK("found PMP device by sig\n");
+		return ATA_DEV_PMP;
+	}
+
+	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
+		printk(KERN_INFO "ata: SEMB device ignored\n");
+		return ATA_DEV_SEMB_UNSUP; /* not yet */
+	}
+
 	DPRINTK("unknown device\n");
 	return ATA_DEV_UNKNOWN;
 }
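
The new classification relies purely on the LBA mid/high signature left in the shadow registers after reset: 0x00/0x00 for ATA, 0x14/0xEB for ATAPI, 0x69/0x96 for a port multiplier and 0x3C/0xC3 for a SEMB device.  A tiny standalone sketch of that table, using illustrative names, might be:

/* Hedged sketch of the signature table used by ata_dev_classify(). */
#include <stdio.h>

enum dev_class { DEV_ATA, DEV_ATAPI, DEV_PMP, DEV_SEMB, DEV_UNKNOWN };

static enum dev_class classify_sig(unsigned char lbam, unsigned char lbah)
{
	if (lbam == 0x00 && lbah == 0x00)
		return DEV_ATA;
	if (lbam == 0x14 && lbah == 0xeb)
		return DEV_ATAPI;
	if (lbam == 0x69 && lbah == 0x96)
		return DEV_PMP;		/* port multiplier */
	if (lbam == 0x3c && lbah == 0xc3)
		return DEV_SEMB;	/* SEMB device, ignored above */
	return DEV_UNKNOWN;
}

int main(void)
{
	printf("%d\n", classify_sig(0x69, 0x96));	/* DEV_PMP */
	return 0;
}
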
 
 /**
  *	ata_dev_try_classify - Parse returned ATA device signature
- *	@ap: ATA channel to examine
- *	@device: Device to examine (starting at zero)
+ *	@dev: ATA device to classify
+ *	@present: whether the device seems to be present
  *	@r_err: Value of error register on completion
  *
  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
@@ -712,15 +930,15 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
  *	RETURNS:
  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
  */
-
-unsigned int
-ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
+unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
+				  u8 *r_err)
 {
+	struct ata_port *ap = dev->link->ap;
 	struct ata_taskfile tf;
 	unsigned int class;
 	u8 err;
 
-	ap->ops->dev_select(ap, device);
+	ap->ops->dev_select(ap, dev->devno);
 
 	memset(&tf, 0, sizeof(tf));
 
@@ -730,12 +948,12 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
 		*r_err = err;
 
 	/* see if device passed diags: if master then continue and warn later */
-	if (err == 0 && device == 0)
+	if (err == 0 && dev->devno == 0)
 		/* diagnostic fail : do nothing _YET_ */
-		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
+		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
 	else if (err == 1)
 		/* do nothing */ ;
-	else if ((device == 0) && (err == 0x81))
+	else if ((dev->devno == 0) && (err == 0x81))
 		/* do nothing */ ;
 	else
 		return ATA_DEV_NONE;
@@ -743,10 +961,20 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
 	/* determine if device is ATA or ATAPI */
 	class = ata_dev_classify(&tf);
 
-	if (class == ATA_DEV_UNKNOWN)
-		return ATA_DEV_NONE;
-	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
-		return ATA_DEV_NONE;
+	if (class == ATA_DEV_UNKNOWN) {
+		/* If the device failed diagnostic, it's likely to
+		 * have reported incorrect device signature too.
+		 * Assume ATA device if the device seems present but
+		 * device signature is invalid with diagnostic
+		 * failure.
+		 */
+		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+			class = ATA_DEV_ATA;
+		else
+			class = ATA_DEV_NONE;
+	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
+		class = ATA_DEV_NONE;
+
 	return class;
 }
 
@@ -813,6 +1041,21 @@ void ata_id_c_string(const u16 *id, unsigned char *s,
 	*p = '\0';
 }
 
+static u64 ata_id_n_sectors(const u16 *id)
+{
+	if (ata_id_has_lba(id)) {
+		if (ata_id_has_lba48(id))
+			return ata_id_u64(id, 100);
+		else
+			return ata_id_u32(id, 60);
+	} else {
+		if (ata_id_current_chs_valid(id))
+			return ata_id_u32(id, 57);
+		else
+			return id[1] * id[3] * id[6];
+	}
+}
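
ata_id_n_sectors() reads the capacity straight out of the IDENTIFY words: 100-103 for LBA48, 60-61 for LBA28, and the default CHS geometry (words 1, 3 and 6) as a last resort; the current-CHS branch (word 57) is omitted from the sketch below.  A user-space version of the same word decode, assuming a little-endian 256-word IDENTIFY buffer, might be:

/* Hedged sketch of the IDENTIFY capacity decode mirrored by
 * ata_id_n_sectors(); helper names are illustrative, not libata's. */
#include <stdint.h>
#include <stdio.h>

static uint32_t id_u32(const uint16_t *id, int word)
{
	return (uint32_t)id[word] | ((uint32_t)id[word + 1] << 16);
}

static uint64_t id_u64(const uint16_t *id, int word)
{
	return (uint64_t)id_u32(id, word) |
	       ((uint64_t)id_u32(id, word + 2) << 32);
}

static uint64_t id_n_sectors(const uint16_t *id)
{
	int has_lba   = id[49] & (1 << 9);	/* capabilities: LBA supported */
	int has_lba48 = id[83] & (1 << 10);	/* command set: 48-bit addressing */

	if (has_lba)
		return has_lba48 ? id_u64(id, 100) : id_u32(id, 60);

	/* fall back to default CHS geometry (words 1, 3, 6);
	 * the kernel also honours current CHS (word 57), omitted here */
	return (uint64_t)id[1] * id[3] * id[6];
}

int main(void)
{
	uint16_t id[256] = { 0 };

	id[49] = 1 << 9;	/* LBA supported */
	id[60] = 0x6db0;	/* LBA28 capacity, low word  */
	id[61] = 0x12a1;	/* LBA28 capacity, high word */

	printf("%llu sectors\n", (unsigned long long)id_n_sectors(id));
	return 0;
}
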
+
 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
 {
 	u64 sectors = 0;
@@ -840,129 +1083,110 @@ static u64 ata_tf_to_lba(struct ata_taskfile *tf)
 }
 
 /**
- *	ata_read_native_max_address_ext	-	LBA48 native max query
- *	@dev: Device to query
+ *	ata_read_native_max_address - Read native max address
+ *	@dev: target device
+ *	@max_sectors: out parameter for the result native max address
  *
- *	Perform an LBA48 size query upon the device in question. Return the
- *	actual LBA48 size or zero if the command fails.
- */
-
-static u64 ata_read_native_max_address_ext(struct ata_device *dev)
-{
-	unsigned int err;
-	struct ata_taskfile tf;
-
-	ata_tf_init(dev, &tf);
-
-	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
-	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
-	tf.protocol |= ATA_PROT_NODATA;
-	tf.device |= 0x40;
-
-	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
-	if (err)
-		return 0;
-
-	return ata_tf_to_lba48(&tf);
-}
-
-/**
- *	ata_read_native_max_address	-	LBA28 native max query
- *	@dev: Device to query
+ *	Perform an LBA48 or LBA28 native size query upon the device in
+ *	question.
  *
- *	Performa an LBA28 size query upon the device in question. Return the
- *	actual LBA28 size or zero if the command fails.
+ *	RETURNS:
+ *	0 on success, -EACCES if command is aborted by the drive.
+ *	-EIO on other errors.
  */
-
-static u64 ata_read_native_max_address(struct ata_device *dev)
+static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
 {
-	unsigned int err;
+	unsigned int err_mask;
 	struct ata_taskfile tf;
+	int lba48 = ata_id_has_lba48(dev->id);
 
 	ata_tf_init(dev, &tf);
 
-	tf.command = ATA_CMD_READ_NATIVE_MAX;
+	/* always clear all address registers */
 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+
+	if (lba48) {
+		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
+		tf.flags |= ATA_TFLAG_LBA48;
+	} else
+		tf.command = ATA_CMD_READ_NATIVE_MAX;
+
 	tf.protocol |= ATA_PROT_NODATA;
-	tf.device |= 0x40;
+	tf.device |= ATA_LBA;
 
-	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
-	if (err)
-		return 0;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (err_mask) {
+		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
+			       "max address (err_mask=0x%x)\n", err_mask);
+		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+			return -EACCES;
+		return -EIO;
+	}
 
-	return ata_tf_to_lba(&tf);
+	if (lba48)
+		*max_sectors = ata_tf_to_lba48(&tf);
+	else
+		*max_sectors = ata_tf_to_lba(&tf);
+	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
+		(*max_sectors)--;
+	return 0;
 }
 
 /**
- *	ata_set_native_max_address_ext	-	LBA48 native max set
- *	@dev: Device to query
+ *	ata_set_max_sectors - Set max sectors
+ *	@dev: target device
  *	@new_sectors: new max sectors value to set for the device
  *
- *	Perform an LBA48 size set max upon the device in question. Return the
- *	actual LBA48 size or zero if the command fails.
+ *	Set max sectors of @dev to @new_sectors.
+ *
+ *	RETURNS:
+ *	0 on success, -EACCES if command is aborted or denied (due to
+ *	previous non-volatile SET_MAX) by the drive.  -EIO on other
+ *	errors.
  */
-
-static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
+static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
 {
-	unsigned int err;
+	unsigned int err_mask;
 	struct ata_taskfile tf;
+	int lba48 = ata_id_has_lba48(dev->id);
 
 	new_sectors--;
 
 	ata_tf_init(dev, &tf);
 
-	tf.command = ATA_CMD_SET_MAX_EXT;
-	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
-	tf.protocol |= ATA_PROT_NODATA;
-	tf.device |= 0x40;
-
-	tf.lbal = (new_sectors >> 0) & 0xff;
-	tf.lbam = (new_sectors >> 8) & 0xff;
-	tf.lbah = (new_sectors >> 16) & 0xff;
-
-	tf.hob_lbal = (new_sectors >> 24) & 0xff;
-	tf.hob_lbam = (new_sectors >> 32) & 0xff;
-	tf.hob_lbah = (new_sectors >> 40) & 0xff;
-
-	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
-	if (err)
-		return 0;
-
-	return ata_tf_to_lba48(&tf);
-}
-
-/**
- *	ata_set_native_max_address	-	LBA28 native max set
- *	@dev: Device to query
- *	@new_sectors: new max sectors value to set for the device
- *
- *	Perform an LBA28 size set max upon the device in question. Return the
- *	actual LBA28 size or zero if the command fails.
- */
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 
-static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
-{
-	unsigned int err;
-	struct ata_taskfile tf;
+	if (lba48) {
+		tf.command = ATA_CMD_SET_MAX_EXT;
+		tf.flags |= ATA_TFLAG_LBA48;
 
-	new_sectors--;
+		tf.hob_lbal = (new_sectors >> 24) & 0xff;
+		tf.hob_lbam = (new_sectors >> 32) & 0xff;
+		tf.hob_lbah = (new_sectors >> 40) & 0xff;
+	} else {
+		tf.command = ATA_CMD_SET_MAX;
 
-	ata_tf_init(dev, &tf);
+		tf.device |= (new_sectors >> 24) & 0xf;
+	}
 
-	tf.command = ATA_CMD_SET_MAX;
-	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 	tf.protocol |= ATA_PROT_NODATA;
+	tf.device |= ATA_LBA;
 
 	tf.lbal = (new_sectors >> 0) & 0xff;
 	tf.lbam = (new_sectors >> 8) & 0xff;
 	tf.lbah = (new_sectors >> 16) & 0xff;
-	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
 
-	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
-	if (err)
-		return 0;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (err_mask) {
+		ata_dev_printk(dev, KERN_WARNING, "failed to set "
+			       "max address (err_mask=0x%x)\n", err_mask);
+		if (err_mask == AC_ERR_DEV &&
+		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
+			return -EACCES;
+		return -EIO;
+	}
 
-	return ata_tf_to_lba(&tf);
+	return 0;
 }
 
 /**
@@ -972,60 +1196,93 @@ static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
  *	it if required to the full size of the media. The caller must check
  *	the drive has the HPA feature set enabled.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
  */
-
-static u64 ata_hpa_resize(struct ata_device *dev)
+static int ata_hpa_resize(struct ata_device *dev)
 {
-	u64 sectors = dev->n_sectors;
-	u64 hpa_sectors;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+	u64 sectors = ata_id_n_sectors(dev->id);
+	u64 native_sectors;
+	int rc;
 
-	if (ata_id_has_lba48(dev->id))
-		hpa_sectors = ata_read_native_max_address_ext(dev);
-	else
-		hpa_sectors = ata_read_native_max_address(dev);
+	/* do we need to do it? */
+	if (dev->class != ATA_DEV_ATA ||
+	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
+	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
+		return 0;
 
-	if (hpa_sectors > sectors) {
-		ata_dev_printk(dev, KERN_INFO,
-			"Host Protected Area detected:\n"
-			"\tcurrent size: %lld sectors\n"
-			"\tnative size: %lld sectors\n",
-			(long long)sectors, (long long)hpa_sectors);
-
-		if (ata_ignore_hpa) {
-			if (ata_id_has_lba48(dev->id))
-				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
-			else
-				hpa_sectors = ata_set_native_max_address(dev,
-								hpa_sectors);
-
-			if (hpa_sectors) {
-				ata_dev_printk(dev, KERN_INFO, "native size "
-					"increased to %lld sectors\n",
-					(long long)hpa_sectors);
-				return hpa_sectors;
-			}
+	/* read native max address */
+	rc = ata_read_native_max_address(dev, &native_sectors);
+	if (rc) {
+		/* If HPA isn't going to be unlocked, skip HPA
+		 * resizing from the next try.
+		 */
+		if (!ata_ignore_hpa) {
+			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
+				       "broken, will skip HPA handling\n");
+			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+
+			/* we can continue if device aborted the command */
+			if (rc == -EACCES)
+				rc = 0;
 		}
-	} else if (hpa_sectors < sectors)
-		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
-			       "is smaller than sectors (%lld)\n", __FUNCTION__,
-			       (long long)hpa_sectors, (long long)sectors);
 
-	return sectors;
-}
+		return rc;
+	}
 
-static u64 ata_id_n_sectors(const u16 *id)
-{
-	if (ata_id_has_lba(id)) {
-		if (ata_id_has_lba48(id))
-			return ata_id_u64(id, 100);
-		else
-			return ata_id_u32(id, 60);
-	} else {
-		if (ata_id_current_chs_valid(id))
-			return ata_id_u32(id, 57);
-		else
-			return id[1] * id[3] * id[6];
+	/* nothing to do? */
+	if (native_sectors <= sectors || !ata_ignore_hpa) {
+		if (!print_info || native_sectors == sectors)
+			return 0;
+
+		if (native_sectors > sectors)
+			ata_dev_printk(dev, KERN_INFO,
+				"HPA detected: current %llu, native %llu\n",
+				(unsigned long long)sectors,
+				(unsigned long long)native_sectors);
+		else if (native_sectors < sectors)
+			ata_dev_printk(dev, KERN_WARNING,
+				"native sectors (%llu) is smaller than "
+				"sectors (%llu)\n",
+				(unsigned long long)native_sectors,
+				(unsigned long long)sectors);
+		return 0;
 	}
+
+	/* let's unlock HPA */
+	rc = ata_set_max_sectors(dev, native_sectors);
+	if (rc == -EACCES) {
+		/* if device aborted the command, skip HPA resizing */
+		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
+			       "(%llu -> %llu), skipping HPA handling\n",
+			       (unsigned long long)sectors,
+			       (unsigned long long)native_sectors);
+		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+		return 0;
+	} else if (rc)
+		return rc;
+
+	/* re-read IDENTIFY data */
+	rc = ata_dev_reread_id(dev, 0);
+	if (rc) {
+		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
+			       "data after HPA resizing\n");
+		return rc;
+	}
+
+	if (print_info) {
+		u64 new_sectors = ata_id_n_sectors(dev->id);
+		ata_dev_printk(dev, KERN_INFO,
+			"HPA unlocked: %llu -> %llu, native %llu\n",
+			(unsigned long long)sectors,
+			(unsigned long long)new_sectors,
+			(unsigned long long)native_sectors);
+	}
+
+	return 0;
 }
 
 /**
@@ -1082,7 +1339,7 @@ void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
  *	LOCKING:
  *	caller.
  */
-void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
+void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
 {
 }
 
@@ -1102,7 +1359,7 @@ void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
  *	caller.
  */
 
-void ata_std_dev_select (struct ata_port *ap, unsigned int device)
+void ata_std_dev_select(struct ata_port *ap, unsigned int device)
 {
 	u8 tmp;
 
@@ -1147,7 +1404,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
 	ap->ops->dev_select(ap, device);
 
 	if (wait) {
-		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
+		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
 			msleep(150);
 		ata_wait_idle(ap);
 	}
@@ -1223,7 +1480,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
 		 */
 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
 		if (mode < 5)	/* Valid PIO range */
-                	pio_mask = (2 << mode) - 1;
+			pio_mask = (2 << mode) - 1;
 		else
 			pio_mask = 1;
 
@@ -1283,21 +1540,15 @@ static unsigned int ata_id_xfermask(const u16 *id)
 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
 			 unsigned long delay)
 {
-	int rc;
-
-	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
-		return;
+	DPRINTK("ENTER (ap %p, data %p, delay %lu)\n", ap, data, delay);
 
-	PREPARE_WORK(&ap->port_task, fn, data);
 	ap->port_task_data = data;
 
+	/* may fail if ata_port_flush_task() in progress */
 	if (!delay)
-		rc = queue_work(ata_wq, &ap->port_task);
+		queue_work(ata_wq, &ap->port_task);
 	else
-		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
-
-	/* rc == 0 means that another user is using port task */
-	WARN_ON(rc == 0);
+		queue_delayed_work(ata_wq, &ap->port_task, delay);
 }
 
 /**
@@ -1312,32 +1563,11 @@ void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
  */
 void ata_port_flush_task(struct ata_port *ap)
 {
-	unsigned long flags;
+	DPRINTK("ENTER (ap %p)\n", ap);
 
-	DPRINTK("ENTER\n");
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	DPRINTK("flush #1\n");
 	flush_workqueue(ata_wq);
-
-	/*
-	 * At this point, if a task is running, it's guaranteed to see
-	 * the FLUSH flag; thus, it will never queue pio tasks again.
-	 * Cancel and flush.
-	 */
-	if (!cancel_delayed_work(&ap->port_task)) {
-		if (ata_msg_ctl(ap))
-			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
-					__FUNCTION__);
+	if (!cancel_delayed_work(&ap->port_task))
 		flush_workqueue(ata_wq);
-	}
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
-	spin_unlock_irqrestore(ap->lock, flags);
 
 	if (ata_msg_ctl(ap))
 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
@@ -1356,8 +1586,9 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  *	@tf: Taskfile registers for the command and the result
  *	@cdb: CDB for packet command
  *	@dma_dir: Data tranfer direction of the command
- *	@sg: sg list for the data buffer of the command
+ *	@sgl: sg list for the data buffer of the command
  *	@n_elem: Number of sg entries
+ *	@timeout: Timeout in msecs (0 for default)
  *
  *	Executes libata internal command with timeout.  @tf contains
  *	command on entry and result on return.  Timeout and error
@@ -1373,14 +1604,16 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  */
 unsigned ata_exec_internal_sg(struct ata_device *dev,
 			      struct ata_taskfile *tf, const u8 *cdb,
-			      int dma_dir, struct scatterlist *sg,
-			      unsigned int n_elem)
+			      int dma_dir, struct scatterlist *sgl,
+			      unsigned int n_elem, unsigned long timeout)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
 	u8 command = tf->command;
 	struct ata_queued_cmd *qc;
 	unsigned int tag, preempted_tag;
 	u32 preempted_sactive, preempted_qc_active;
+	int preempted_nr_active_links;
 	DECLARE_COMPLETION_ONSTACK(wait);
 	unsigned long flags;
 	unsigned int err_mask;
@@ -1416,12 +1649,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	qc->dev = dev;
 	ata_qc_reinit(qc);
 
-	preempted_tag = ap->active_tag;
-	preempted_sactive = ap->sactive;
+	preempted_tag = link->active_tag;
+	preempted_sactive = link->sactive;
 	preempted_qc_active = ap->qc_active;
-	ap->active_tag = ATA_TAG_POISON;
-	ap->sactive = 0;
+	preempted_nr_active_links = ap->nr_active_links;
+	link->active_tag = ATA_TAG_POISON;
+	link->sactive = 0;
 	ap->qc_active = 0;
+	ap->nr_active_links = 0;
 
 	/* prepare & issue qc */
 	qc->tf = *tf;
@@ -1431,11 +1666,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
 		unsigned int i, buflen = 0;
+		struct scatterlist *sg;
 
-		for (i = 0; i < n_elem; i++)
-			buflen += sg[i].length;
+		for_each_sg(sgl, sg, n_elem, i)
+			buflen += sg->length;
 
-		ata_sg_init(qc, sg, n_elem);
+		ata_sg_init(qc, sgl, n_elem);
 		qc->nbytes = buflen;
 	}
 
@@ -1446,7 +1682,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
+	if (!timeout)
+		timeout = ata_probe_timeout * 1000 / HZ;
+
+	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
 
 	ata_port_flush_task(ap);
 
@@ -1497,9 +1736,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	err_mask = qc->err_mask;
 
 	ata_qc_free(qc);
-	ap->active_tag = preempted_tag;
-	ap->sactive = preempted_sactive;
+	link->active_tag = preempted_tag;
+	link->sactive = preempted_sactive;
 	ap->qc_active = preempted_qc_active;
+	ap->nr_active_links = preempted_nr_active_links;
 
 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
 	 * Until those drivers are fixed, we detect the condition
@@ -1530,6 +1770,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
  *	@dma_dir: Data tranfer direction of the command
  *	@buf: Data buffer of the command
  *	@buflen: Length of data buffer
+ *	@timeout: Timeout in msecs (0 for default)
  *
  *	Wrapper around ata_exec_internal_sg() which takes simple
  *	buffer instead of sg list.
@@ -1542,7 +1783,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
  */
 unsigned ata_exec_internal(struct ata_device *dev,
 			   struct ata_taskfile *tf, const u8 *cdb,
-			   int dma_dir, void *buf, unsigned int buflen)
+			   int dma_dir, void *buf, unsigned int buflen,
+			   unsigned long timeout)
 {
 	struct scatterlist *psg = NULL, sg;
 	unsigned int n_elem = 0;
@@ -1554,7 +1796,8 @@ unsigned ata_exec_internal(struct ata_device *dev,
 		n_elem++;
 	}
 
-	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
+	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
+				    timeout);
 }
 
 /**
@@ -1581,7 +1824,7 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
 	tf.flags |= ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
 
-	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 }
 
 /**
@@ -1596,7 +1839,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
 {
 	/* Controller doesn't support  IORDY. Probably a pointless check
 	   as the caller should know this */
-	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
+	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
 		return 0;
 	/* PIO3 and higher it is mandatory */
 	if (adev->pio_mode > XFER_PIO_2)
@@ -1643,6 +1886,9 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
  *	for pre-ATA4 drives.
  *
+ *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
+ *	now we abort if we hit that case.
+ *
  *	LOCKING:
  *	Kernel thread context (may sleep)
  *
@@ -1652,7 +1898,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 		    unsigned int flags, u16 *id)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	unsigned int class = *p_class;
 	struct ata_taskfile tf;
 	unsigned int err_mask = 0;
@@ -1693,7 +1939,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 	tf.flags |= ATA_TFLAG_POLLING;
 
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
-				     id, sizeof(id[0]) * ATA_ID_WORDS);
+				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
 	if (err_mask) {
 		if (err_mask & AC_ERR_NODEV_HINT) {
 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
@@ -1747,13 +1993,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 		 * SET_FEATURES spin-up subcommand before it will accept
 		 * anything other than the original IDENTIFY command.
 		 */
-		ata_tf_init(dev, &tf);
-		tf.command = ATA_CMD_SET_FEATURES;
-		tf.feature = SETFEATURES_SPINUP;
-		tf.protocol = ATA_PROT_NODATA;
-		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
-		if (err_mask) {
+		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
+		if (err_mask && id[2] != 0x738c) {
 			rc = -EIO;
 			reason = "SPINUP failed";
 			goto err_out;
@@ -1770,10 +2011,13 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 		/*
 		 * The exact sequence expected by certain pre-ATA4 drives is:
 		 * SRST RESET
-		 * IDENTIFY
-		 * INITIALIZE DEVICE PARAMETERS
+		 * IDENTIFY (optional in early ATA)
+		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
 		 * anything else..
 		 * Some drives were very specific about that exact sequence.
+		 *
+		 * Note that ATA4 says LBA is mandatory so the second check
+		 * should never trigger.
 		 */
 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
@@ -1804,20 +2048,21 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 
 static inline u8 ata_dev_knobble(struct ata_device *dev)
 {
-	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+	struct ata_port *ap = dev->link->ap;
+	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
 }
 
 static void ata_dev_config_ncq(struct ata_device *dev,
 			       char *desc, size_t desc_sz)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
 
 	if (!ata_id_has_ncq(dev->id)) {
 		desc[0] = '\0';
 		return;
 	}
-	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
+	if (dev->horkage & ATA_HORKAGE_NONCQ) {
 		snprintf(desc, desc_sz, "NCQ (not used)");
 		return;
 	}
@@ -1847,8 +2092,9 @@ static void ata_dev_config_ncq(struct ata_device *dev,
  */
 int ata_dev_configure(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
-	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
+	struct ata_port *ap = dev->link->ap;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
 	const u16 *id = dev->id;
 	unsigned int xfer_mask;
 	char revbuf[7];		/* XYZ-99\0 */
@@ -1865,15 +2111,18 @@ int ata_dev_configure(struct ata_device *dev)
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
 
-	/* set _SDD */
-	rc = ata_acpi_push_id(dev);
-	if (rc) {
-		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
-			rc);
-	}
+	/* set horkage */
+	dev->horkage |= ata_dev_blacklisted(dev);
 
-	/* retrieve and execute the ATA task file of _GTF */
-	ata_acpi_exec_tfs(ap);
+	/* let ACPI work its magic */
+	rc = ata_acpi_on_devcfg(dev);
+	if (rc)
+		return rc;
+
+	/* massage HPA, do it early as it might change IDENTIFY data */
+	rc = ata_hpa_resize(dev);
+	if (rc)
+		return rc;
 
 	/* print device capabilities */
 	if (ata_msg_probe(ap))
@@ -1903,6 +2152,13 @@ int ata_dev_configure(struct ata_device *dev)
 	if (ata_msg_probe(ap))
 		ata_dump_id(id);
 
+	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
+	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
+			sizeof(fwrevbuf));
+
+	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
+			sizeof(modelbuf));
+
 	/* ATA-specific feature tests */
 	if (dev->class == ATA_DEV_ATA) {
 		if (ata_id_is_cfa(id)) {
@@ -1911,19 +2167,11 @@ int ata_dev_configure(struct ata_device *dev)
 					       "supports DRM functions and may "
 					       "not be fully accessable.\n");
 			snprintf(revbuf, 7, "CFA");
-		}
-		else
-			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
+		} else
+			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
 
 		dev->n_sectors = ata_id_n_sectors(id);
 
-		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
-		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
-				sizeof(fwrevbuf));
-
-		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
-				sizeof(modelbuf));
-
 		if (dev->id[59] & 0x100)
 			dev->multi_count = dev->id[59] & 0xff;
 
@@ -1942,9 +2190,6 @@ int ata_dev_configure(struct ata_device *dev)
 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
 			}
 
-			if (ata_id_hpa_enabled(dev->id))
-				dev->n_sectors = ata_hpa_resize(dev);
-
 			/* config NCQ */
 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
 
@@ -1993,7 +2238,9 @@ int ata_dev_configure(struct ata_device *dev)
 
 	/* ATAPI-specific feature tests */
 	else if (dev->class == ATA_DEV_ATAPI) {
-		char *cdb_intr_string = "";
+		const char *cdb_intr_string = "";
+		const char *atapi_an_string = "";
+		u32 sntf;
 
 		rc = atapi_cdb_len(id);
 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
@@ -2005,6 +2252,29 @@ int ata_dev_configure(struct ata_device *dev)
 		}
 		dev->cdb_len = (unsigned int) rc;
 
+		/* Enable ATAPI AN if both the host and device have
+		 * the support.  If PMP is attached, SNTF is required
+		 * to enable ATAPI AN to discern between PHY status
+		 * changed notifications and ATAPI ANs.
+		 */
+		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+		    (!ap->nr_pmp_links ||
+		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
+			unsigned int err_mask;
+
+			/* issue SET feature command to turn this on */
+			err_mask = ata_dev_set_feature(dev,
+					SETFEATURES_SATA_ENABLE, SATA_AN);
+			if (err_mask)
+				ata_dev_printk(dev, KERN_ERR,
+					"failed to enable ATAPI AN "
+					"(err_mask=0x%x)\n", err_mask);
+			else {
+				dev->flags |= ATA_DFLAG_AN;
+				atapi_an_string = ", ATAPI AN";
+			}
+		}
+
 		if (ata_id_cdb_intr(dev->id)) {
 			dev->flags |= ATA_DFLAG_CDB_INTR;
 			cdb_intr_string = ", CDB intr";
@@ -2012,9 +2282,11 @@ int ata_dev_configure(struct ata_device *dev)
 
 		/* print device info to dmesg */
 		if (ata_msg_drv(ap) && print_info)
-			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
+			ata_dev_printk(dev, KERN_INFO,
+				       "ATAPI: %s, %s, max %s%s%s\n",
+				       modelbuf, fwrevbuf,
 				       ata_mode_string(xfer_mask),
-				       cdb_intr_string);
+				       cdb_intr_string, atapi_an_string);
 	}
 
 	/* determine max_sectors */
@@ -2022,11 +2294,18 @@ int ata_dev_configure(struct ata_device *dev)
 	if (dev->flags & ATA_DFLAG_LBA48)
 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
 
+	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
+		if (ata_id_has_hipm(dev->id))
+			dev->flags |= ATA_DFLAG_HIPM;
+		if (ata_id_has_dipm(dev->id))
+			dev->flags |= ATA_DFLAG_DIPM;
+	}
+
 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
 		/* Let the user know. We don't want to disallow opens for
 		   rescue purposes, or in case the vendor is just a blithering
 		   idiot */
-                if (print_info) {
+		if (print_info) {
 			ata_dev_printk(dev, KERN_WARNING,
 "Drive reports diagnostics failure. This may indicate a drive\n");
 			ata_dev_printk(dev, KERN_WARNING,
@@ -2043,10 +2322,23 @@ int ata_dev_configure(struct ata_device *dev)
 		dev->max_sectors = ATA_MAX_SECTORS;
 	}
 
-	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
+	if ((dev->class == ATA_DEV_ATAPI) &&
+	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
+		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
+		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
+	}
+
+	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
 					 dev->max_sectors);
 
+	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
+		dev->horkage |= ATA_HORKAGE_IPM;
+
+		/* reset link pm_policy for this port to no pm */
+		ap->pm_policy = MAX_PERFORMANCE;
+	}
+
 	if (ap->ops->dev_config)
 		ap->ops->dev_config(dev);
 
@@ -2131,21 +2423,38 @@ int ata_bus_probe(struct ata_port *ap)
 {
 	unsigned int classes[ATA_MAX_DEVICES];
 	int tries[ATA_MAX_DEVICES];
-	int i, rc;
+	int rc;
 	struct ata_device *dev;
 
 	ata_port_probe(ap);
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		tries[i] = ATA_PROBE_MAX_TRIES;
+	ata_link_for_each_dev(dev, &ap->link)
+		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
 
  retry:
+	ata_link_for_each_dev(dev, &ap->link) {
+		/* If we issue an SRST then an ATA drive (not ATAPI)
+		 * may change configuration and be in PIO0 timing. If
+		 * we do a hard reset (or are coming from power on)
+		 * this is true for ATA or ATAPI. Until we've set a
+		 * suitable controller mode we should not touch the
+		 * bus as we may be talking too fast.
+		 */
+		dev->pio_mode = XFER_PIO_0;
+
+		/* If the controller has a pio mode setup function
+		 * then use it to set the chipset to rights. Don't
+		 * touch the DMA setup as that will be dealt with when
+		 * configuring devices.
+		 */
+		if (ap->ops->set_piomode)
+			ap->ops->set_piomode(ap, dev);
+	}
+
 	/* reset and determine device classes */
 	ap->ops->phy_reset(ap);
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
-
+	ata_link_for_each_dev(dev, &ap->link) {
 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
 		    dev->class != ATA_DEV_UNKNOWN)
 			classes[dev->devno] = dev->class;
@@ -2157,21 +2466,13 @@ int ata_bus_probe(struct ata_port *ap)
 
 	ata_port_probe(ap);
 
-	/* after the reset the device state is PIO 0 and the controller
-	   state is undefined. Record the mode */
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ap->device[i].pio_mode = XFER_PIO_0;
-
 	/* read IDENTIFY page and configure devices. We have to do the identify
 	   specific sequence bass-ackwards so that PDIAG- is released by
 	   the slave device */
 
-	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
-		dev = &ap->device[i];
-
-		if (tries[i])
-			dev->class = classes[i];
+	ata_link_for_each_dev(dev, &ap->link) {
+		if (tries[dev->devno])
+			dev->class = classes[dev->devno];
 
 		if (!ata_dev_enabled(dev))
 			continue;
@@ -2186,33 +2487,42 @@ int ata_bus_probe(struct ata_port *ap)
 	if (ap->ops->cable_detect)
 		ap->cbl = ap->ops->cable_detect(ap);
 
+	/* We may have SATA bridge glue hiding here irrespective of the
+	   reported cable types and sensed types */
+	ata_link_for_each_dev(dev, &ap->link) {
+		if (!ata_dev_enabled(dev))
+			continue;
+		/* SATA drives indicate we have a bridge. We don't know which
+		   end of the link the bridge is which is a problem */
+		if (ata_id_is_sata(dev->id))
+			ap->cbl = ATA_CBL_SATA;
+	}
+
 	/* After the identify sequence we can now set up the devices. We do
 	   this in the normal order so that the user doesn't get confused */
 
-	for(i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
+	ata_link_for_each_dev(dev, &ap->link) {
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
+		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
 		rc = ata_dev_configure(dev);
-		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
+		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
 		if (rc)
 			goto fail;
 	}
 
 	/* configure transfer mode */
-	rc = ata_set_mode(ap, &dev);
+	rc = ata_set_mode(&ap->link, &dev);
 	if (rc)
 		goto fail;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		if (ata_dev_enabled(&ap->device[i]))
+	ata_link_for_each_dev(dev, &ap->link)
+		if (ata_dev_enabled(dev))
 			return 0;
 
 	/* no device present, disable port */
 	ata_port_disable(ap);
-	ap->ops->port_disable(ap);
 	return -ENODEV;
 
  fail:
@@ -2232,7 +2542,7 @@ int ata_bus_probe(struct ata_port *ap)
 			/* This is the last chance, better to slow
 			 * down than lose it.
 			 */
-			sata_down_spd_limit(ap);
+			sata_down_spd_limit(&ap->link);
 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
 		}
 	}
@@ -2261,108 +2571,34 @@ void ata_port_probe(struct ata_port *ap)
 
 /**
  *	sata_print_link_status - Print SATA link status
- *	@ap: SATA port to printk link status about
+ *	@link: SATA link to printk link status about
  *
  *	This function prints link speed and status of a SATA link.
  *
  *	LOCKING:
  *	None.
  */
-void sata_print_link_status(struct ata_port *ap)
+void sata_print_link_status(struct ata_link *link)
 {
 	u32 sstatus, scontrol, tmp;
 
-	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
+	if (sata_scr_read(link, SCR_STATUS, &sstatus))
 		return;
-	sata_scr_read(ap, SCR_CONTROL, &scontrol);
+	sata_scr_read(link, SCR_CONTROL, &scontrol);
 
-	if (ata_port_online(ap)) {
+	if (ata_link_online(link)) {
 		tmp = (sstatus >> 4) & 0xf;
-		ata_port_printk(ap, KERN_INFO,
+		ata_link_printk(link, KERN_INFO,
 				"SATA link up %s (SStatus %X SControl %X)\n",
 				sata_spd_string(tmp), sstatus, scontrol);
 	} else {
-		ata_port_printk(ap, KERN_INFO,
+		ata_link_printk(link, KERN_INFO,
 				"SATA link down (SStatus %X SControl %X)\n",
 				sstatus, scontrol);
 	}
 }
 
 /**
- *	__sata_phy_reset - Wake/reset a low-level SATA PHY
- *	@ap: SATA port associated with target SATA PHY.
- *
- *	This function issues commands to standard SATA Sxxx
- *	PHY registers, to wake up the phy (and device), and
- *	clear any reset condition.
- *
- *	LOCKING:
- *	PCI/etc. bus probe sem.
- *
- */
-void __sata_phy_reset(struct ata_port *ap)
-{
-	u32 sstatus;
-	unsigned long timeout = jiffies + (HZ * 5);
-
-	if (ap->flags & ATA_FLAG_SATA_RESET) {
-		/* issue phy wake/reset */
-		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
-		/* Couldn't find anything in SATA I/II specs, but
-		 * AHCI-1.1 10.4.2 says at least 1 ms. */
-		mdelay(1);
-	}
-	/* phy wake/clear reset */
-	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
-
-	/* wait for phy to become ready, if necessary */
-	do {
-		msleep(200);
-		sata_scr_read(ap, SCR_STATUS, &sstatus);
-		if ((sstatus & 0xf) != 1)
-			break;
-	} while (time_before(jiffies, timeout));
-
-	/* print link status */
-	sata_print_link_status(ap);
-
-	/* TODO: phy layer with polling, timeouts, etc. */
-	if (!ata_port_offline(ap))
-		ata_port_probe(ap);
-	else
-		ata_port_disable(ap);
-
-	if (ap->flags & ATA_FLAG_DISABLED)
-		return;
-
-	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
-		ata_port_disable(ap);
-		return;
-	}
-
-	ap->cbl = ATA_CBL_SATA;
-}
-
-/**
- *	sata_phy_reset - Reset SATA bus.
- *	@ap: SATA port associated with target SATA PHY.
- *
- *	This function resets the SATA bus, and then probes
- *	the bus for devices.
- *
- *	LOCKING:
- *	PCI/etc. bus probe sem.
- *
- */
-void sata_phy_reset(struct ata_port *ap)
-{
-	__sata_phy_reset(ap);
-	if (ap->flags & ATA_FLAG_DISABLED)
-		return;
-	ata_bus_reset(ap);
-}
-
-/**
  *	ata_dev_pair		-	return other device on cable
  *	@adev: device
  *
@@ -2372,8 +2608,8 @@ void sata_phy_reset(struct ata_port *ap)
 
 struct ata_device *ata_dev_pair(struct ata_device *adev)
 {
-	struct ata_port *ap = adev->ap;
-	struct ata_device *pair = &ap->device[1 - adev->devno];
+	struct ata_link *link = adev->link;
+	struct ata_device *pair = &link->device[1 - adev->devno];
 	if (!ata_dev_enabled(pair))
 		return NULL;
 	return pair;
@@ -2394,16 +2630,16 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
 
 void ata_port_disable(struct ata_port *ap)
 {
-	ap->device[0].class = ATA_DEV_NONE;
-	ap->device[1].class = ATA_DEV_NONE;
+	ap->link.device[0].class = ATA_DEV_NONE;
+	ap->link.device[1].class = ATA_DEV_NONE;
 	ap->flags |= ATA_FLAG_DISABLED;
 }
 
 /**
  *	sata_down_spd_limit - adjust SATA spd limit downward
- *	@ap: Port to adjust SATA spd limit for
+ *	@link: Link to adjust SATA spd limit for
  *
- *	Adjust SATA spd limit of @ap downward.  Note that this
+ *	Adjust SATA spd limit of @link downward.  Note that this
  *	function only adjusts the limit.  The change must be applied
  *	using sata_set_spd().
  *
@@ -2413,58 +2649,82 @@ void ata_port_disable(struct ata_port *ap)
  *	RETURNS:
  *	0 on success, negative errno on failure
  */
-int sata_down_spd_limit(struct ata_port *ap)
+int sata_down_spd_limit(struct ata_link *link)
 {
 	u32 sstatus, spd, mask;
 	int rc, highbit;
 
-	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
-	if (rc)
-		return rc;
+	if (!sata_scr_valid(link))
+		return -EOPNOTSUPP;
+
+	/* If SCR can be read, use it to determine the current SPD.
+	 * If not, use cached value in link->sata_spd.
+	 */
+	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
+	if (rc == 0)
+		spd = (sstatus >> 4) & 0xf;
+	else
+		spd = link->sata_spd;
 
-	mask = ap->sata_spd_limit;
+	mask = link->sata_spd_limit;
 	if (mask <= 1)
 		return -EINVAL;
+
+	/* unconditionally mask off the highest bit */
 	highbit = fls(mask) - 1;
 	mask &= ~(1 << highbit);
 
-	spd = (sstatus >> 4) & 0xf;
-	if (spd <= 1)
-		return -EINVAL;
-	spd--;
-	mask &= (1 << spd) - 1;
+	/* Mask off all speeds higher than or equal to the current
+	 * one.  Force 1.5Gbps if current SPD is not available.
+	 */
+	if (spd > 1)
+		mask &= (1 << (spd - 1)) - 1;
+	else
+		mask &= 1;
+
+	/* were we already at the bottom? */
 	if (!mask)
 		return -EINVAL;
 
-	ap->sata_spd_limit = mask;
+	link->sata_spd_limit = mask;
 
-	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
+	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
 			sata_spd_string(fls(mask)));
 
 	return 0;
 }
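
/*
 * Worked example of the masking above (illustrative only, not driver
 * code): with link->sata_spd_limit = 0x7 and the link currently running
 * at spd = 2 (3.0 Gbps), the limit ends up at 0x1, i.e. 1.5 Gbps only.
 */
static unsigned int example_down_spd_mask(unsigned int mask, unsigned int spd)
{
	int highbit = fls(mask) - 1;

	mask &= ~(1 << highbit);		/* 0x7 -> 0x3 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;	/* 0x3 & 0x1 -> 0x1 */
	else
		mask &= 1;			/* unknown SPD: force 1.5 Gbps */

	return mask;
}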
 
-static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
+static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
 {
-	u32 spd, limit;
+	struct ata_link *host_link = &link->ap->link;
+	u32 limit, target, spd;
+
+	limit = link->sata_spd_limit;
 
-	if (ap->sata_spd_limit == UINT_MAX)
-		limit = 0;
+	/* Don't configure downstream link faster than upstream link.
+	 * It doesn't speed up anything and some PMPs choke on such
+	 * configuration.
+	 */
+	if (!ata_is_host_link(link) && host_link->sata_spd)
+		limit &= (1 << host_link->sata_spd) - 1;
+
+	if (limit == UINT_MAX)
+		target = 0;
 	else
-		limit = fls(ap->sata_spd_limit);
+		target = fls(limit);
 
 	spd = (*scontrol >> 4) & 0xf;
-	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
+	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
 
-	return spd != limit;
+	return spd != target;
 }
 
 /**
  *	sata_set_spd_needed - is SATA spd configuration needed
- *	@ap: Port in question
+ *	@link: Link in question
  *
  *	Test whether the spd limit in SControl matches
- *	@ap->sata_spd_limit.  This function is used to determine
+ *	@link->sata_spd_limit.  This function is used to determine
  *	whether hardreset is necessary to apply SATA spd
  *	configuration.
  *
@@ -2474,21 +2734,21 @@ static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
  *	RETURNS:
  *	1 if SATA spd configuration is needed, 0 otherwise.
  */
-int sata_set_spd_needed(struct ata_port *ap)
+int sata_set_spd_needed(struct ata_link *link)
 {
 	u32 scontrol;
 
-	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
-		return 0;
+	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
+		return 1;
 
-	return __sata_set_spd_needed(ap, &scontrol);
+	return __sata_set_spd_needed(link, &scontrol);
 }
 
 /**
  *	sata_set_spd - set SATA spd according to spd limit
- *	@ap: Port to set SATA spd for
+ *	@link: Link to set SATA spd for
  *
- *	Set SATA spd of @ap according to sata_spd_limit.
+ *	Set SATA spd of @link according to sata_spd_limit.
  *
  *	LOCKING:
  *	Inherited from caller.
@@ -2497,18 +2757,18 @@ int sata_set_spd_needed(struct ata_port *ap)
  *	0 if spd doesn't need to be changed, 1 if spd has been
  *	changed.  Negative errno if SCR registers are inaccessible.
  */
-int sata_set_spd(struct ata_port *ap)
+int sata_set_spd(struct ata_link *link)
 {
 	u32 scontrol;
 	int rc;
 
-	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 		return rc;
 
-	if (!__sata_set_spd_needed(ap, &scontrol))
+	if (!__sata_set_spd_needed(link, &scontrol))
 		return 0;
 
-	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
 		return rc;
 
 	return 1;
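
/*
 * Sketch of the pairing described in the comments above (not from the
 * patch): sata_down_spd_limit() only lowers the recorded limit; the new
 * value takes effect once sata_set_spd() writes it to SControl, usually
 * followed by a hardreset.  The function name is hypothetical.
 */
static int example_limit_link_speed(struct ata_link *link)
{
	int rc = sata_down_spd_limit(link);

	if (rc == 0)
		rc = sata_set_spd(link);	/* 1 if SControl was changed */

	return rc;
}
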
@@ -2563,8 +2823,8 @@ static const struct ata_timing ata_timing[] = {
 	{ 0xFF }
 };
 
-#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
-#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
+#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
+#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
 
 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
 {
@@ -2591,7 +2851,7 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
 }
 
-static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
+static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
 {
 	const struct ata_timing *t;
 
@@ -2623,10 +2883,10 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 
 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
 		memset(&p, 0, sizeof(p));
-		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
-		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
+		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
 		}
 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
@@ -2763,7 +3023,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
 
 static int ata_dev_set_mode(struct ata_device *dev)
 {
-	struct ata_eh_context *ehc = &dev->ap->eh_context;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
 	unsigned int err_mask;
 	int rc;
 
@@ -2772,9 +3032,23 @@ static int ata_dev_set_mode(struct ata_device *dev)
 		dev->flags |= ATA_DFLAG_PIO;
 
 	err_mask = ata_dev_set_xfermode(dev);
+
 	/* Old CFA may refuse this command, which is just fine */
 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
-        	err_mask &= ~AC_ERR_DEV;
+		err_mask &= ~AC_ERR_DEV;
+
+	/* Some very old devices and some bad newer ones fail any kind of
+	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
+	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
+			dev->pio_mode <= XFER_PIO_2)
+		err_mask &= ~AC_ERR_DEV;
+
+	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
+	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
+	if (dev->xfer_shift == ATA_SHIFT_MWDMA && 
+	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
+	    (dev->id[63] >> 8) & 1)
+		err_mask &= ~AC_ERR_DEV;
 
 	if (err_mask) {
 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
@@ -2783,7 +3057,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
 	}
 
 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
-	rc = ata_dev_revalidate(dev, 0);
+	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
 	if (rc)
 		return rc;
@@ -2798,7 +3072,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
 
 /**
  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
- *	@ap: port on which timings will be programmed
+ *	@link: link on which timings will be programmed
  *	@r_failed_dev: out paramter for failed device
  *
  *	Standard implementation of the function used to tune and set
@@ -2813,25 +3087,36 @@ static int ata_dev_set_mode(struct ata_device *dev)
  *	0 on success, negative errno otherwise
  */
 
-int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 {
+	struct ata_port *ap = link->ap;
 	struct ata_device *dev;
-	int i, rc = 0, used_dma = 0, found = 0;
-
+	int rc = 0, used_dma = 0, found = 0;
 
 	/* step 1: calculate xfer_mask */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+	ata_link_for_each_dev(dev, link) {
 		unsigned int pio_mask, dma_mask;
-
-		dev = &ap->device[i];
+		unsigned int mode_mask;
 
 		if (!ata_dev_enabled(dev))
 			continue;
 
+		mode_mask = ATA_DMA_MASK_ATA;
+		if (dev->class == ATA_DEV_ATAPI)
+			mode_mask = ATA_DMA_MASK_ATAPI;
+		else if (ata_id_is_cfa(dev->id))
+			mode_mask = ATA_DMA_MASK_CFA;
+
 		ata_dev_xfermask(dev);
 
 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+
+		if (libata_dma_mask & mode_mask)
+			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+		else
+			dma_mask = 0;
+
 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
 
@@ -2843,8 +3128,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 		goto out;
 
 	/* step 2: always set host PIO timings */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (!ata_dev_enabled(dev))
 			continue;
 
@@ -2861,9 +3145,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 	}
 
 	/* step 3: set host DMA timings */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
-
+	ata_link_for_each_dev(dev, link) {
 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
 			continue;
 
@@ -2874,9 +3156,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 	}
 
 	/* step 4: update devices' xfer mode */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
-
+	ata_link_for_each_dev(dev, link) {
 		/* don't update suspended devices' xfer mode */
 		if (!ata_dev_enabled(dev))
 			continue;
@@ -2900,7 +3180,7 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 
 /**
  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
- *	@ap: port on which timings will be programmed
+ *	@link: link on which timings will be programmed
  *	@r_failed_dev: out paramter for failed device
  *
  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
@@ -2913,12 +3193,14 @@ int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
  *	RETURNS:
  *	0 on success, negative errno otherwise
  */
-int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 {
+	struct ata_port *ap = link->ap;
+
 	/* has private set_mode? */
 	if (ap->ops->set_mode)
-		return ap->ops->set_mode(ap, r_failed_dev);
-	return ata_do_set_mode(ap, r_failed_dev);
+		return ap->ops->set_mode(link, r_failed_dev);
+	return ata_do_set_mode(link, r_failed_dev);
 }
 
 /**
@@ -2997,6 +3279,61 @@ int ata_busy_sleep(struct ata_port *ap,
 }
 
 /**
+ *	ata_wait_after_reset - wait before checking status after reset
+ *	@ap: port containing status register to be polled
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	After reset, we need to pause a while before reading status.
+ *	Also, certain combination of controller and device report 0xff
+ *	Also, certain combinations of controller and device report 0xff
+ *	which is interpreted as empty port in ATA world.  This
+ *	function also waits for such devices to get out of 0xff
+ *	status.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
+{
+	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
+
+	if (time_before(until, deadline))
+		deadline = until;
+
+	/* Spec mandates ">= 2ms" before checking status.  We wait
+	 * 150ms, because that was the magic delay used for ATAPI
+	 * devices in Hale Landis's ATADRVR, for the period of time
+	 * between when the ATA command register is written, and then
+	 * status is checked.  Because waiting for "a while" before
+	 * checking status is fine, post SRST, we perform this magic
+	 * delay here as well.
+	 *
+	 * Old drivers/ide uses the 2mS rule and then waits for ready.
+	 */
+	msleep(150);
+
+	/* Wait for 0xff to clear.  Some SATA devices take a long time
+	 * to clear 0xff after reset.  For example, HHD424020F7SV00
+ *	 iVDR needs >= 800ms while Quantum GoVault needs even more
+	 * than that.
+	 *
+	 * Note that some PATA controllers (pata_ali) explode if
+	 * status register is read more than once when there's no
+	 * device attached.
+	 */
+	if (ap->flags & ATA_FLAG_SATA) {
+		while (1) {
+			u8 status = ata_chk_status(ap);
+
+			if (status != 0xff || time_after(jiffies, deadline))
+				return;
+
+			msleep(50);
+		}
+	}
+}
+
+/**
  *	ata_wait_ready - sleep until BSY clears, or timeout
  *	@ap: port containing status register to be polled
  *	@deadline: deadline jiffies for the operation
@@ -3021,7 +3358,7 @@ int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
 
 		if (!(status & ATA_BUSY))
 			return 0;
-		if (!ata_port_online(ap) && status == 0xff)
+		if (!ata_link_online(&ap->link) && status == 0xff)
 			return -ENODEV;
 		if (time_after(now, deadline))
 			return -EBUSY;
@@ -3112,23 +3449,14 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
 	udelay(20);	/* FIXME: flush */
 	iowrite8(ap->ctl, ioaddr->ctl_addr);
 
-	/* spec mandates ">= 2ms" before checking status.
-	 * We wait 150ms, because that was the magic delay used for
-	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
-	 * between when the ATA command register is written, and then
-	 * status is checked.  Because waiting for "a while" before
-	 * checking status is fine, post SRST, we perform this magic
-	 * delay here as well.
-	 *
-	 * Old drivers/ide uses the 2mS rule and then waits for ready
-	 */
-	msleep(150);
+	/* wait a while before checking status */
+	ata_wait_after_reset(ap, deadline);
 
 	/* Before we perform post reset processing we want to see if
 	 * the bus shows 0xFF because the odd clown forgets the D7
 	 * pulldown resistor.
 	 */
-	if (ata_check_status(ap) == 0xFF)
+	if (ata_chk_status(ap) == 0xFF)
 		return -ENODEV;
 
 	return ata_bus_post_reset(ap, devmask, deadline);
@@ -3156,6 +3484,7 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
 
 void ata_bus_reset(struct ata_port *ap)
 {
+	struct ata_device *device = ap->link.device;
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
 	u8 err;
@@ -3191,22 +3520,19 @@ void ata_bus_reset(struct ata_port *ap)
 	/*
 	 * determine by signature whether we have ATA or ATAPI devices
 	 */
-	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
+	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
 	if ((slave_possible) && (err != 0x81))
-		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
-
-	/* re-enable interrupts */
-	ap->ops->irq_on(ap);
+		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
 
 	/* is double-select really necessary? */
-	if (ap->device[1].class != ATA_DEV_NONE)
+	if (device[1].class != ATA_DEV_NONE)
 		ap->ops->dev_select(ap, 1);
-	if (ap->device[0].class != ATA_DEV_NONE)
+	if (device[0].class != ATA_DEV_NONE)
 		ap->ops->dev_select(ap, 0);
 
 	/* if no devices were detected, disable this port */
-	if ((ap->device[0].class == ATA_DEV_NONE) &&
-	    (ap->device[1].class == ATA_DEV_NONE))
+	if ((device[0].class == ATA_DEV_NONE) &&
+	    (device[1].class == ATA_DEV_NONE))
 		goto err_out;
 
 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
@@ -3219,18 +3545,18 @@ void ata_bus_reset(struct ata_port *ap)
 
 err_out:
 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
-	ap->ops->port_disable(ap);
+	ata_port_disable(ap);
 
 	DPRINTK("EXIT\n");
 }
 
 /**
- *	sata_phy_debounce - debounce SATA phy status
- *	@ap: ATA port to debounce SATA phy status for
+ *	sata_link_debounce - debounce SATA phy status
+ *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
  *
- *	Make sure SStatus of @ap reaches stable state, determined by
+ *	Make sure SStatus of @link reaches stable state, determined by
  *	holding the same value where DET is not 1 for @duration polled
  *	every @interval, before @timeout.  Timeout constraints the
  *	beginning of the stable state.  Because DET gets stuck at 1 on
@@ -3246,8 +3572,8 @@ err_out:
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
-		      unsigned long deadline)
+int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+		       unsigned long deadline)
 {
 	unsigned long interval_msec = params[0];
 	unsigned long duration = msecs_to_jiffies(params[1]);
@@ -3259,7 +3585,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
 	if (time_before(t, deadline))
 		deadline = t;
 
-	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
+	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
 		return rc;
 	cur &= 0xf;
 
@@ -3268,7 +3594,7 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
 
 	while (1) {
 		msleep(interval_msec);
-		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
+		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
 			return rc;
 		cur &= 0xf;
 
@@ -3285,19 +3611,21 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
 		last = cur;
 		last_jiffies = jiffies;
 
-		/* check deadline */
+		/* Check deadline.  If debouncing failed, return
+		 * -EPIPE to tell upper layer to lower link speed.
+		 */
 		if (time_after(jiffies, deadline))
-			return -EBUSY;
+			return -EPIPE;
 	}
 }
 
 /**
- *	sata_phy_resume - resume SATA phy
- *	@ap: ATA port to resume SATA phy for
+ *	sata_link_resume - resume SATA link
+ *	@link: ATA link to resume
 *	@params: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
  *
- *	Resume SATA phy of @ap and debounce it.
+ *	Resume the SATA phy of @link and debounce it.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
@@ -3305,18 +3633,18 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
-		    unsigned long deadline)
+int sata_link_resume(struct ata_link *link, const unsigned long *params,
+		     unsigned long deadline)
 {
 	u32 scontrol;
 	int rc;
 
-	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 		return rc;
 
 	scontrol = (scontrol & 0x0f0) | 0x300;
 
-	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
 		return rc;
 
 	/* Some PHYs react badly if SStatus is pounded immediately
@@ -3324,15 +3652,15 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
 	 */
 	msleep(200);
 
-	return sata_phy_debounce(ap, params, deadline);
+	return sata_link_debounce(link, params, deadline);
 }
 
 /**
  *	ata_std_prereset - prepare for reset
- *	@ap: ATA port to be reset
+ *	@link: ATA link to be reset
  *	@deadline: deadline jiffies for the operation
  *
- *	@ap is about to be reset.  Initialize it.  Failure from
+ *	@link is about to be reset.  Initialize it.  Failure from
  *	prereset makes libata abort whole reset sequence and give up
  *	that port, so prereset should be best-effort.  It does its
  *	best to prepare for reset sequence but if things go wrong, it
@@ -3344,37 +3672,44 @@ int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
+int ata_std_prereset(struct ata_link *link, unsigned long deadline)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
 	int rc;
 
 	/* handle link resume */
 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
-	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
+	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
+		ehc->i.action |= ATA_EH_HARDRESET;
+
+	/* Some PMPs don't work with only SRST, force hardreset if PMP
+	 * is supported.
+	 */
+	if (ap->flags & ATA_FLAG_PMP)
 		ehc->i.action |= ATA_EH_HARDRESET;
 
 	/* if we're about to do hardreset, nothing more to do */
 	if (ehc->i.action & ATA_EH_HARDRESET)
 		return 0;
 
-	/* if SATA, resume phy */
-	if (ap->cbl == ATA_CBL_SATA) {
-		rc = sata_phy_resume(ap, timing, deadline);
+	/* if SATA, resume link */
+	if (ap->flags & ATA_FLAG_SATA) {
+		rc = sata_link_resume(link, timing, deadline);
 		/* whine about phy resume failure but proceed */
 		if (rc && rc != -EOPNOTSUPP)
-			ata_port_printk(ap, KERN_WARNING, "failed to resume "
+			ata_link_printk(link, KERN_WARNING, "failed to resume "
 					"link for reset (errno=%d)\n", rc);
 	}
 
 	/* Wait for !BSY if the controller can wait for the first D2H
 	 * Reg FIS and we don't know that no device is attached.
 	 */
-	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
+	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
 		rc = ata_wait_ready(ap, deadline);
 		if (rc && rc != -ENODEV) {
-			ata_port_printk(ap, KERN_WARNING, "device not ready "
+			ata_link_printk(link, KERN_WARNING, "device not ready "
 					"(errno=%d), forcing hardreset\n", rc);
 			ehc->i.action |= ATA_EH_HARDRESET;
 		}
@@ -3385,7 +3720,7 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
 
 /**
  *	ata_std_softreset - reset host port via ATA SRST
- *	@ap: port to reset
+ *	@link: ATA link to reset
  *	@classes: resulting classes of attached devices
  *	@deadline: deadline jiffies for the operation
  *
@@ -3397,9 +3732,10 @@ int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
+int ata_std_softreset(struct ata_link *link, unsigned int *classes,
 		      unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
 	unsigned int devmask = 0;
 	int rc;
@@ -3407,7 +3743,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
 
 	DPRINTK("ENTER\n");
 
-	if (ata_port_offline(ap)) {
+	if (ata_link_offline(link)) {
 		classes[0] = ATA_DEV_NONE;
 		goto out;
 	}
@@ -3425,15 +3761,17 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
 	DPRINTK("about to softreset, devmask=%x\n", devmask);
 	rc = ata_bus_softreset(ap, devmask, deadline);
 	/* if link is occupied, -ENODEV too is an error */
-	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
-		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
 		return rc;
 	}
 
 	/* determine by signature whether we have ATA or ATAPI devices */
-	classes[0] = ata_dev_try_classify(ap, 0, &err);
+	classes[0] = ata_dev_try_classify(&link->device[0],
+					  devmask & (1 << 0), &err);
 	if (slave_possible && err != 0x81)
-		classes[1] = ata_dev_try_classify(ap, 1, &err);
+		classes[1] = ata_dev_try_classify(&link->device[1],
+						  devmask & (1 << 1), &err);
 
  out:
 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
@@ -3441,12 +3779,12 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
 }
 
 /**
- *	sata_port_hardreset - reset port via SATA phy reset
- *	@ap: port to reset
+ *	sata_link_hardreset - reset link via SATA phy reset
+ *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
  *
- *	SATA phy-reset host port using DET bits of SControl register.
+ *	SATA phy-reset @link using DET bits of SControl register.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep)
@@ -3454,7 +3792,7 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
+int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
 			unsigned long deadline)
 {
 	u32 scontrol;
@@ -3462,30 +3800,30 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
 
 	DPRINTK("ENTER\n");
 
-	if (sata_set_spd_needed(ap)) {
+	if (sata_set_spd_needed(link)) {
 		/* SATA spec says nothing about how to reconfigure
 		 * spd.  To be on the safe side, turn off phy during
 		 * reconfiguration.  This works for at least ICH7 AHCI
 		 * and Sil3124.
 		 */
-		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 			goto out;
 
 		scontrol = (scontrol & 0x0f0) | 0x304;
 
-		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
+		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
 			goto out;
 
-		sata_set_spd(ap);
+		sata_set_spd(link);
 	}
 
 	/* issue phy wake/reset */
-	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
+	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
 		goto out;
 
 	scontrol = (scontrol & 0x0f0) | 0x301;
 
-	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
+	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
 		goto out;
 
 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
@@ -3493,8 +3831,8 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
 	 */
 	msleep(1);
 
-	/* bring phy back */
-	rc = sata_phy_resume(ap, timing, deadline);
+	/* bring link back */
+	rc = sata_link_resume(link, timing, deadline);
  out:
 	DPRINTK("EXIT, rc=%d\n", rc);
 	return rc;
@@ -3502,7 +3840,7 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
 
 /**
  *	sata_std_hardreset - reset host port via SATA phy reset
- *	@ap: port to reset
+ *	@link: link to reset
  *	@class: resulting class of attached device
  *	@deadline: deadline jiffies for the operation
  *
@@ -3515,43 +3853,54 @@ int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
 		       unsigned long deadline)
 {
-	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+	struct ata_port *ap = link->ap;
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 	int rc;
 
 	DPRINTK("ENTER\n");
 
 	/* do hardreset */
-	rc = sata_port_hardreset(ap, timing, deadline);
+	rc = sata_link_hardreset(link, timing, deadline);
 	if (rc) {
-		ata_port_printk(ap, KERN_ERR,
+		ata_link_printk(link, KERN_ERR,
 				"COMRESET failed (errno=%d)\n", rc);
 		return rc;
 	}
 
 	/* TODO: phy layer with polling, timeouts, etc. */
-	if (ata_port_offline(ap)) {
+	if (ata_link_offline(link)) {
 		*class = ATA_DEV_NONE;
 		DPRINTK("EXIT, link offline\n");
 		return 0;
 	}
 
-	/* wait a while before checking status, see SRST for more info */
-	msleep(150);
+	/* wait a while before checking status */
+	ata_wait_after_reset(ap, deadline);
+
+	/* If PMP is supported, we have to do follow-up SRST.  Note
+	 * that some PMPs don't send D2H Reg FIS after hardreset at
+	 * all if the first port is empty.  Wait for it just for a
+	 * second and request follow-up SRST.
+	 */
+	if (ap->flags & ATA_FLAG_PMP) {
+		ata_wait_ready(ap, jiffies + HZ);
+		return -EAGAIN;
+	}
 
 	rc = ata_wait_ready(ap, deadline);
 	/* link occupied, -ENODEV too is an error */
 	if (rc) {
-		ata_port_printk(ap, KERN_ERR,
+		ata_link_printk(link, KERN_ERR,
 				"COMRESET failed (errno=%d)\n", rc);
 		return rc;
 	}
 
 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
 
-	*class = ata_dev_try_classify(ap, 0, NULL);
+	*class = ata_dev_try_classify(link->device, 1, NULL);
 
 	DPRINTK("EXIT, class=%u\n", *class);
 	return 0;
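
/*
 * Minimal sketch, not from the patch, of a controller-specific hardreset
 * built on the link-based helpers above; it mirrors sata_std_hardreset()
 * without the PMP follow-up.  The name example_hardreset is hypothetical
 * and real drivers wrap their own PHY quirks around these calls.
 */
static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline);
	if (rc)
		return rc;

	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		return 0;
	}

	ata_wait_after_reset(ap, deadline);	/* post-reset settle / 0xff handling */
	rc = ata_wait_ready(ap, deadline);
	if (rc)
		return rc;

	*class = ata_dev_try_classify(link->device, 1, NULL);
	return 0;
}
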
@@ -3559,7 +3908,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
 
 /**
  *	ata_std_postreset - standard postreset callback
- *	@ap: the target ata_port
+ *	@link: the target ata_link
  *	@classes: classes of attached devices
  *
  *	This function is invoked after a successful reset.  Note that
@@ -3569,22 +3918,20 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
  *	LOCKING:
  *	Kernel thread context (may sleep)
  */
-void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
+void ata_std_postreset(struct ata_link *link, unsigned int *classes)
 {
+	struct ata_port *ap = link->ap;
 	u32 serror;
 
 	DPRINTK("ENTER\n");
 
 	/* print link status */
-	sata_print_link_status(ap);
+	sata_print_link_status(link);
 
 	/* clear SError */
-	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
-		sata_scr_write(ap, SCR_ERROR, serror);
-
-	/* re-enable interrupts */
-	if (!ap->ops->error_handler)
-		ap->ops->irq_on(ap);
+	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
+		sata_scr_write(link, SCR_ERROR, serror);
+	link->eh_info.serror = 0;
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
@@ -3656,7 +4003,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
 
 /**
  *	ata_dev_reread_id - Re-read IDENTIFY data
- *	@adev: target ATA device
+ *	@dev: target ATA device
  *	@readid_flags: read ID flags
  *
  *	Re-read IDENTIFY page and make sure @dev is still attached to
@@ -3671,7 +4018,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
 {
 	unsigned int class = dev->class;
-	u16 *id = (void *)dev->ap->sector_buf;
+	u16 *id = (void *)dev->link->ap->sector_buf;
 	int rc;
 
 	/* read ID data */
@@ -3690,6 +4037,7 @@ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
 /**
  *	ata_dev_revalidate - Revalidate ATA device
  *	@dev: device to revalidate
+ *	@new_class: new class code
  *	@readid_flags: read ID flags
  *
  *	Re-read IDENTIFY page, make sure @dev is still attached to the
@@ -3701,7 +4049,8 @@ int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
  *	RETURNS:
  *	0 on success, negative errno otherwise
  */
-int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
+int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+		       unsigned int readid_flags)
 {
 	u64 n_sectors = dev->n_sectors;
 	int rc;
@@ -3709,6 +4058,15 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
 	if (!ata_dev_enabled(dev))
 		return -ENODEV;
 
+	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
+	if (ata_class_enabled(new_class) &&
+	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
+		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
+			       dev->class, new_class);
+		rc = -ENODEV;
+		goto fail;
+	}
+
 	/* re-read ID */
 	rc = ata_dev_reread_id(dev, readid_flags);
 	if (rc)
@@ -3720,11 +4078,16 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
 		goto fail;
 
 	/* verify n_sectors hasn't changed */
-	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
+	if (dev->class == ATA_DEV_ATA && n_sectors &&
+	    dev->n_sectors != n_sectors) {
 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
 			       "%llu != %llu\n",
 			       (unsigned long long)n_sectors,
 			       (unsigned long long)dev->n_sectors);
+
+		/* restore original n_sectors */
+		dev->n_sectors = n_sectors;
+
 		rc = -ENODEV;
 		goto fail;
 	}
@@ -3772,9 +4135,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
-	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
+	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
-	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
+	/* Odd clown on sil3726/4726 PMPs */
+	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
+						ATA_HORKAGE_SKIP_PM },
 
 	/* Weird ATAPI devices */
 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
@@ -3783,34 +4148,69 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 	/* Devices where NCQ should be avoided */
 	/* NCQ is slow */
-        { "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
+	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
+	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
 	/* NCQ is broken */
-	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
-	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
-	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
-	 ATA_HORKAGE_NONCQ },
-	/* NCQ hard hangs device under heavier load, needs hard power cycle */
-	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
+	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
+	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
+	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
+	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
+	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
+	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
+
 	/* Blacklist entries taken from Silicon Image 3124/3132
 	   Windows driver .inf file - also several Linux problem reports */
 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
-	/* Drives which do spurious command completion */
-	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
-	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
-	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
-	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
 
-	/* Devices with NCQ limits */
+	/* devices which puke on READ_NATIVE_MAX */
+	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
+	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
+
+	/* Devices which report 1 sector over size HPA */
+	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
+	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
+
+	/* Devices which get the IVB wrong */
+	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
+	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
+	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
+	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
+	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
 
 	/* End Marker */
 	{ }
 };
 
-unsigned long ata_device_blacklisted(const struct ata_device *dev)
+static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
+{
+	const char *p;
+	int len;
+
+	/*
+	 * check for trailing wildcard: *\0
+	 */
+	p = strchr(patt, wildchar);
+	if (p && ((*(p + 1)) == 0))
+		len = p - patt;
+	else {
+		len = strlen(name);
+		if (!len) {
+			if (!*patt)
+				return 0;
+			return -1;
+		}
+	}
+
+	return strncmp(patt, name, len);
+}
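
/*
 * Illustrative check of the trailing-wildcard matcher above (not part of
 * the patch), using patterns from the blacklist table: "Maxtor *" matches
 * any Maxtor model string, while a pattern without a trailing '*' must
 * match the whole name.
 */
static int example_blacklist_match(void)
{
	int hit  = strn_pattern_cmp("Maxtor *", "Maxtor 6B200M0", '*');
	int miss = strn_pattern_cmp("ST380817AS", "ST3160023AS", '*');

	return hit == 0 && miss != 0;	/* evaluates to 1 */
}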
+
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
 {
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
@@ -3820,10 +4220,10 @@ unsigned long ata_device_blacklisted(const struct ata_device *dev)
 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
 
 	while (ad->model_num) {
-		if (!strcmp(ad->model_num, model_num)) {
+		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
 			if (ad->model_rev == NULL)
 				return ad->horkage;
-			if (!strcmp(ad->model_rev, model_rev))
+			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
 				return ad->horkage;
 		}
 		ad++;
@@ -3837,10 +4237,25 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
 	 */
-	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
+	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
 	    (dev->flags & ATA_DFLAG_CDB_INTR))
 		return 1;
-	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
+	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
+}
+
+/**
+ *	ata_is_40wire		-	check drive side detection
+ *	@dev: device
+ *
+ *	Perform drive side detection decoding, allowing for device vendors
+ *	who can't follow the documentation.
+ */
+
+static int ata_is_40wire(struct ata_device *dev)
+{
+	if (dev->horkage & ATA_HORKAGE_IVB)
+		return ata_drive_40wire_relaxed(dev->id);
+	return ata_drive_40wire(dev->id);
 }
 
 /**
@@ -3857,7 +4272,8 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
  */
 static void ata_dev_xfermask(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
 	struct ata_host *host = ap->host;
 	unsigned long xfer_mask;
 
@@ -3888,7 +4304,7 @@ static void ata_dev_xfermask(struct ata_device *dev)
 	}
 
 	if ((host->flags & ATA_HOST_SIMPLEX) &&
-            host->simplex_claimed && host->simplex_claimed != ap) {
+	    host->simplex_claimed && host->simplex_claimed != ap) {
 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
 			       "other device, disabling DMA\n");
@@ -3910,11 +4326,11 @@ static void ata_dev_xfermask(struct ata_device *dev)
 	 */
 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
 		/* UDMA/44 or higher would be available */
-		if((ap->cbl == ATA_CBL_PATA40) ||
-   		    (ata_drive_40wire(dev->id) &&
-		     (ap->cbl == ATA_CBL_PATA_UNK ||
-                     ap->cbl == ATA_CBL_PATA80))) {
-		      	ata_dev_printk(dev, KERN_WARNING,
+		if ((ap->cbl == ATA_CBL_PATA40) ||
+		    (ata_is_40wire(dev) &&
+		    (ap->cbl == ATA_CBL_PATA_UNK ||
+		     ap->cbl == ATA_CBL_PATA80))) {
+			ata_dev_printk(dev, KERN_WARNING,
 				 "limited to UDMA/33 due to 40-wire cable\n");
 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 		}
@@ -3955,7 +4371,43 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = dev->xfer_mode;
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+
+	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+	return err_mask;
+}
+/**
+ *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
+ *	@dev: Device to which command will be sent
+ *	@enable: Whether to enable or disable the feature
+ *	@feature: The sector count value that selects the feature to set
+ *
+ *	Issue SET FEATURES - SATA FEATURES command to device @dev
+ *	on port @ap with the sector count set to @feature.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask otherwise.
+ */
+static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
+					u8 feature)
+{
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	/* set up set-features taskfile */
+	DPRINTK("set features - SATA features\n");
+
+	ata_tf_init(dev, &tf);
+	tf.command = ATA_CMD_SET_FEATURES;
+	tf.feature = enable;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.nsect = feature;
+
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
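
/*
 * Sketch of how the helper above can be reused (not from the patch):
 * enabling device-initiated power management, assuming the
 * SETFEATURES_SATA_ENABLE and SATA_DIPM sub-command values from
 * <linux/ata.h>.  The function name is hypothetical.
 */
static void example_enable_dipm(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
				       SATA_DIPM);
	if (err_mask)
		ata_dev_printk(dev, KERN_WARNING,
			       "failed to enable DIPM (err_mask=0x%x)\n",
			       err_mask);
}
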
@@ -3993,7 +4445,12 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 	tf.nsect = sectors;
 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	/* A clean abort indicates an original or just out of spec drive
+	   and we should continue as we issue the setup based on the
+	   drive reported working geometry */
+	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+		err_mask = 0;
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -4034,10 +4491,10 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 		if (qc->n_elem)
 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
 		/* restore last sg */
-		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
 		if (pad_buf) {
 			struct scatterlist *psg = &qc->pad_sgent;
-			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
 			kunmap_atomic(addr, KM_IRQ0);
 		}
@@ -4110,6 +4567,68 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 }
 
 /**
+ *	ata_fill_sg_dumb - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command. Perform the fill
+ *	so that we avoid writing any length 64K records for
+ *	controllers that don't follow the spec.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
+
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len, blen;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			blen = len & 0xffff;
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			if (blen == 0) {
+			   /* Some PATA chipsets like the CS5530 can't
+			      cope with 0x0000 meaning 64K as the spec says */
+				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
+				blen = 0x8000;
+				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
+			}
+			ap->prd[idx].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx)
+		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
  *	@qc: Metadata associated with taskfile to check
  *
@@ -4140,6 +4659,73 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
 }
 
 /**
+ *	atapi_qc_may_overflow - Check whether data transfer may overflow
+ *	@qc: ATA command in question
+ *
+ *	ATAPI commands which transfer variable length data to host
+ *	might overflow due to application error or hardware bug.  This
+ *	function checks whether overflow should be drained and ignored
+ *	for @qc.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if @qc may overflow; otherwise, 0.
+ */
+static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
+{
+	if (qc->tf.protocol != ATA_PROT_ATAPI &&
+	    qc->tf.protocol != ATA_PROT_ATAPI_DMA)
+		return 0;
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		return 0;
+
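+	/* Commands with a well defined transfer length (plain reads,
+	 * writes and CD reads) should never return extra data, so an
+	 * overflow on them is a real error rather than something to
+	 * drain silently.
+	 */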
+	switch (qc->cdb[0]) {
+	case READ_10:
+	case READ_12:
+	case WRITE_10:
+	case WRITE_12:
+	case GPCMD_READ_CD:
+	case GPCMD_READ_CD_MSF:
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ *	ata_std_qc_defer - Check whether a qc needs to be deferred
+ *	@qc: ATA command in question
+ *
+ *	Non-NCQ commands cannot run with any other command, NCQ or
+ *	not.  As the upper layer only knows the queue depth, we are
+ *	responsible for maintaining exclusion.  This function checks
+ *	whether a new command @qc can be issued.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int ata_std_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+
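+	/* NCQ commands may run together but never alongside a non-NCQ
+	 * command, and a non-NCQ command must run alone: active_tag
+	 * tracks the in-flight non-NCQ command and sactive the NCQ
+	 * tags, so issue is allowed only when the relevant ones are
+	 * clear.
+	 */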
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		if (!ata_tag_valid(link->active_tag))
+			return 0;
+	} else {
+		if (!ata_tag_valid(link->active_tag) && !link->sactive)
+			return 0;
+	}
+
+	return ATA_DEFER_LINK;
+}
+
+/**
  *	ata_qc_prep - Prepare taskfile for submission
  *	@qc: Metadata associated with taskfile to be prepared
  *
@@ -4156,6 +4742,23 @@ void ata_qc_prep(struct ata_queued_cmd *qc)
 	ata_fill_sg(qc);
 }
 
+/**
+ *	ata_dumb_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission, filling the PRD table via
+ *	ata_fill_sg_dumb() for controllers that mishandle 64K entries.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_fill_sg_dumb(qc);
+}
+
 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
 
 /**
@@ -4180,6 +4783,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 	qc->nbytes = buflen;
+	qc->cursg = qc->__sg;
 
 	sg_init_one(&qc->sgent, buf, buflen);
 }
@@ -4205,6 +4809,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 	qc->__sg = sg;
 	qc->n_elem = n_elem;
 	qc->orig_n_elem = n_elem;
+	qc->cursg = qc->__sg;
 }
 
 /**
@@ -4294,7 +4899,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
-	struct scatterlist *lsg = &sg[qc->n_elem - 1];
+	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
 	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
 	VPRINTK("ENTER, ata%u\n", ap->print_id);
@@ -4316,11 +4921,12 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 		 * data in this function or read data in ata_sg_clean.
 		 */
 		offset = lsg->offset + lsg->length - qc->pad_len;
-		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
-		psg->offset = offset_in_page(offset);
+		sg_init_table(psg, 1);
+		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
+				qc->pad_len, offset_in_page(offset));
 
 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
-			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
 			kunmap_atomic(addr, KM_IRQ0);
 		}
@@ -4398,7 +5004,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
 		   unsigned int buflen, int write_data)
 {
-	struct ata_port *ap = adev->ap;
+	struct ata_port *ap = adev->link->ap;
 	unsigned int words = buflen >> 1;
 
 	/* Transfer multiple of 2 bytes */
@@ -4458,7 +5064,6 @@ void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned int offset;
@@ -4467,8 +5072,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;
 
-	page = sg[qc->cursg].page;
-	offset = sg[qc->cursg].offset + qc->cursg_ofs;
+	page = sg_page(qc->cursg);
+	offset = qc->cursg->offset + qc->cursg_ofs;
 
 	/* get the current page and offset */
 	page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -4496,8 +5101,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	qc->curbytes += qc->sect_size;
 	qc->cursg_ofs += qc->sect_size;
 
-	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
-		qc->cursg++;
+	if (qc->cursg_ofs == qc->cursg->length) {
+		qc->cursg = sg_next(qc->cursg);
 		qc->cursg_ofs = 0;
 	}
 }
@@ -4527,6 +5132,8 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 			ata_pio_sector(qc);
 	} else
 		ata_pio_sector(qc);
+
+	ata_altstatus(qc->ap); /* flush */
 }
 
 /**
@@ -4576,21 +5183,19 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
  *	Inherited from caller.
  *
  */
-
-static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
+	struct ata_eh_info *ehi = &qc->dev->link->eh_info;
+	struct scatterlist *sg;
 	struct page *page;
 	unsigned char *buf;
 	unsigned int offset, count;
 
-	if (qc->curbytes + bytes >= qc->nbytes)
-		ap->hsm_task_state = HSM_ST_LAST;
-
 next_sg:
-	if (unlikely(qc->cursg >= qc->n_elem)) {
+	sg = qc->cursg;
+	if (unlikely(!sg)) {
 		/*
 		 * The end of qc->sg is reached and the device expects
 		 * more data to transfer. In order not to overrun qc->sg
@@ -4599,23 +5204,30 @@ next_sg:
 		 *    - for write case, padding zero data to the device
 		 */
 		u16 pad_buf[1] = { 0 };
-		unsigned int words = bytes >> 1;
 		unsigned int i;
 
-		if (words) /* warning if bytes > 1 */
-			ata_dev_printk(qc->dev, KERN_WARNING,
-				       "%u bytes trailing data\n", bytes);
+		if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
+			ata_ehi_push_desc(ehi, "too much trailing data "
+					  "buf=%u cur=%u bytes=%u",
+					  qc->nbytes, qc->curbytes, bytes);
+			return -1;
+		}
 
-		for (i = 0; i < words; i++)
-			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
+		/* overflow is expected for misc ATAPI commands */
+		if (bytes && !atapi_qc_may_overflow(qc))
+			ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
+				       "trailing data (cdb=%02x nbytes=%u)\n",
+				       bytes, qc->cdb[0], qc->nbytes);
 
-		ap->hsm_task_state = HSM_ST_LAST;
-		return;
-	}
+		for (i = 0; i < (bytes + 1) / 2; i++)
+			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
 
-	sg = &qc->__sg[qc->cursg];
+		qc->curbytes += bytes;
 
-	page = sg->page;
+		return 0;
+	}
+
+	page = sg_page(sg);
 	offset = sg->offset + qc->cursg_ofs;
 
 	/* get the current page and offset */
@@ -4648,16 +5260,20 @@ next_sg:
 	}
 
 	bytes -= count;
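+	/* PIO moves whole 16-bit words, so an odd count has already
+	 * consumed one byte of the remaining trailing data.
+	 */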
+	if ((count & 1) && bytes)
+		bytes--;
 	qc->curbytes += count;
 	qc->cursg_ofs += count;
 
 	if (qc->cursg_ofs == sg->length) {
-		qc->cursg++;
+		qc->cursg = sg_next(qc->cursg);
 		qc->cursg_ofs = 0;
 	}
 
 	if (bytes)
 		goto next_sg;
+
+	return 0;
 }
 
 /**
@@ -4700,7 +5316,9 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 
 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
 
-	__atapi_pio_bytes(qc, bytes);
+	if (__atapi_pio_bytes(qc, bytes))
+		goto err_out;
+	ata_altstatus(ap); /* flush */
 
 	return;
 
@@ -4846,11 +5464,19 @@ fsm_start:
 		 * let the EH abort the command or reset the device.
 		 */
 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
-			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
-					"error, dev_stat 0x%X\n", status);
-			qc->err_mask |= AC_ERR_HSM;
-			ap->hsm_task_state = HSM_ST_ERR;
-			goto fsm_start;
+			/* Some ATAPI tape drives forget to clear the ERR bit
+			 * when doing the next command (mostly request sense).
+			 * We ignore ERR here to work around it and proceed sending
+			 * the CDB.
+			 */
+			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+				ata_port_printk(ap, KERN_WARNING,
+						"DRQ=1 with device error, "
+						"dev_stat 0x%X\n", status);
+				qc->err_mask |= AC_ERR_HSM;
+				ap->hsm_task_state = HSM_ST_ERR;
+				goto fsm_start;
+			}
 		}
 
 		/* Send the CDB (atapi) or the first data block (ata pio out).
@@ -4872,7 +5498,6 @@ fsm_start:
 			 */
 			ap->hsm_task_state = HSM_ST;
 			ata_pio_sectors(qc);
-			ata_altstatus(ap); /* flush */
 		} else
 			/* send CDB */
 			atapi_send_cdb(ap, qc);
@@ -4953,7 +5578,6 @@ fsm_start:
 
 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
 					ata_pio_sectors(qc);
-					ata_altstatus(ap);
 					status = ata_wait_idle(ap);
 				}
 
@@ -4973,13 +5597,11 @@ fsm_start:
 			if (ap->hsm_task_state == HSM_ST_LAST &&
 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
 				/* all data read */
-				ata_altstatus(ap);
 				status = ata_wait_idle(ap);
 				goto fsm_start;
 			}
 		}
 
-		ata_altstatus(ap); /* flush */
 		poll_next = 1;
 		break;
 
@@ -5027,11 +5649,13 @@ fsm_start:
 
 static void ata_pio_task(void *_data)
 {
-	struct ata_queued_cmd *qc = _data;
-	struct ata_port *ap = qc->ap;
+	struct ata_port *ap = _data;
+	struct ata_queued_cmd *qc = ap->port_task_data;
 	u8 status;
 	int poll_next;
 
+	DPRINTK("ENTER\n");
+
 fsm_start:
 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
 
@@ -5103,7 +5727,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 
 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	struct ata_queued_cmd *qc;
 
 	qc = ata_qc_new(ap);
@@ -5146,6 +5770,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 void __ata_qc_complete(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct ata_link *link = qc->dev->link;
 
 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
@@ -5154,10 +5779,19 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
 		ata_sg_clean(qc);
 
 	/* command should be marked inactive atomically with qc completion */
-	if (qc->tf.protocol == ATA_PROT_NCQ)
-		ap->sactive &= ~(1 << qc->tag);
-	else
-		ap->active_tag = ATA_TAG_POISON;
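+	/* nr_active_links counts links with at least one command in
+	 * flight; a link drops out of the count when its last NCQ tag
+	 * clears or its single non-NCQ command completes.
+	 */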
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		link->sactive &= ~(1 << qc->tag);
+		if (!link->sactive)
+			ap->nr_active_links--;
+	} else {
+		link->active_tag = ATA_TAG_POISON;
+		ap->nr_active_links--;
+	}
+
+	/* clear exclusive status */
+	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
+		     ap->excl_link == link))
+		ap->excl_link = NULL;
 
 	/* atapi: mark qc as inactive to prevent the interrupt handler
 	 * from completing the command twice later, before the error handler
@@ -5207,6 +5841,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	 * taken care of.
 	 */
 	if (ap->ops->error_handler) {
+		struct ata_device *dev = qc->dev;
+		struct ata_eh_info *ehi = &dev->link->eh_info;
+
 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
 
 		if (unlikely(qc->err_mask))
@@ -5225,6 +5862,27 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
 			fill_result_tf(qc);
 
+		/* Some commands need post-processing after successful
+		 * completion.
+		 */
+		switch (qc->tf.command) {
+		case ATA_CMD_SET_FEATURES:
+			if (qc->tf.feature != SETFEATURES_WC_ON &&
+			    qc->tf.feature != SETFEATURES_WC_OFF)
+				break;
+			/* fall through */
+		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+		case ATA_CMD_SET_MULTI: /* multi_count changed */
+			/* revalidate device */
+			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+			ata_port_schedule_eh(ap);
+			break;
+
+		case ATA_CMD_SLEEP:
+			dev->flags |= ATA_DFLAG_SLEEPING;
+			break;
+		}
+
 		__ata_qc_complete(qc);
 	} else {
 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
@@ -5326,19 +5984,25 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
 void ata_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct ata_link *link = qc->dev->link;
 
 	/* Make sure only one non-NCQ command is outstanding.  The
 	 * check is skipped for old EH because it reuses active qc to
 	 * request ATAPI sense.
 	 */
-	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
+	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
 
 	if (qc->tf.protocol == ATA_PROT_NCQ) {
-		WARN_ON(ap->sactive & (1 << qc->tag));
-		ap->sactive |= 1 << qc->tag;
+		WARN_ON(link->sactive & (1 << qc->tag));
+
+		if (!link->sactive)
+			ap->nr_active_links++;
+		link->sactive |= 1 << qc->tag;
 	} else {
-		WARN_ON(ap->sactive);
-		ap->active_tag = qc->tag;
+		WARN_ON(link->sactive);
+
+		ap->nr_active_links++;
+		link->active_tag = qc->tag;
 	}
 
 	qc->flags |= ATA_QCFLAG_ACTIVE;
@@ -5356,6 +6020,14 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
 	}
 
+	/* if device is sleeping, schedule softreset and abort the link */
+	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
+		link->eh_info.action |= ATA_EH_SOFTRESET;
+		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
+		ata_link_abort(link);
+		return;
+	}
+
 	ap->ops->qc_prep(qc);
 
 	qc->err_mask |= ap->ops->qc_issue(qc);
@@ -5518,10 +6190,10 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
  *	One if interrupt was handled, zero if not (shared irq).
  */
 
-inline unsigned int ata_host_intr (struct ata_port *ap,
-				   struct ata_queued_cmd *qc)
+inline unsigned int ata_host_intr(struct ata_port *ap,
+				  struct ata_queued_cmd *qc)
 {
-	struct ata_eh_info *ehi = &ap->eh_info;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	u8 status, host_stat = 0;
 
 	VPRINTK("ata%u: protocol %d task_state %d\n",
@@ -5595,7 +6267,8 @@ idle_irq:
 
 #ifdef ATA_IRQ_TRAP
 	if ((ap->stats.idle_irq % 1000) == 0) {
-		ap->ops->irq_ack(ap, 0); /* debug trap */
+		ata_chk_status(ap);
+		ap->ops->irq_clear(ap);
 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
 		return 1;
 	}
@@ -5618,7 +6291,7 @@ idle_irq:
  *	IRQ_NONE or IRQ_HANDLED.
  */
 
-irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
+irqreturn_t ata_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
 	struct ata_host *host = dev_instance;
 	unsigned int i;
@@ -5636,7 +6309,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
-			qc = ata_qc_from_tag(ap, ap->active_tag);
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
 			    (qc->flags & ATA_QCFLAG_ACTIVE))
 				handled |= ata_host_intr(ap, qc);
@@ -5650,9 +6323,9 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
 
 /**
  *	sata_scr_valid - test whether SCRs are accessible
- *	@ap: ATA port to test SCR accessibility for
+ *	@link: ATA link to test SCR accessibility for
  *
- *	Test whether SCRs are accessible for @ap.
+ *	Test whether SCRs are accessible for @link.
  *
  *	LOCKING:
  *	None.
@@ -5660,64 +6333,74 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
  *	RETURNS:
  *	1 if SCRs are accessible, 0 otherwise.
  */
-int sata_scr_valid(struct ata_port *ap)
+int sata_scr_valid(struct ata_link *link)
 {
-	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
+	struct ata_port *ap = link->ap;
+
+	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
 }
 
 /**
  *	sata_scr_read - read SCR register of the specified port
- *	@ap: ATA port to read SCR for
+ *	@link: ATA link to read SCR for
  *	@reg: SCR to read
  *	@val: Place to store read value
  *
- *	Read SCR register @reg of @ap into *@val.  This function is
- *	guaranteed to succeed if the cable type of the port is SATA
- *	and the port implements ->scr_read.
+ *	Read SCR register @reg of @link into *@val.  This function is
+ *	guaranteed to succeed if @link is ap->link, the cable type of
+ *	the port is SATA and the port implements ->scr_read.
  *
  *	LOCKING:
- *	None.
+ *	None if @link is ap->link.  Kernel thread context otherwise.
  *
  *	RETURNS:
  *	0 on success, negative errno on failure.
  */
-int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
+int sata_scr_read(struct ata_link *link, int reg, u32 *val)
 {
-	if (sata_scr_valid(ap)) {
-		*val = ap->ops->scr_read(ap, reg);
-		return 0;
+	if (ata_is_host_link(link)) {
+		struct ata_port *ap = link->ap;
+
+		if (sata_scr_valid(link))
+			return ap->ops->scr_read(ap, reg, val);
+		return -EOPNOTSUPP;
 	}
-	return -EOPNOTSUPP;
+
+	return sata_pmp_scr_read(link, reg, val);
 }
 
 /**
  *	sata_scr_write - write SCR register of the specified port
- *	@ap: ATA port to write SCR for
+ *	@link: ATA link to write SCR for
  *	@reg: SCR to write
  *	@val: value to write
  *
- *	Write @val to SCR register @reg of @ap.  This function is
- *	guaranteed to succeed if the cable type of the port is SATA
- *	and the port implements ->scr_read.
+ *	Write @val to SCR register @reg of @link.  This function is
+ *	guaranteed to succeed if @link is ap->link, the cable type of
+ *	the port is SATA and the port implements ->scr_read.
  *
  *	LOCKING:
- *	None.
+ *	None if @link is ap->link.  Kernel thread context otherwise.
  *
  *	RETURNS:
  *	0 on success, negative errno on failure.
  */
-int sata_scr_write(struct ata_port *ap, int reg, u32 val)
+int sata_scr_write(struct ata_link *link, int reg, u32 val)
 {
-	if (sata_scr_valid(ap)) {
-		ap->ops->scr_write(ap, reg, val);
-		return 0;
+	if (ata_is_host_link(link)) {
+		struct ata_port *ap = link->ap;
+
+		if (sata_scr_valid(link))
+			return ap->ops->scr_write(ap, reg, val);
+		return -EOPNOTSUPP;
 	}
-	return -EOPNOTSUPP;
+
+	return sata_pmp_scr_write(link, reg, val);
 }
 
 /**
  *	sata_scr_write_flush - write SCR register of the specified port and flush
- *	@ap: ATA port to write SCR for
+ *	@link: ATA link to write SCR for
  *	@reg: SCR to write
  *	@val: value to write
  *
@@ -5725,28 +6408,36 @@ int sata_scr_write(struct ata_port *ap, int reg, u32 val)
  *	function performs flush after writing to the register.
  *
  *	LOCKING:
- *	None.
+ *	None if @link is ap->link.  Kernel thread context otherwise.
  *
  *	RETURNS:
  *	0 on success, negative errno on failure.
  */
-int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
+int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
 {
-	if (sata_scr_valid(ap)) {
-		ap->ops->scr_write(ap, reg, val);
-		ap->ops->scr_read(ap, reg);
-		return 0;
+	if (ata_is_host_link(link)) {
+		struct ata_port *ap = link->ap;
+		int rc;
+
+		if (sata_scr_valid(link)) {
+			rc = ap->ops->scr_write(ap, reg, val);
+			if (rc == 0)
+				rc = ap->ops->scr_read(ap, reg, &val);
+			return rc;
+		}
+		return -EOPNOTSUPP;
 	}
-	return -EOPNOTSUPP;
+
+	return sata_pmp_scr_write(link, reg, val);
 }
 
 /**
- *	ata_port_online - test whether the given port is online
- *	@ap: ATA port to test
+ *	ata_link_online - test whether the given link is online
+ *	@link: ATA link to test
  *
- *	Test whether @ap is online.  Note that this function returns 0
- *	if online status of @ap cannot be obtained, so
- *	ata_port_online(ap) != !ata_port_offline(ap).
+ *	Test whether @link is online.  Note that this function returns
+ *	0 if online status of @link cannot be obtained, so
+ *	ata_link_online(link) != !ata_link_offline(link).
  *
  *	LOCKING:
  *	None.
@@ -5754,22 +6445,23 @@ int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
  *	RETURNS:
  *	1 if the port online status is available and online.
  */
-int ata_port_online(struct ata_port *ap)
+int ata_link_online(struct ata_link *link)
 {
 	u32 sstatus;
 
-	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
+	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+	    (sstatus & 0xf) == 0x3)
 		return 1;
 	return 0;
 }
 
 /**
- *	ata_port_offline - test whether the given port is offline
- *	@ap: ATA port to test
+ *	ata_link_offline - test whether the given link is offline
+ *	@link: ATA link to test
  *
- *	Test whether @ap is offline.  Note that this function returns
- *	0 if offline status of @ap cannot be obtained, so
- *	ata_port_online(ap) != !ata_port_offline(ap).
+ *	Test whether @link is offline.  Note that this function
+ *	returns 0 if offline status of @link cannot be obtained, so
+ *	ata_link_online(link) != !ata_link_offline(link).
  *
  *	LOCKING:
  *	None.
@@ -5777,11 +6469,12 @@ int ata_port_online(struct ata_port *ap)
  *	RETURNS:
  *	1 if the port offline status is available and offline.
  */
-int ata_port_offline(struct ata_port *ap)
+int ata_link_offline(struct ata_link *link)
 {
 	u32 sstatus;
 
-	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
+	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+	    (sstatus & 0xf) != 0x3)
 		return 1;
 	return 0;
 }
@@ -5799,6 +6492,10 @@ int ata_flush_cache(struct ata_device *dev)
 	else
 		cmd = ATA_CMD_FLUSH;
 
+	/* This is wrong. On a failed flush we get back the LBA of the lost
+	   sector and we should (assuming it wasn't aborted as unknown) issue
+	   a further flush command to continue the writeback until it no
+	   longer errors. */
 	err_mask = ata_do_simple_cmd(dev, cmd);
 	if (err_mask) {
 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
@@ -5818,6 +6515,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
 
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
+		struct ata_link *link;
 
 		/* Previous resume operation might still be in
 		 * progress.  Wait for PM_PENDING to clear.
@@ -5837,8 +6535,10 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
 		}
 
 		ap->pflags |= ATA_PFLAG_PM_PENDING;
-		ap->eh_info.action |= action;
-		ap->eh_info.flags |= ehi_flags;
+		__ata_port_for_each_link(link, ap) {
+			link->eh_info.action |= action;
+			link->eh_info.flags |= ehi_flags;
+		}
 
 		ata_port_schedule_eh(ap);
 
@@ -5875,6 +6575,12 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
 	int rc;
 
+	/*
+	 * disable link pm on all ports before requesting
+	 * any pm activity
+	 */
+	ata_lpm_enable(host);
+
 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
 	if (rc == 0)
 		host->dev->power.power_state = mesg;
@@ -5897,6 +6603,9 @@ void ata_host_resume(struct ata_host *host)
 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
 	host->dev->power.power_state = PMSG_ON;
+
+	/* reenable link pm */
+	ata_lpm_disable(host);
 }
 #endif
 
@@ -5942,11 +6651,13 @@ int ata_port_start(struct ata_port *ap)
  */
 void ata_dev_init(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
 	unsigned long flags;
 
 	/* SATA spd limit is bound to the first device */
-	ap->sata_spd_limit = ap->hw_sata_spd_limit;
+	link->sata_spd_limit = link->hw_sata_spd_limit;
+	link->sata_spd = 0;
 
 	/* High bits of dev->flags are used to record warm plug
 	 * requests which occur asynchronously.  Synchronize using
@@ -5954,6 +6665,7 @@ void ata_dev_init(struct ata_device *dev)
 	 */
 	spin_lock_irqsave(ap->lock, flags);
 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
+	dev->horkage = 0;
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
@@ -5964,6 +6676,70 @@ void ata_dev_init(struct ata_device *dev)
 }
 
 /**
+ *	ata_link_init - Initialize an ata_link structure
+ *	@ap: ATA port link is attached to
+ *	@link: Link structure to initialize
+ *	@pmp: Port multiplier port number
+ *
+ *	Initialize @link.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
+{
+	int i;
+
+	/* clear everything except for devices */
+	memset(link, 0, offsetof(struct ata_link, device[0]));
+
+	link->ap = ap;
+	link->pmp = pmp;
+	link->active_tag = ATA_TAG_POISON;
+	link->hw_sata_spd_limit = UINT_MAX;
+
+	/* can't use iterator, ap isn't initialized yet */
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &link->device[i];
+
+		dev->link = link;
+		dev->devno = dev - link->device;
+		ata_dev_init(dev);
+	}
+}
+
+/**
+ *	sata_link_init_spd - Initialize link->sata_spd_limit
+ *	@link: Link to configure sata_spd_limit for
+ *
+ *	Initialize @link->[hw_]sata_spd_limit to the currently
+ *	configured value.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_link_init_spd(struct ata_link *link)
+{
+	u32 scontrol, spd;
+	int rc;
+
+	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+	if (rc)
+		return rc;
+
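+	/* The SPD field of SControl holds the configured speed limit:
+	 * 0 means no restriction, otherwise mask the hardware limit
+	 * down to the generations at or below the configured one.
+	 */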
+	spd = (scontrol >> 4) & 0xf;
+	if (spd)
+		link->hw_sata_spd_limit &= (1 << spd) - 1;
+
+	link->sata_spd_limit = link->hw_sata_spd_limit;
+
+	return 0;
+}
+
+/**
  *	ata_port_alloc - allocate and initialize basic ATA port resources
  *	@host: ATA host this allocated port belongs to
  *
@@ -5978,7 +6754,6 @@ void ata_dev_init(struct ata_device *dev)
 struct ata_port *ata_port_alloc(struct ata_host *host)
 {
 	struct ata_port *ap;
-	unsigned int i;
 
 	DPRINTK("ENTER\n");
 
@@ -5993,9 +6768,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 	ap->ctl = ATA_DEVCTL_OBS;
 	ap->host = host;
 	ap->dev = host->dev;
-
-	ap->hw_sata_spd_limit = UINT_MAX;
-	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
 
 #if defined(ATA_VERBOSE_DEBUG)
@@ -6007,20 +6779,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-	INIT_WORK(&ap->port_task, NULL, NULL);
+	INIT_WORK(&ap->port_task, ata_pio_task, ap);
 	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
 	INIT_LIST_HEAD(&ap->eh_done_q);
 	init_waitqueue_head(&ap->eh_wait_q);
+	init_timer_deferrable(&ap->fastdrain_timer);
+	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
+	ap->fastdrain_timer.data = (unsigned long)ap;
 
 	ap->cbl = ATA_CBL_NONE;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		dev->ap = ap;
-		dev->devno = i;
-		ata_dev_init(dev);
-	}
+	ata_link_init(ap, &ap->link, 0);
 
 #ifdef ATA_IRQ_TRAP
 	ap->stats.unhandled_irq = 1;
@@ -6040,22 +6810,10 @@ static void ata_host_release(struct device *gendev, void *res)
 		if (!ap)
 			continue;
 
-		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
-			ap->ops->port_stop(ap);
-	}
-
-	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
-		host->ops->host_stop(host);
-
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap = host->ports[i];
-
-		if (!ap)
-			continue;
-
 		if (ap->scsi_host)
 			scsi_host_put(ap->scsi_host);
 
+		kfree(ap->pmp_link);
 		kfree(ap);
 		host->ports[i] = NULL;
 	}
@@ -6166,6 +6924,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 		ap->mwdma_mask = pi->mwdma_mask;
 		ap->udma_mask = pi->udma_mask;
 		ap->flags |= pi->flags;
+		ap->link.flags |= pi->link_flags;
 		ap->ops = pi->port_ops;
 
 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
@@ -6177,6 +6936,24 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 	return host;
 }
 
+static void ata_host_stop(struct device *gendev, void *res)
+{
+	struct ata_host *host = dev_get_drvdata(gendev);
+	int i;
+
+	WARN_ON(!(host->flags & ATA_HOST_STARTED));
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap->ops->port_stop)
+			ap->ops->port_stop(ap);
+	}
+
+	if (host->ops->host_stop)
+		host->ops->host_stop(host);
+}
+
 /**
  *	ata_host_start - start and freeze ports of an ATA host
  *	@host: ATA host to start ports for
@@ -6195,6 +6972,8 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
  */
 int ata_host_start(struct ata_host *host)
 {
+	int have_stop = 0;
+	void *start_dr = NULL;
 	int i, rc;
 
 	if (host->flags & ATA_HOST_STARTED)
@@ -6206,18 +6985,35 @@ int ata_host_start(struct ata_host *host)
 		if (!host->ops && !ata_port_is_dummy(ap))
 			host->ops = ap->ops;
 
+		if (ap->ops->port_stop)
+			have_stop = 1;
+	}
+
+	if (host->ops->host_stop)
+		have_stop = 1;
+
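+	/* If any stop callback exists, register a zero-sized devres
+	 * whose release function (ata_host_stop) invokes port_stop and
+	 * host_stop when the owning device goes away.
+	 */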
+	if (have_stop) {
+		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
+		if (!start_dr)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
 		if (ap->ops->port_start) {
 			rc = ap->ops->port_start(ap);
 			if (rc) {
-				ata_port_printk(ap, KERN_ERR, "failed to "
-						"start port (errno=%d)\n", rc);
+				if (rc != -ENODEV)
+					dev_printk(KERN_ERR, host->dev, "failed to start port %d (errno=%d)\n", i, rc);
 				goto err_out;
 			}
 		}
-
 		ata_eh_freeze_port(ap);
 	}
 
+	if (start_dr)
+		devres_add(host->dev, start_dr);
 	host->flags |= ATA_HOST_STARTED;
 	return 0;
 
@@ -6228,6 +7024,7 @@ int ata_host_start(struct ata_host *host)
 		if (ap->ops->port_stop)
 			ap->ops->port_stop(ap);
 	}
+	devres_free(start_dr);
 	return rc;
 }
 
@@ -6295,11 +7092,12 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 	if (rc)
 		return rc;
 
+	/* associate with ACPI nodes */
+	ata_acpi_associate(host);
+
 	/* set cable, sata_spd_limit and report */
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
-		int irq_line;
-		u32 scontrol;
 		unsigned long xfer_mask;
 
 		/* set SATA cable type if still unset */
@@ -6307,32 +7105,20 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 			ap->cbl = ATA_CBL_SATA;
 
 		/* init sata_spd_limit to the current value */
-		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
-			int spd = (scontrol >> 4) & 0xf;
-			if (spd)
-				ap->hw_sata_spd_limit &= (1 << spd) - 1;
-		}
-		ap->sata_spd_limit = ap->hw_sata_spd_limit;
-
-		/* report the secondary IRQ for second channel legacy */
-		irq_line = host->irq;
-		if (i == 1 && host->irq2)
-			irq_line = host->irq2;
+		sata_link_init_spd(&ap->link);
 
+		/* print per-port info to dmesg */
 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
 					      ap->udma_mask);
 
-		/* print per-port info to dmesg */
-		if (!ata_port_is_dummy(ap))
-			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
-					"ctl 0x%p bmdma 0x%p irq %d\n",
-					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
+		if (!ata_port_is_dummy(ap)) {
+			ata_port_printk(ap, KERN_INFO,
+					"%cATA max %s %s\n",
+					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
 					ata_mode_string(xfer_mask),
-					ap->ioaddr.cmd_addr,
-					ap->ioaddr.ctl_addr,
-					ap->ioaddr.bmdma_addr,
-					irq_line);
-		else
+					ap->link.eh_info.desc);
+			ata_ehi_clear_desc(&ap->link.eh_info);
+		} else
 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
 	}
 
@@ -6344,7 +7130,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 
 		/* probe */
 		if (ap->ops->error_handler) {
-			struct ata_eh_info *ehi = &ap->eh_info;
+			struct ata_eh_info *ehi = &ap->link.eh_info;
 			unsigned long flags;
 
 			ata_port_probe(ap);
@@ -6352,7 +7138,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 			/* kick EH for boot probing */
 			spin_lock_irqsave(ap->lock, flags);
 
-			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
+			ehi->probe_mask =
+				(1 << ata_link_max_devices(&ap->link)) - 1;
 			ehi->action |= ATA_EH_SOFTRESET;
 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
 
@@ -6385,7 +7172,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 
-		ata_scsi_scan_host(ap);
+		ata_scsi_scan_host(ap, 1);
+		ata_lpm_schedule(ap, ap->pm_policy);
 	}
 
 	return 0;
@@ -6404,6 +7192,10 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 *	request IRQ and register it.  This helper takes necessary
  *	arguments and performs the three steps in one go.
  *
+ *	An invalid IRQ skips the IRQ registration and expects the host to
+ *	have set polling mode on the port. In this case, @irq_handler
+ *	should be NULL.
+ *
  *	LOCKING:
  *	Inherited from calling layer (may sleep).
  *
@@ -6414,19 +7206,25 @@ int ata_host_activate(struct ata_host *host, int irq,
 		      irq_handler_t irq_handler, unsigned long irq_flags,
 		      struct scsi_host_template *sht)
 {
-	int rc;
+	int i, rc;
 
 	rc = ata_host_start(host);
 	if (rc)
 		return rc;
 
+	/* Special case for polling mode */
+	if (!irq) {
+		WARN_ON(irq_handler);
+		return ata_host_register(host, sht);
+	}
+
 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
 			      dev_driver_string(host->dev), host);
 	if (rc)
 		return rc;
 
-	/* Used to print device info at probe */
-	host->irq = irq;
+	for (i = 0; i < host->n_ports; i++)
+		ata_port_desc(host->ports[i], "irq %d", irq);
 
 	rc = ata_host_register(host, sht);
 	/* if failed, just free the IRQ and leave ports alone */
@@ -6447,10 +7245,11 @@ int ata_host_activate(struct ata_host *host, int irq,
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_port_detach(struct ata_port *ap)
+static void ata_port_detach(struct ata_port *ap)
 {
 	unsigned long flags;
-	int i;
+	struct ata_link *link;
+	struct ata_device *dev;
 
 	if (!ap->ops->error_handler)
 		goto skip_eh;
@@ -6462,15 +7261,13 @@ void ata_port_detach(struct ata_port *ap)
 
 	ata_port_wait_eh(ap);
 
-	/* EH is now guaranteed to see UNLOADING, so no new device
-	 * will be attached.  Disable all existing devices.
+	/* EH is now guaranteed to see UNLOADING - EH context belongs
+	 * to us.  Disable all existing devices.
 	 */
-	spin_lock_irqsave(ap->lock, flags);
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ata_dev_disable(&ap->device[i]);
-
-	spin_unlock_irqrestore(ap->lock, flags);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link)
+			ata_dev_disable(dev);
+	}
 
 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
 	 * will be skipped and retrials will be terminated with bad
@@ -6482,12 +7279,9 @@ void ata_port_detach(struct ata_port *ap)
 
 	ata_port_wait_eh(ap);
 
-	/* Flush hotplug task.  The sequence is similar to
-	 * ata_port_flush_task().
-	 */
-	flush_workqueue(ata_aux_wq);
-	cancel_delayed_work(&ap->hotplug_task);
 	flush_workqueue(ata_aux_wq);
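+	/* A hotplug task that is already running cannot be cancelled;
+	 * in that case flush the workqueue once more to wait for it.
+	 */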
+	if (!cancel_delayed_work(&ap->hotplug_task))
+		flush_workqueue(ata_aux_wq);
 
  skip_eh:
 	/* remove the associated SCSI host */
@@ -6509,6 +7303,9 @@ void ata_host_detach(struct ata_host *host)
 
 	for (i = 0; i < host->n_ports; i++)
 		ata_port_detach(host->ports[i]);
+
+	/* the host is dead now, dissociate ACPI */
+	ata_acpi_dissociate(host);
 }
 
 /**
@@ -6553,7 +7350,7 @@ void ata_std_ports(struct ata_ioports *ioaddr)
  */
 void ata_pci_remove_one(struct pci_dev *pdev)
 {
-	struct device *dev = pci_dev_to_dev(pdev);
+	struct device *dev = &pdev->dev;
 	struct ata_host *host = dev_get_drvdata(dev);
 
 	ata_host_detach(host);
@@ -6761,7 +7558,6 @@ static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
 }
 
 const struct ata_port_operations ata_dummy_port_ops = {
-	.port_disable		= ata_port_disable,
 	.check_status		= ata_dummy_check_status,
 	.check_altstatus	= ata_dummy_check_status,
 	.dev_select		= ata_noop_dev_select,
@@ -6786,7 +7582,6 @@ const struct ata_port_info ata_dummy_port_info = {
  * likely to change as new drivers are added and updated.
  * Do not depend on ABI/API stability.
  */
-
 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
@@ -6823,7 +7618,9 @@ EXPORT_SYMBOL_GPL(ata_interrupt);
 EXPORT_SYMBOL_GPL(ata_do_set_mode);
 EXPORT_SYMBOL_GPL(ata_data_xfer);
 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
 EXPORT_SYMBOL_GPL(ata_qc_prep);
+EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
 EXPORT_SYMBOL_GPL(ata_bmdma_start);
@@ -6838,14 +7635,12 @@ EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
 EXPORT_SYMBOL_GPL(ata_port_probe);
 EXPORT_SYMBOL_GPL(ata_dev_disable);
 EXPORT_SYMBOL_GPL(sata_set_spd);
-EXPORT_SYMBOL_GPL(sata_phy_debounce);
-EXPORT_SYMBOL_GPL(sata_phy_resume);
-EXPORT_SYMBOL_GPL(sata_phy_reset);
-EXPORT_SYMBOL_GPL(__sata_phy_reset);
+EXPORT_SYMBOL_GPL(sata_link_debounce);
+EXPORT_SYMBOL_GPL(sata_link_resume);
 EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_std_prereset);
 EXPORT_SYMBOL_GPL(ata_std_softreset);
-EXPORT_SYMBOL_GPL(sata_port_hardreset);
+EXPORT_SYMBOL_GPL(sata_link_hardreset);
 EXPORT_SYMBOL_GPL(sata_std_hardreset);
 EXPORT_SYMBOL_GPL(ata_std_postreset);
 EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -6854,6 +7649,7 @@ EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_wait_register);
 EXPORT_SYMBOL_GPL(ata_busy_sleep);
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
 EXPORT_SYMBOL_GPL(ata_wait_ready);
 EXPORT_SYMBOL_GPL(ata_port_queue_task);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
@@ -6866,8 +7662,8 @@ EXPORT_SYMBOL_GPL(sata_scr_valid);
 EXPORT_SYMBOL_GPL(sata_scr_read);
 EXPORT_SYMBOL_GPL(sata_scr_write);
 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
-EXPORT_SYMBOL_GPL(ata_port_online);
-EXPORT_SYMBOL_GPL(ata_port_offline);
+EXPORT_SYMBOL_GPL(ata_link_online);
+EXPORT_SYMBOL_GPL(ata_link_offline);
 #ifdef CONFIG_PM
 EXPORT_SYMBOL_GPL(ata_host_suspend);
 EXPORT_SYMBOL_GPL(ata_host_resume);
@@ -6875,7 +7671,6 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
-EXPORT_SYMBOL_GPL(ata_device_blacklisted);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
@@ -6884,9 +7679,9 @@ EXPORT_SYMBOL_GPL(ata_timing_merge);
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
-EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 #ifdef CONFIG_PM
@@ -6899,19 +7694,30 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter);
 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
 #endif /* CONFIG_PCI */
 
-EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
+EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
+EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
+EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
+EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
+
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
+EXPORT_SYMBOL_GPL(ata_port_desc);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
+#endif /* CONFIG_PCI */
 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
+EXPORT_SYMBOL_GPL(ata_link_abort);
 EXPORT_SYMBOL_GPL(ata_port_abort);
 EXPORT_SYMBOL_GPL(ata_port_freeze);
+EXPORT_SYMBOL_GPL(sata_async_notification);
 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
 EXPORT_SYMBOL_GPL(ata_do_eh);
 EXPORT_SYMBOL_GPL(ata_irq_on);
-EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
-EXPORT_SYMBOL_GPL(ata_irq_ack);
-EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
 
 EXPORT_SYMBOL_GPL(ata_cable_40wire);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f7582c9..f0124a8 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_eh.h>
@@ -56,6 +57,7 @@ enum {
  */
 enum {
 	ATA_EH_PRERESET_TIMEOUT		= 10 * HZ,
+	ATA_EH_FASTDRAIN_INTERVAL	= 3 * HZ,
 };
 
 /* The following table determines how we sequence resets.  Each entry
@@ -73,7 +75,6 @@ static const unsigned long ata_eh_reset_timeouts[] = {
 };
 
 static void __ata_port_freeze(struct ata_port *ap);
-static void ata_eh_finish(struct ata_port *ap);
 #ifdef CONFIG_PM
 static void ata_eh_handle_port_suspend(struct ata_port *ap);
 static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -85,6 +86,138 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 { }
 #endif /* CONFIG_PM */
 
+static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
+				 va_list args)
+{
+	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
+				     ATA_EH_DESC_LEN - ehi->desc_len,
+				     fmt, args);
+}
+
+/**
+ *	__ata_ehi_push_desc - push error description without adding separator
+ *	@ehi: target EHI
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to @ehi->desc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ *	ata_ehi_push_desc - push error description with separator
+ *	@ehi: target EHI
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to @ehi->desc.
+ *	If @ehi->desc is not empty, ", " is added in-between.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+	va_list args;
+
+	if (ehi->desc_len)
+		__ata_ehi_push_desc(ehi, ", ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(ehi, fmt, args);
+	va_end(args);
+}
+
+/**
+ *	ata_ehi_clear_desc - clean error description
+ *	@ehi: target EHI
+ *
+ *	Clear @ehi->desc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_ehi_clear_desc(struct ata_eh_info *ehi)
+{
+	ehi->desc[0] = '\0';
+	ehi->desc_len = 0;
+}
+
+/**
+ *	ata_port_desc - append port description
+ *	@ap: target ATA port
+ *	@fmt: printf format string
+ *
+ *	Format string according to @fmt and append it to port
+ *	description.  If port description is not empty, " " is added
+ *	in-between.  This function is to be used while initializing
+ *	ata_host.  The description is printed on host registration.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
+{
+	va_list args;
+
+	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
+
+	if (ap->link.eh_info.desc_len)
+		__ata_ehi_push_desc(&ap->link.eh_info, " ");
+
+	va_start(args, fmt);
+	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
+	va_end(args);
+}
+
+#ifdef CONFIG_PCI
+
+/**
+ *	ata_port_pbar_desc - append PCI BAR description
+ *	@ap: target ATA port
+ *	@bar: target PCI BAR
+ *	@offset: offset into PCI BAR
+ *	@name: name of the area
+ *
+ *	If @offset is negative, this function formats a string which
+ *	contains the name, address, size and type of the BAR and
+ *	appends it to the port description.  If @offset is zero or
+ *	positive, only the name and the address plus @offset are appended.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+			const char *name)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	char *type = "";
+	unsigned long long start, len;
+
+	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
+		type = "m";
+	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
+		type = "i";
+
+	start = (unsigned long long)pci_resource_start(pdev, bar);
+	len = (unsigned long long)pci_resource_len(pdev, bar);
+
+	if (offset < 0)
+		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
+	else
+		ata_port_desc(ap, "%s 0x%llx", name, start + offset);
+}
+
+#endif /* CONFIG_PCI */
+
 static void ata_ering_record(struct ata_ering *ering, int is_io,
 			     unsigned int err_mask)
 {
@@ -129,28 +262,29 @@ static int ata_ering_map(struct ata_ering *ering,
 
 static unsigned int ata_eh_dev_action(struct ata_device *dev)
 {
-	struct ata_eh_context *ehc = &dev->ap->eh_context;
+	struct ata_eh_context *ehc = &dev->link->eh_context;
 
 	return ehc->i.action | ehc->i.dev_action[dev->devno];
 }
 
-static void ata_eh_clear_action(struct ata_device *dev,
+static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
 				struct ata_eh_info *ehi, unsigned int action)
 {
-	int i;
+	struct ata_device *tdev;
 
 	if (!dev) {
 		ehi->action &= ~action;
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			ehi->dev_action[i] &= ~action;
+		ata_link_for_each_dev(tdev, link)
+			ehi->dev_action[tdev->devno] &= ~action;
 	} else {
 		/* doesn't make sense for port-wide EH actions */
 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
 
 		/* break ehi->action into ehi->dev_action */
 		if (ehi->action & action) {
-			for (i = 0; i < ATA_MAX_DEVICES; i++)
-				ehi->dev_action[i] |= ehi->action & action;
+			ata_link_for_each_dev(tdev, link)
+				ehi->dev_action[tdev->devno] |=
+					ehi->action & action;
 			ehi->action &= ~action;
 		}
 
@@ -195,7 +329,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 
 	ret = EH_HANDLED;
 	spin_lock_irqsave(ap->lock, flags);
-	qc = ata_qc_from_tag(ap, ap->active_tag);
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc) {
 		WARN_ON(qc->scsicmd != cmd);
 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
@@ -224,7 +358,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 void ata_scsi_error(struct Scsi_Host *host)
 {
 	struct ata_port *ap = ata_shost_to_port(host);
-	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
+	int i;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
@@ -290,24 +424,35 @@ void ata_scsi_error(struct Scsi_Host *host)
 			__ata_port_freeze(ap);
 
 		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* initialize eh_tries */
+		ap->eh_tries = ATA_EH_MAX_TRIES;
 	} else
 		spin_unlock_wait(ap->lock);
 
  repeat:
 	/* invoke error handler */
 	if (ap->ops->error_handler) {
+		struct ata_link *link;
+
+		/* kill fast drain timer */
+		del_timer_sync(&ap->fastdrain_timer);
+
 		/* process port resume request */
 		ata_eh_handle_port_resume(ap);
 
 		/* fetch & clear EH info */
 		spin_lock_irqsave(ap->lock, flags);
 
-		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
-		ap->eh_context.i = ap->eh_info;
-		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
+		__ata_port_for_each_link(link, ap) {
+			memset(&link->eh_context, 0, sizeof(link->eh_context));
+			link->eh_context.i = link->eh_info;
+			memset(&link->eh_info, 0, sizeof(link->eh_info));
+		}
 
 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
 
 		spin_unlock_irqrestore(ap->lock, flags);
 
@@ -327,20 +472,18 @@ void ata_scsi_error(struct Scsi_Host *host)
 		spin_lock_irqsave(ap->lock, flags);
 
 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
-			if (--repeat_cnt) {
-				ata_port_printk(ap, KERN_INFO,
-					"EH pending after completion, "
-					"repeating EH (cnt=%d)\n", repeat_cnt);
+			if (--ap->eh_tries) {
 				spin_unlock_irqrestore(ap->lock, flags);
 				goto repeat;
 			}
 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
-					"tries, giving up\n", ATA_EH_MAX_REPEAT);
+					"tries, giving up\n", ATA_EH_MAX_TRIES);
 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
 		}
 
 		/* this run is complete, make sure EH info is clear */
-		memset(&ap->eh_info, 0, sizeof(ap->eh_info));
+		__ata_port_for_each_link(link, ap)
+			memset(&link->eh_info, 0, sizeof(link->eh_info));
 
 		/* Clear host_eh_scheduled while holding ap->lock such
 		 * that if exception occurs after this point but
@@ -351,7 +494,7 @@ void ata_scsi_error(struct Scsi_Host *host)
 
 		spin_unlock_irqrestore(ap->lock, flags);
 	} else {
-		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
 		ap->ops->eng_timeout(ap);
 	}
 
@@ -416,99 +559,92 @@ void ata_port_wait_eh(struct ata_port *ap)
 	}
 }
 
-/**
- *	ata_qc_timeout - Handle timeout of queued command
- *	@qc: Command that timed out
- *
- *	Some part of the kernel (currently, only the SCSI layer)
- *	has noticed that the active command on port @ap has not
- *	completed after a specified length of time.  Handle this
- *	condition by disabling DMA (if necessary) and completing
- *	transactions, with error if necessary.
- *
- *	This also handles the case of the "lost interrupt", where
- *	for some reason (possibly hardware bug, possibly driver bug)
- *	an interrupt was not delivered to the driver, even though the
- *	transaction completed successfully.
- *
- *	TODO: kill this function once old EH is gone.
- *
- *	LOCKING:
- *	Inherited from SCSI layer (none, can sleep)
- */
-static void ata_qc_timeout(struct ata_queued_cmd *qc)
+static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
-	u8 host_stat = 0, drv_stat;
-	unsigned long flags;
+	unsigned int tag;
+	int nr = 0;
 
-	DPRINTK("ENTER\n");
-
-	ap->hsm_task_state = HSM_ST_IDLE;
-
-	spin_lock_irqsave(ap->lock, flags);
+	/* count only non-internal commands */
+	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
+		if (ata_qc_from_tag(ap, tag))
+			nr++;
 
-	switch (qc->tf.protocol) {
+	return nr;
+}
 
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-		host_stat = ap->ops->bmdma_status(ap);
+void ata_eh_fastdrain_timerfn(unsigned long arg)
+{
+	struct ata_port *ap = (void *)arg;
+	unsigned long flags;
+	int cnt;
 
-		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(qc);
+	spin_lock_irqsave(ap->lock, flags);
 
-		/* fall through */
+	cnt = ata_eh_nr_in_flight(ap);
 
-	default:
-		ata_altstatus(ap);
-		drv_stat = ata_chk_status(ap);
+	/* are we done? */
+	if (!cnt)
+		goto out_unlock;
 
-		/* ack bmdma irq events */
-		ap->ops->irq_clear(ap);
+	if (cnt == ap->fastdrain_cnt) {
+		unsigned int tag;
 
-		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
-			       "stat 0x%x host_stat 0x%x\n",
-			       qc->tf.command, drv_stat, host_stat);
+		/* No progress during the last interval, tag all
+		 * in-flight qcs as timed out and freeze the port.
+		 */
+		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
+			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+			if (qc)
+				qc->err_mask |= AC_ERR_TIMEOUT;
+		}
 
-		/* complete taskfile transaction */
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		break;
+		ata_port_freeze(ap);
+	} else {
+		/* some qcs have finished, give it another chance */
+		ap->fastdrain_cnt = cnt;
+		ap->fastdrain_timer.expires =
+			jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+		add_timer(&ap->fastdrain_timer);
 	}
 
+ out_unlock:
 	spin_unlock_irqrestore(ap->lock, flags);
-
-	ata_eh_qc_complete(qc);
-
-	DPRINTK("EXIT\n");
 }
 
 /**
- *	ata_eng_timeout - Handle timeout of queued command
- *	@ap: Port on which timed-out command is active
- *
- *	Some part of the kernel (currently, only the SCSI layer)
- *	has noticed that the active command on port @ap has not
- *	completed after a specified length of time.  Handle this
- *	condition by disabling DMA (if necessary) and completing
- *	transactions, with error if necessary.
- *
- *	This also handles the case of the "lost interrupt", where
- *	for some reason (possibly hardware bug, possibly driver bug)
- *	an interrupt was not delivered to the driver, even though the
- *	transaction completed successfully.
+ *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
+ *	@ap: target ATA port
+ *	@fastdrain: activate fast drain
  *
- *	TODO: kill this function once old EH is gone.
+ *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
+ *	is non-zero and EH wasn't pending before.  Fast drain ensures
+ *	that EH kicks in in a timely manner.
  *
  *	LOCKING:
- *	Inherited from SCSI layer (none, can sleep)
+ *	spin_lock_irqsave(host lock)
  */
-void ata_eng_timeout(struct ata_port *ap)
+static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 {
-	DPRINTK("ENTER\n");
+	int cnt;
 
-	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
+	/* already scheduled? */
+	if (ap->pflags & ATA_PFLAG_EH_PENDING)
+		return;
 
-	DPRINTK("EXIT\n");
+	ap->pflags |= ATA_PFLAG_EH_PENDING;
+
+	if (!fastdrain)
+		return;
+
+	/* do we have in-flight qcs? */
+	cnt = ata_eh_nr_in_flight(ap);
+	if (!cnt)
+		return;
+
+	/* activate fast drain */
+	ap->fastdrain_cnt = cnt;
+	ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+	add_timer(&ap->fastdrain_timer);
 }
 
 /**
@@ -528,7 +664,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	WARN_ON(!ap->ops->error_handler);
 
 	qc->flags |= ATA_QCFLAG_FAILED;
-	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
+	ata_eh_set_pending(ap, 1);
 
 	/* The following will fail if timeout has already expired.
 	 * ata_scsi_error() takes care of such scmds on EH entry.
@@ -555,34 +691,25 @@ void ata_port_schedule_eh(struct ata_port *ap)
 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
 		return;
 
-	ap->pflags |= ATA_PFLAG_EH_PENDING;
+	ata_eh_set_pending(ap, 1);
 	scsi_schedule_eh(ap->scsi_host);
 
 	DPRINTK("port EH scheduled\n");
 }
 
-/**
- *	ata_port_abort - abort all qc's on the port
- *	@ap: ATA port to abort qc's for
- *
- *	Abort all active qc's of @ap and schedule EH.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- *	RETURNS:
- *	Number of aborted qc's.
- */
-int ata_port_abort(struct ata_port *ap)
+static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
 	int tag, nr_aborted = 0;
 
 	WARN_ON(!ap->ops->error_handler);
 
+	/* we're gonna abort all commands, no need for fast drain */
+	ata_eh_set_pending(ap, 0);
+
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
 
-		if (qc) {
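+		/* a NULL @link aborts port-wide; otherwise only qcs
+		 * whose device sits on @link are failed
+		 */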
+		if (qc && (!link || qc->dev->link == link)) {
 			qc->flags |= ATA_QCFLAG_FAILED;
 			ata_qc_complete(qc);
 			nr_aborted++;
@@ -596,6 +723,40 @@ int ata_port_abort(struct ata_port *ap)
 }
 
 /**
+ *	ata_link_abort - abort all qc's on the link
+ *	@link: ATA link to abort qc's for
+ *
+ *	Abort all active qc's on @link and schedule EH.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Number of aborted qc's.
+ */
+int ata_link_abort(struct ata_link *link)
+{
+	return ata_do_link_abort(link->ap, link);
+}
+
+/**
+ *	ata_port_abort - abort all qc's on the port
+ *	@ap: ATA port to abort qc's for
+ *
+ *	Abort all active qc's of @ap and schedule EH.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Number of aborted qc's.
+ */
+int ata_port_abort(struct ata_port *ap)
+{
+	return ata_do_link_abort(ap, NULL);
+}
+
+/**
  *	__ata_port_freeze - freeze port
  *	@ap: ATA port to freeze
  *
@@ -650,6 +811,79 @@ int ata_port_freeze(struct ata_port *ap)
 }
 
 /**
+ *	sata_async_notification - SATA async notification handler
+ *	@ap: ATA port where async notification is received
+ *
+ *	Handler to be called when async notification via SDB FIS is
+ *	received.  This function schedules EH if necessary.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	1 if EH is scheduled, 0 otherwise.
+ */
+int sata_async_notification(struct ata_port *ap)
+{
+	u32 sntf;
+	int rc;
+
+	if (!(ap->flags & ATA_FLAG_AN))
+		return 0;
+
+	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+	if (rc == 0)
+		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+	if (!ap->nr_pmp_links || rc) {
+		/* PMP is not attached or SNTF is not available */
+		if (!ap->nr_pmp_links) {
+			/* PMP is not attached.  Check whether ATAPI
+			 * AN is configured.  If so, notify media
+			 * change.
+			 */
+			struct ata_device *dev = ap->link.device;
+
+			if ((dev->class == ATA_DEV_ATAPI) &&
+			    (dev->flags & ATA_DFLAG_AN))
+				ata_scsi_media_change_notify(dev);
+			return 0;
+		} else {
+			/* PMP is attached but SNTF is not available.
+			 * ATAPI async media change notification is
+			 * not used.  The PMP must be reporting PHY
+			 * status change, schedule EH.
+			 */
+			ata_port_schedule_eh(ap);
+			return 1;
+		}
+	} else {
+		/* PMP is attached and SNTF is available */
+		struct ata_link *link;
+
+		/* check and notify ATAPI AN */
+		ata_port_for_each_link(link, ap) {
+			if (!(sntf & (1 << link->pmp)))
+				continue;
+
+			if ((link->device->class == ATA_DEV_ATAPI) &&
+			    (link->device->flags & ATA_DFLAG_AN))
+				ata_scsi_media_change_notify(link->device);
+		}
+
+		/* If PMP is reporting that PHY status of some
+		 * downstream ports has changed, schedule EH.
+		 */
+		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
+			ata_port_schedule_eh(ap);
+			return 1;
+		}
+
+		return 0;
+	}
+}
+
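For illustration only (not part of the patch): a controller that interrupts when it receives an SDB FIS with the notification bit set can hand the event straight to sata_async_notification(), which reads and clears SNotification and schedules EH only when needed.  The interrupt-status bit and driver name below are hypothetical.

#include <linux/libata.h>

#define MY_DRV_IRQ_SDB_NOTIFY	(1 << 3)	/* hypothetical AN interrupt bit */

static void my_drv_port_intr(struct ata_port *ap, u32 irq_stat)
{
	/* called with the host lock held from the driver's irq handler */
	if (irq_stat & MY_DRV_IRQ_SDB_NOTIFY)
		sata_async_notification(ap);

	/* ... normal command completion handling would continue here ... */
}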
+/**
  *	ata_eh_freeze_port - EH helper to freeze port
  *	@ap: ATA port to freeze
  *
@@ -760,9 +994,10 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	None.
  */
-static void ata_eh_detach_dev(struct ata_device *dev)
+void ata_eh_detach_dev(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
 	unsigned long flags;
 
 	ata_dev_disable(dev);
@@ -777,31 +1012,32 @@ static void ata_eh_detach_dev(struct ata_device *dev)
 	}
 
 	/* clear per-dev EH actions */
-	ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
-	ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
+	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
+	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
 
 	spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
  *	ata_eh_about_to_do - about to perform eh_action
- *	@ap: target ATA port
+ *	@link: target ATA link
  *	@dev: target ATA dev for per-dev action (can be NULL)
  *	@action: action about to be performed
  *
  *	Called just before performing EH actions to clear related bits
- *	in @ap->eh_info such that eh actions are not unnecessarily
+ *	in @link->eh_info such that eh actions are not unnecessarily
  *	repeated.
  *
  *	LOCKING:
  *	None.
  */
-static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
-			       unsigned int action)
+void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
+			unsigned int action)
 {
+	struct ata_port *ap = link->ap;
+	struct ata_eh_info *ehi = &link->eh_info;
+	struct ata_eh_context *ehc = &link->eh_context;
 	unsigned long flags;
-	struct ata_eh_info *ehi = &ap->eh_info;
-	struct ata_eh_context *ehc = &ap->eh_context;
 
 	spin_lock_irqsave(ap->lock, flags);
 
@@ -818,7 +1054,7 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
 		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
 	}
 
-	ata_eh_clear_action(dev, ehi, action);
+	ata_eh_clear_action(link, dev, ehi, action);
 
 	if (!(ehc->i.flags & ATA_EHI_QUIET))
 		ap->pflags |= ATA_PFLAG_RECOVERED;
@@ -828,26 +1064,28 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
 
 /**
  *	ata_eh_done - EH action complete
- *	@ap: target ATA port
+ *	@link: target ATA link
  *	@dev: target ATA dev for per-dev action (can be NULL)
  *	@action: action just completed
  *
  *	Called right after performing EH actions to clear related bits
- *	in @ap->eh_context.
+ *	in @link->eh_context.
  *
  *	LOCKING:
  *	None.
  */
-static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
-			unsigned int action)
+void ata_eh_done(struct ata_link *link, struct ata_device *dev,
+		 unsigned int action)
 {
+	struct ata_eh_context *ehc = &link->eh_context;
+
 	/* if reset is complete, clear all reset actions & reset modifier */
 	if (action & ATA_EH_RESET_MASK) {
 		action |= ATA_EH_RESET_MASK;
-		ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
+		ehc->i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
 	}
 
-	ata_eh_clear_action(dev, &ap->eh_context.i, action);
+	ata_eh_clear_action(link, dev, &ehc->i, action);
 }
 
 /**
@@ -864,7 +1102,7 @@ static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
  *	RETURNS:
  *	Descriptive string for @err_mask
  */
-static const char * ata_err_string(unsigned int err_mask)
+static const char *ata_err_string(unsigned int err_mask)
 {
 	if (err_mask & AC_ERR_HOST_BUS)
 		return "host bus error";
@@ -917,7 +1155,7 @@ static unsigned int ata_read_log_page(struct ata_device *dev,
 	tf.protocol = ATA_PROT_PIO;
 
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
-				     buf, sectors * ATA_SECT_SIZE);
+				     buf, sectors * ATA_SECT_SIZE, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -941,7 +1179,7 @@ static unsigned int ata_read_log_page(struct ata_device *dev,
 static int ata_eh_read_log_10h(struct ata_device *dev,
 			       int *tag, struct ata_taskfile *tf)
 {
-	u8 *buf = dev->ap->sector_buf;
+	u8 *buf = dev->link->ap->sector_buf;
 	unsigned int err_mask;
 	u8 csum;
 	int i;
@@ -995,7 +1233,7 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
 {
 	struct ata_device *dev = qc->dev;
 	unsigned char *sense_buf = qc->scsicmd->sense_buffer;
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	struct ata_taskfile tf;
 	u8 cdb[ATAPI_CDB_LEN];
 
@@ -1026,17 +1264,17 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
 		tf.feature |= ATAPI_PKT_DMA;
 	} else {
 		tf.protocol = ATA_PROT_ATAPI;
-		tf.lbam = (8 * 1024) & 0xff;
-		tf.lbah = (8 * 1024) >> 8;
+		tf.lbam = SCSI_SENSE_BUFFERSIZE;
+		tf.lbah = 0;
 	}
 
 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-				 sense_buf, SCSI_SENSE_BUFFERSIZE);
+				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
 }
 
 /**
  *	ata_eh_analyze_serror - analyze SError for a failed port
- *	@ap: ATA port to analyze SError for
+ *	@link: ATA link to analyze SError for
  *
  *	Analyze SError if available and further determine cause of
  *	failure.
@@ -1044,11 +1282,12 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	None.
  */
-static void ata_eh_analyze_serror(struct ata_port *ap)
+static void ata_eh_analyze_serror(struct ata_link *link)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_eh_context *ehc = &link->eh_context;
 	u32 serror = ehc->i.serror;
 	unsigned int err_mask = 0, action = 0;
+	u32 hotplug_mask;
 
 	if (serror & SERR_PERSISTENT) {
 		err_mask |= AC_ERR_ATA_BUS;
@@ -1067,7 +1306,20 @@ static void ata_eh_analyze_serror(struct ata_port *ap)
 		err_mask |= AC_ERR_SYSTEM;
 		action |= ATA_EH_HARDRESET;
 	}
-	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
+
+	/* Determine whether a hotplug event has occurred.  Both
+	 * SError.N/X are considered hotplug events for enabled or
+	 * host links.  For disabled PMP links, only the N bit is
+	 * considered, as the X bit is left at 1 for link plugging.
+	 */
+	hotplug_mask = 0;
+
+	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
+		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
+	else
+		hotplug_mask = SERR_PHYRDY_CHG;
+
+	if (serror & hotplug_mask)
 		ata_ehi_hotplugged(&ehc->i);
 
 	ehc->i.err_mask |= err_mask;
@@ -1076,7 +1328,7 @@ static void ata_eh_analyze_serror(struct ata_port *ap)
 
 /**
  *	ata_eh_analyze_ncq_error - analyze NCQ error
- *	@ap: ATA port to analyze NCQ error for
+ *	@link: ATA link to analyze NCQ error for
  *
  *	Read log page 10h, determine the offending qc and acquire
  *	error status TF.  For NCQ device errors, all LLDDs have to do
@@ -1086,10 +1338,11 @@ static void ata_eh_analyze_serror(struct ata_port *ap)
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-static void ata_eh_analyze_ncq_error(struct ata_port *ap)
+static void ata_eh_analyze_ncq_error(struct ata_link *link)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
-	struct ata_device *dev = ap->device;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev = link->device;
 	struct ata_queued_cmd *qc;
 	struct ata_taskfile tf;
 	int tag, rc;
@@ -1099,7 +1352,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
 		return;
 
 	/* is it NCQ device error? */
-	if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
+	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
 		return;
 
 	/* has LLDD analyzed already? */
@@ -1116,13 +1369,13 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
 	/* okay, this error is ours */
 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
 	if (rc) {
-		ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
+		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
 				"(errno=%d)\n", rc);
 		return;
 	}
 
-	if (!(ap->sactive & (1 << tag))) {
-		ata_port_printk(ap, KERN_ERR, "log page 10h reported "
+	if (!(link->sactive & (1 << tag))) {
+		ata_link_printk(link, KERN_ERR, "log page 10h reported "
 				"inactive tag %d\n", tag);
 		return;
 	}
@@ -1130,7 +1383,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
 	/* we've got the perpetrator, condemn it */
 	qc = __ata_qc_from_tag(ap, tag);
 	memcpy(&qc->result_tf, &tf, sizeof(tf));
-	qc->err_mask |= AC_ERR_DEV;
+	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
 	ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1337,7 +1590,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
 	/* speed down? */
 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
 		/* speed down SATA link speed if possible */
-		if (sata_down_spd_limit(dev->ap) == 0) {
+		if (sata_down_spd_limit(dev->link) == 0) {
 			action |= ATA_EH_HARDRESET;
 			goto done;
 		}
@@ -1368,7 +1621,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
 	 * SATA.  Consider it only for PATA.
 	 */
 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
-	    (dev->ap->cbl != ATA_CBL_SATA) &&
+	    (dev->link->ap->cbl != ATA_CBL_SATA) &&
 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
 			dev->spdn_cnt = 0;
@@ -1385,19 +1638,21 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
 }
 
 /**
- *	ata_eh_autopsy - analyze error and determine recovery action
- *	@ap: ATA port to perform autopsy on
+ *	ata_eh_link_autopsy - analyze error and determine recovery action
+ *	@link: host link to perform autopsy on
  *
- *	Analyze why @ap failed and determine which recovery action is
- *	needed.  This function also sets more detailed AC_ERR_* values
- *	and fills sense data for ATAPI CHECK SENSE.
+ *	Analyze why @link failed and determine which recovery actions
+ *	are needed.  This function also sets more detailed AC_ERR_*
+ *	values and fills sense data for ATAPI CHECK SENSE.
  *
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-static void ata_eh_autopsy(struct ata_port *ap)
+static void ata_eh_link_autopsy(struct ata_link *link)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev;
 	unsigned int all_err_mask = 0;
 	int tag, is_io = 0;
 	u32 serror;
@@ -1409,15 +1664,19 @@ static void ata_eh_autopsy(struct ata_port *ap)
 		return;
 
 	/* obtain and analyze SError */
-	rc = sata_scr_read(ap, SCR_ERROR, &serror);
+	rc = sata_scr_read(link, SCR_ERROR, &serror);
 	if (rc == 0) {
 		ehc->i.serror |= serror;
-		ata_eh_analyze_serror(ap);
-	} else if (rc != -EOPNOTSUPP)
+		ata_eh_analyze_serror(link);
+	} else if (rc != -EOPNOTSUPP) {
+		/* SError read failed, force hardreset and probing */
+		ata_ehi_schedule_probe(&ehc->i);
 		ehc->i.action |= ATA_EH_HARDRESET;
+		ehc->i.err_mask |= AC_ERR_OTHER;
+	}
 
 	/* analyze NCQ failure */
-	ata_eh_analyze_ncq_error(ap);
+	ata_eh_analyze_ncq_error(link);
 
 	/* any real error trumps AC_ERR_OTHER */
 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
@@ -1428,7 +1687,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED))
+		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
 			continue;
 
 		/* inherit upper level err_mask */
@@ -1447,10 +1706,8 @@ static void ata_eh_autopsy(struct ata_port *ap)
 			qc->err_mask &= ~AC_ERR_OTHER;
 
 		/* SENSE_VALID trumps dev/unknown error and revalidation */
-		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
-			ehc->i.action &= ~ATA_EH_REVALIDATE;
-		}
 
 		/* accumulate error info */
 		ehc->i.dev = qc->dev;
@@ -1463,39 +1720,69 @@ static void ata_eh_autopsy(struct ata_port *ap)
 	if (ap->pflags & ATA_PFLAG_FROZEN ||
 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
 		ehc->i.action |= ATA_EH_SOFTRESET;
-	else if (all_err_mask)
+	else if ((is_io && all_err_mask) ||
+		 (!is_io && (all_err_mask & ~AC_ERR_DEV)))
 		ehc->i.action |= ATA_EH_REVALIDATE;
 
-	/* if we have offending qcs and the associated failed device */
+	/* If we have offending qcs and the associated failed device,
+	 * perform per-dev EH action only on the offending device.
+	 */
 	if (ehc->i.dev) {
-		/* speed down */
-		ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
-						   all_err_mask);
-
-		/* perform per-dev EH action only on the offending device */
 		ehc->i.dev_action[ehc->i.dev->devno] |=
 			ehc->i.action & ATA_EH_PERDEV_MASK;
 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
 	}
 
+	/* consider speeding down */
+	dev = ehc->i.dev;
+	if (!dev && ata_link_max_devices(link) == 1 &&
+	    ata_dev_enabled(link->device))
+		dev = link->device;
+
+	if (dev)
+		ehc->i.action |= ata_eh_speed_down(dev, is_io, all_err_mask);
+
 	DPRINTK("EXIT\n");
 }
 
 /**
- *	ata_eh_report - report error handling to user
- *	@ap: ATA port EH is going on
+ *	ata_eh_autopsy - analyze error and determine recovery action
+ *	@ap: host port to perform autopsy on
+ *
+ *	Analyze all links of @ap and determine why they failed and
+ *	which recovery actions are needed.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void ata_eh_autopsy(struct ata_port *ap)
+{
+	struct ata_link *link;
+
+	__ata_port_for_each_link(link, ap)
+		ata_eh_link_autopsy(link);
+}
+
+/**
+ *	ata_eh_link_report - report error handling to user
+ *	@link: ATA link EH is going on
  *
  *	Report EH to user.
  *
  *	LOCKING:
  *	None.
  */
-static void ata_eh_report(struct ata_port *ap)
+static void ata_eh_link_report(struct ata_link *link)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
 	const char *frozen, *desc;
+	char tries_buf[6];
 	int tag, nr_failed = 0;
 
+	if (ehc->i.flags & ATA_EHI_QUIET)
+		return;
+
 	desc = NULL;
 	if (ehc->i.desc[0] != '\0')
 		desc = ehc->i.desc;
@@ -1503,7 +1790,9 @@ static void ata_eh_report(struct ata_port *ap)
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED))
+		if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link ||
+		    ((qc->flags & ATA_QCFLAG_QUIET) &&
+		     qc->err_mask == AC_ERR_DEV))
 			continue;
 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
 			continue;
@@ -1518,63 +1807,157 @@ static void ata_eh_report(struct ata_port *ap)
 	if (ap->pflags & ATA_PFLAG_FROZEN)
 		frozen = " frozen";
 
+	memset(tries_buf, 0, sizeof(tries_buf));
+	if (ap->eh_tries < ATA_EH_MAX_TRIES)
+		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
+			 ap->eh_tries);
+
 	if (ehc->i.dev) {
 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
-			       "SAct 0x%x SErr 0x%x action 0x%x%s\n",
-			       ehc->i.err_mask, ap->sactive, ehc->i.serror,
-			       ehc->i.action, frozen);
+			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+			       ehc->i.err_mask, link->sactive, ehc->i.serror,
+			       ehc->i.action, frozen, tries_buf);
 		if (desc)
-			ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
+			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
 	} else {
-		ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
-				"SAct 0x%x SErr 0x%x action 0x%x%s\n",
-				ehc->i.err_mask, ap->sactive, ehc->i.serror,
-				ehc->i.action, frozen);
+		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
+				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+				ehc->i.err_mask, link->sactive, ehc->i.serror,
+				ehc->i.action, frozen, tries_buf);
 		if (desc)
-			ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
+			ata_link_printk(link, KERN_ERR, "%s\n", desc);
 	}
 
+	if (ehc->i.serror)
+		ata_port_printk(ap, KERN_ERR,
+		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
+		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
+		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
+		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
+		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
+		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
+		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
+		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
+		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
+		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
+		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
+		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
+		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
+		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
+		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
+		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
+		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
+		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
+
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		static const char *dma_str[] = {
-			[DMA_BIDIRECTIONAL]	= "bidi",
-			[DMA_TO_DEVICE]		= "out",
-			[DMA_FROM_DEVICE]	= "in",
-			[DMA_NONE]		= "",
-		};
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
+		const u8 *cdb = qc->cdb;
+		char data_buf[20] = "";
+		char cdb_buf[70] = "";
 
-		if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
+		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+		    qc->dev->link != link || !qc->err_mask)
 			continue;
 
+		if (qc->dma_dir != DMA_NONE) {
+			static const char *dma_str[] = {
+				[DMA_BIDIRECTIONAL]	= "bidi",
+				[DMA_TO_DEVICE]		= "out",
+				[DMA_FROM_DEVICE]	= "in",
+			};
+			static const char *prot_str[] = {
+				[ATA_PROT_PIO]		= "pio",
+				[ATA_PROT_DMA]		= "dma",
+				[ATA_PROT_NCQ]		= "ncq",
+				[ATA_PROT_ATAPI]	= "pio",
+				[ATA_PROT_ATAPI_DMA]	= "dma",
+			};
+
+			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
+				 prot_str[qc->tf.protocol], qc->nbytes,
+				 dma_str[qc->dma_dir]);
+		}
+
+		if (is_atapi_taskfile(&qc->tf))
+			snprintf(cdb_buf, sizeof(cdb_buf),
+				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
+				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
+				 cdb[0], cdb[1], cdb[2], cdb[3],
+				 cdb[4], cdb[5], cdb[6], cdb[7],
+				 cdb[8], cdb[9], cdb[10], cdb[11],
+				 cdb[12], cdb[13], cdb[14], cdb[15]);
+
 		ata_dev_printk(qc->dev, KERN_ERR,
 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
-			"tag %d cdb 0x%x data %u %s\n         "
+			"tag %d%s\n         %s"
 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
-			"Emask 0x%x (%s)\n",
+			"Emask 0x%x (%s)%s\n",
 			cmd->command, cmd->feature, cmd->nsect,
 			cmd->lbal, cmd->lbam, cmd->lbah,
 			cmd->hob_feature, cmd->hob_nsect,
 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
-			cmd->device, qc->tag, qc->cdb[0], qc->nbytes,
-			dma_str[qc->dma_dir],
+			cmd->device, qc->tag, data_buf, cdb_buf,
 			res->command, res->feature, res->nsect,
 			res->lbal, res->lbam, res->lbah,
 			res->hob_feature, res->hob_nsect,
 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
-			res->device, qc->err_mask, ata_err_string(qc->err_mask));
+			res->device, qc->err_mask, ata_err_string(qc->err_mask),
+			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
+
+		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
+				    ATA_ERR)) {
+			if (res->command & ATA_BUSY)
+				ata_dev_printk(qc->dev, KERN_ERR,
+				  "status: { Busy }\n");
+			else
+				ata_dev_printk(qc->dev, KERN_ERR,
+				  "status: { %s%s%s%s}\n",
+				  res->command & ATA_DRDY ? "DRDY " : "",
+				  res->command & ATA_DF ? "DF " : "",
+				  res->command & ATA_DRQ ? "DRQ " : "",
+				  res->command & ATA_ERR ? "ERR " : "");
+		}
+
+		if (cmd->command != ATA_CMD_PACKET &&
+		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
+				     ATA_ABORTED)))
+			ata_dev_printk(qc->dev, KERN_ERR,
+			  "error: { %s%s%s%s}\n",
+			  res->feature & ATA_ICRC ? "ICRC " : "",
+			  res->feature & ATA_UNC ? "UNC " : "",
+			  res->feature & ATA_IDNF ? "IDNF " : "",
+			  res->feature & ATA_ABORTED ? "ABRT " : "");
 	}
 }
 
-static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
+/**
+ *	ata_eh_report - report error handling to user
+ *	@ap: ATA port to report EH about
+ *
+ *	Report EH to user.
+ *
+ *	LOCKING:
+ *	None.
+ */
+void ata_eh_report(struct ata_port *ap)
+{
+	struct ata_link *link;
+
+	__ata_port_for_each_link(link, ap)
+		ata_eh_link_report(link);
+}
+
+static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
 			unsigned int *classes, unsigned long deadline)
 {
-	int i, rc;
+	struct ata_device *dev;
+	int rc;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		classes[i] = ATA_DEV_UNKNOWN;
+	ata_link_for_each_dev(dev, link)
+		classes[dev->devno] = ATA_DEV_UNKNOWN;
 
-	rc = reset(ap, classes, deadline);
+	rc = reset(link, classes, deadline);
 	if (rc)
 		return rc;
 
@@ -1582,73 +1965,111 @@ static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
 	 * is complete and convert all ATA_DEV_UNKNOWN to
 	 * ATA_DEV_NONE.
 	 */
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		if (classes[i] != ATA_DEV_UNKNOWN)
+	ata_link_for_each_dev(dev, link)
+		if (classes[dev->devno] != ATA_DEV_UNKNOWN)
 			break;
 
-	if (i < ATA_MAX_DEVICES)
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			if (classes[i] == ATA_DEV_UNKNOWN)
-				classes[i] = ATA_DEV_NONE;
+	if (dev) {
+		ata_link_for_each_dev(dev, link) {
+			if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+				classes[dev->devno] = ATA_DEV_NONE;
+		}
+	}
 
 	return 0;
 }
 
-static int ata_eh_followup_srst_needed(int rc, int classify,
+static int ata_eh_followup_srst_needed(struct ata_link *link,
+				       int rc, int classify,
 				       const unsigned int *classes)
 {
+	if (link->flags & ATA_LFLAG_NO_SRST)
+		return 0;
 	if (rc == -EAGAIN)
 		return 1;
 	if (rc != 0)
 		return 0;
-	if (classify && classes[0] == ATA_DEV_UNKNOWN)
+	if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link))
+		return 1;
+	if (classify && !(link->flags & ATA_LFLAG_ASSUME_CLASS) &&
+	    classes[0] == ATA_DEV_UNKNOWN)
 		return 1;
 	return 0;
 }
 
-static int ata_eh_reset(struct ata_port *ap, int classify,
-			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
-			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+int ata_eh_reset(struct ata_link *link, int classify,
+		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts);
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
 	unsigned int *classes = ehc->classes;
+	unsigned int lflags = link->flags;
 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
 	int try = 0;
-	unsigned long deadline;
-	unsigned int action;
+	struct ata_device *dev;
+	unsigned long deadline, now;
+	unsigned int tmp_action;
 	ata_reset_fn_t reset;
-	int i, rc;
+	unsigned long flags;
+	u32 sstatus;
+	int rc;
 
 	/* about to reset */
-	ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags |= ATA_PFLAG_RESETTING;
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+
+	ata_link_for_each_dev(dev, link) {
+		/* If we issue an SRST then an ATA drive (not ATAPI)
+		 * may change configuration and be in PIO0 timing. If
+		 * we do a hard reset (or are coming from power on)
+		 * this is true for ATA or ATAPI. Until we've set a
+		 * suitable controller mode we should not touch the
+		 * bus as we may be talking too fast.
+		 */
+		dev->pio_mode = XFER_PIO_0;
+
+		/* If the controller has a pio mode setup function
+		 * then use it to set the chipset to rights. Don't
+		 * touch the DMA setup as that will be dealt with when
+		 * configuring devices.
+		 */
+		if (ap->ops->set_piomode)
+			ap->ops->set_piomode(ap, dev);
+	}
 
 	/* Determine which reset to use and record in ehc->i.action.
 	 * prereset() may examine and modify it.
 	 */
-	action = ehc->i.action;
-	ehc->i.action &= ~ATA_EH_RESET_MASK;
-	if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
-					 !(action & ATA_EH_HARDRESET))))
-		ehc->i.action |= ATA_EH_SOFTRESET;
+	if (softreset && (!hardreset || (!(lflags & ATA_LFLAG_NO_SRST) &&
+					 !sata_set_spd_needed(link) &&
+					 !(ehc->i.action & ATA_EH_HARDRESET))))
+		tmp_action = ATA_EH_SOFTRESET;
 	else
-		ehc->i.action |= ATA_EH_HARDRESET;
+		tmp_action = ATA_EH_HARDRESET;
+
+	ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action;
 
 	if (prereset) {
-		rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT);
+		rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
 		if (rc) {
 			if (rc == -ENOENT) {
-				ata_port_printk(ap, KERN_DEBUG,
+				ata_link_printk(link, KERN_DEBUG,
 						"port disabled. ignoring.\n");
-				ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
+				ehc->i.action &= ~ATA_EH_RESET_MASK;
 
-				for (i = 0; i < ATA_MAX_DEVICES; i++)
-					classes[i] = ATA_DEV_NONE;
+				ata_link_for_each_dev(dev, link)
+					classes[dev->devno] = ATA_DEV_NONE;
 
 				rc = 0;
 			} else
-				ata_port_printk(ap, KERN_ERR,
+				ata_link_printk(link, KERN_ERR,
 					"prereset failed (errno=%d)\n", rc);
-			return rc;
+			goto out;
 		}
 	}
 
@@ -1659,9 +2080,10 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 		reset = softreset;
 	else {
 		/* prereset told us not to reset, bang classes and return */
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			classes[i] = ATA_DEV_NONE;
-		return 0;
+		ata_link_for_each_dev(dev, link)
+			classes[dev->devno] = ATA_DEV_NONE;
+		rc = 0;
+		goto out;
 	}
 
 	/* did prereset() screw up?  if so, fix up to avoid oopsing */
@@ -1677,7 +2099,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 
 	/* shut up during boot probing */
 	if (verbose)
-		ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
+		ata_link_printk(link, KERN_INFO, "%s resetting link\n",
 				reset == softreset ? "soft" : "hard");
 
 	/* mark that this EH session started with reset */
@@ -1686,78 +2108,118 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
 	else
 		ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
 
-	rc = ata_do_reset(ap, reset, classes, deadline);
+	rc = ata_do_reset(link, reset, classes, deadline);
 
 	if (reset == hardreset &&
-	    ata_eh_followup_srst_needed(rc, classify, classes)) {
+	    ata_eh_followup_srst_needed(link, rc, classify, classes)) {
 		/* okay, let's do follow-up softreset */
 		reset = softreset;
 
 		if (!reset) {
-			ata_port_printk(ap, KERN_ERR,
+			ata_link_printk(link, KERN_ERR,
 					"follow-up softreset required "
 					"but no softreset avaliable\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto fail;
 		}
 
-		ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
-		rc = ata_do_reset(ap, reset, classes, deadline);
-
-		if (rc == 0 && classify &&
-		    classes[0] == ATA_DEV_UNKNOWN) {
-			ata_port_printk(ap, KERN_ERR,
-					"classification failed\n");
-			return -EINVAL;
-		}
+		ata_eh_about_to_do(link, NULL, ATA_EH_RESET_MASK);
+		rc = ata_do_reset(link, reset, classes, deadline);
 	}
 
-	if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) {
-		unsigned long now = jiffies;
-
-		if (time_before(now, deadline)) {
-			unsigned long delta = deadline - jiffies;
-
-			ata_port_printk(ap, KERN_WARNING, "reset failed "
-				"(errno=%d), retrying in %u secs\n",
-				rc, (jiffies_to_msecs(delta) + 999) / 1000);
+	/* -EAGAIN can happen if we skipped followup SRST */
+	if (rc && rc != -EAGAIN)
+		goto fail;
 
-			schedule_timeout_uninterruptible(delta);
+	/* was classification successful? */
+	if (classify && classes[0] == ATA_DEV_UNKNOWN &&
+	    !(lflags & ATA_LFLAG_ASSUME_CLASS)) {
+		if (try < max_tries) {
+			ata_link_printk(link, KERN_WARNING,
+					"classification failed\n");
+			rc = -EINVAL;
+			goto fail;
 		}
 
-		if (reset == hardreset &&
-		    try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
-			sata_down_spd_limit(ap);
-		if (hardreset)
-			reset = hardreset;
-		goto retry;
+		ata_link_printk(link, KERN_WARNING,
+				"classification failed, assuming ATA\n");
+		lflags |= ATA_LFLAG_ASSUME_ATA;
 	}
 
-	if (rc == 0) {
+	ata_link_for_each_dev(dev, link) {
 		/* After the reset, the device state is PIO 0 and the
-		 * controller state is undefined.  Record the mode.
+		 * controller state is undefined.  Reset also wakes up
+		 * drives from sleeping mode.
 		 */
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			ap->device[i].pio_mode = XFER_PIO_0;
+		dev->pio_mode = XFER_PIO_0;
+		dev->flags &= ~ATA_DFLAG_SLEEPING;
 
-		if (postreset)
-			postreset(ap, classes);
+		if (ata_link_offline(link))
+			continue;
 
-		/* reset successful, schedule revalidation */
-		ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
-		ehc->i.action |= ATA_EH_REVALIDATE;
+		/* apply class override and convert UNKNOWN to NONE */
+		if (lflags & ATA_LFLAG_ASSUME_ATA)
+			classes[dev->devno] = ATA_DEV_ATA;
+		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
+			classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
+		else if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+			classes[dev->devno] = ATA_DEV_NONE;
 	}
 
+	/* record current link speed */
+	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
+		link->sata_spd = (sstatus >> 4) & 0xf;
+
+	if (postreset)
+		postreset(link, classes);
+
+	/* reset successful, schedule revalidation */
+	ata_eh_done(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+	ehc->i.action |= ATA_EH_REVALIDATE;
+
+	rc = 0;
+ out:
+	/* clear hotplug flag */
+	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->pflags &= ~ATA_PFLAG_RESETTING;
+	spin_unlock_irqrestore(ap->lock, flags);
+
 	return rc;
+
+ fail:
+	if (rc == -ERESTART || try >= max_tries)
+		goto out;
+
+	now = jiffies;
+	if (time_before(now, deadline)) {
+		unsigned long delta = deadline - now;
+
+		ata_link_printk(link, KERN_WARNING, "reset failed "
+				"(errno=%d), retrying in %u secs\n",
+				rc, (jiffies_to_msecs(delta) + 999) / 1000);
+
+		while (delta)
+			delta = schedule_timeout_uninterruptible(delta);
+	}
+
+	if (rc == -EPIPE || try == max_tries - 1)
+		sata_down_spd_limit(link);
+	if (hardreset)
+		reset = hardreset;
+	goto retry;
 }
 
-static int ata_eh_revalidate_and_attach(struct ata_port *ap,
+static int ata_eh_revalidate_and_attach(struct ata_link *link,
 					struct ata_device **r_failed_dev)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &link->eh_context;
 	struct ata_device *dev;
 	unsigned int new_mask = 0;
 	unsigned long flags;
-	int i, rc = 0;
+	int rc = 0;
 
 	DPRINTK("ENTER\n");
 
@@ -1765,27 +2227,28 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
 	 * be done backwards such that PDIAG- is released by the slave
 	 * device before the master device is identified.
 	 */
-	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
-		unsigned int action, readid_flags = 0;
-
-		dev = &ap->device[i];
-		action = ata_eh_dev_action(dev);
+	ata_link_for_each_dev_reverse(dev, link) {
+		unsigned int action = ata_eh_dev_action(dev);
+		unsigned int readid_flags = 0;
 
 		if (ehc->i.flags & ATA_EHI_DID_RESET)
 			readid_flags |= ATA_READID_POSTRESET;
 
 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
-			if (ata_port_offline(ap)) {
+			WARN_ON(dev->class == ATA_DEV_PMP);
+
+			if (ata_link_offline(link)) {
 				rc = -EIO;
 				goto err;
 			}
 
-			ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
-			rc = ata_dev_revalidate(dev, readid_flags);
+			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
+			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
+						readid_flags);
 			if (rc)
 				goto err;
 
-			ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
+			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
 
 			/* Configuration may have changed, reconfigure
 			 * transfer mode.
@@ -1799,11 +2262,14 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
 			   ata_class_enabled(ehc->classes[dev->devno])) {
 			dev->class = ehc->classes[dev->devno];
 
-			rc = ata_dev_read_id(dev, &dev->class, readid_flags,
-					     dev->id);
+			if (dev->class == ATA_DEV_PMP)
+				rc = sata_pmp_attach(dev);
+			else
+				rc = ata_dev_read_id(dev, &dev->class,
+						     readid_flags, dev->id);
 			switch (rc) {
 			case 0:
-				new_mask |= 1 << i;
+				new_mask |= 1 << dev->devno;
 				break;
 			case -ENOENT:
 				/* IDENTIFY was issued to non-existent
@@ -1821,16 +2287,16 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
 	}
 
 	/* PDIAG- should have been released, ask cable type if post-reset */
-	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ap->ops->cable_detect)
+	if (ata_is_host_link(link) && ap->ops->cable_detect &&
+	    (ehc->i.flags & ATA_EHI_DID_RESET))
 		ap->cbl = ap->ops->cable_detect(ap);
 
 	/* Configure new devices forward such that user doesn't see
 	 * device detection messages backwards.
 	 */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
-
-		if (!(new_mask & (1 << i)))
+	ata_link_for_each_dev(dev, link) {
+		if (!(new_mask & (1 << dev->devno)) ||
+		    dev->class == ATA_DEV_PMP)
 			continue;
 
 		ehc->i.flags |= ATA_EHI_PRINTINFO;
@@ -1855,40 +2321,44 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
 	return rc;
 }
 
-static int ata_port_nr_enabled(struct ata_port *ap)
+static int ata_link_nr_enabled(struct ata_link *link)
 {
-	int i, cnt = 0;
+	struct ata_device *dev;
+	int cnt = 0;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		if (ata_dev_enabled(&ap->device[i]))
+	ata_link_for_each_dev(dev, link)
+		if (ata_dev_enabled(dev))
 			cnt++;
 	return cnt;
 }
 
-static int ata_port_nr_vacant(struct ata_port *ap)
+static int ata_link_nr_vacant(struct ata_link *link)
 {
-	int i, cnt = 0;
+	struct ata_device *dev;
+	int cnt = 0;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		if (ap->device[i].class == ATA_DEV_UNKNOWN)
+	ata_link_for_each_dev(dev, link)
+		if (dev->class == ATA_DEV_UNKNOWN)
 			cnt++;
 	return cnt;
 }
 
-static int ata_eh_skip_recovery(struct ata_port *ap)
+static int ata_eh_skip_recovery(struct ata_link *link)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
-	int i;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev;
+
+	/* skip disabled links */
+	if (link->flags & ATA_LFLAG_DISABLED)
+		return 1;
 
 	/* thaw frozen port, resume link and recover failed devices */
-	if ((ap->pflags & ATA_PFLAG_FROZEN) ||
-	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
+	if ((link->ap->pflags & ATA_PFLAG_FROZEN) ||
+	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_link_nr_enabled(link))
 		return 0;
 
 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-
+	ata_link_for_each_dev(dev, link) {
 		if (dev->class == ATA_DEV_UNKNOWN &&
 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
 			return 0;
@@ -1897,6 +2367,60 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
 	return 1;
 }
 
+static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+
+	ehc->tries[dev->devno]--;
+
+	switch (err) {
+	case -ENODEV:
+		/* device missing or wrong IDENTIFY data, schedule probing */
+		ehc->i.probe_mask |= (1 << dev->devno);
+	case -EINVAL:
+		/* give it just one more chance */
+		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+	case -EIO:
+		if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) {
+			/* This is the last chance, better to slow
+			 * down than lose it.
+			 */
+			sata_down_spd_limit(dev->link);
+			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		}
+	}
+
+	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
+		/* disable device if it has used up all its chances */
+		ata_dev_disable(dev);
+
+		/* detach if offline */
+		if (ata_link_offline(dev->link))
+			ata_eh_detach_dev(dev);
+
+		/* probe if requested */
+		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
+		    !(ehc->did_probe_mask & (1 << dev->devno))) {
+			ata_eh_detach_dev(dev);
+			ata_dev_init(dev);
+
+			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+			ehc->did_probe_mask |= (1 << dev->devno);
+			ehc->i.action |= ATA_EH_SOFTRESET;
+		}
+
+		return 1;
+	} else {
+		/* soft didn't work?  be haaaaard */
+		if (ehc->i.flags & ATA_EHI_DID_RESET)
+			ehc->i.action |= ATA_EH_HARDRESET;
+		else
+			ehc->i.action |= ATA_EH_SOFTRESET;
+
+		return 0;
+	}
+}
+
 /**
  *	ata_eh_recover - recover host port after error
  *	@ap: host port to recover
@@ -1904,12 +2428,13 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
  *	@softreset: softreset method (can be NULL)
  *	@hardreset: hardreset method (can be NULL)
  *	@postreset: postreset method (can be NULL)
+ *	@r_failed_link: out parameter for failed link
  *
  *	This is the alpha and omega, eum and yang, heart and soul of
  *	libata exception handling.  On entry, actions required to
- *	recover the port and hotplug requests are recorded in
- *	eh_context.  This function executes all the operations with
- *	appropriate retrials and fallbacks to resurrect failed
+ *	recover each link and hotplug requests are recorded in the
+ *	link's eh_context.  This function executes all the operations
+ *	with appropriate retrials and fallbacks to resurrect failed
  *	devices, detach goners and greet newcomers.
  *
  *	LOCKING:
@@ -1918,146 +2443,174 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
  *	RETURNS:
  *	0 on success, -errno on failure.
  */
-static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
-			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
-			  ata_postreset_fn_t postreset)
+int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+		   ata_postreset_fn_t postreset,
+		   struct ata_link **r_failed_link)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_link *link;
 	struct ata_device *dev;
-	int i, rc;
+	int nr_failed_devs, nr_disabled_devs;
+	int reset, rc;
+	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
 	/* prep for recovery */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		dev = &ap->device[i];
-
-		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
-
-		/* collect port action mask recorded in dev actions */
-		ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK;
-		ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK;
-
-		/* process hotplug request */
-		if (dev->flags & ATA_DFLAG_DETACH)
-			ata_eh_detach_dev(dev);
+	ata_port_for_each_link(link, ap) {
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		/* re-enable link? */
+		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
+			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
+			spin_lock_irqsave(ap->lock, flags);
+			link->flags &= ~ATA_LFLAG_DISABLED;
+			spin_unlock_irqrestore(ap->lock, flags);
+			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
+		}
 
-		if (!ata_dev_enabled(dev) &&
-		    ((ehc->i.probe_mask & (1 << dev->devno)) &&
-		     !(ehc->did_probe_mask & (1 << dev->devno)))) {
-			ata_eh_detach_dev(dev);
-			ata_dev_init(dev);
-			ehc->did_probe_mask |= (1 << dev->devno);
-			ehc->i.action |= ATA_EH_SOFTRESET;
+		ata_link_for_each_dev(dev, link) {
+			if (link->flags & ATA_LFLAG_NO_RETRY)
+				ehc->tries[dev->devno] = 1;
+			else
+				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+
+			/* collect port action mask recorded in dev actions */
+			ehc->i.action |= ehc->i.dev_action[dev->devno] &
+					 ~ATA_EH_PERDEV_MASK;
+			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
+
+			/* process hotplug request */
+			if (dev->flags & ATA_DFLAG_DETACH)
+				ata_eh_detach_dev(dev);
+
+			if (!ata_dev_enabled(dev) &&
+			    ((ehc->i.probe_mask & (1 << dev->devno)) &&
+			     !(ehc->did_probe_mask & (1 << dev->devno)))) {
+				ata_eh_detach_dev(dev);
+				ata_dev_init(dev);
+				ehc->did_probe_mask |= (1 << dev->devno);
+				ehc->i.action |= ATA_EH_SOFTRESET;
+			}
 		}
 	}
 
  retry:
 	rc = 0;
+	nr_failed_devs = 0;
+	nr_disabled_devs = 0;
+	reset = 0;
 
 	/* if UNLOADING, finish immediately */
 	if (ap->pflags & ATA_PFLAG_UNLOADING)
 		goto out;
 
-	/* skip EH if possible. */
-	if (ata_eh_skip_recovery(ap))
-		ehc->i.action = 0;
+	/* prep for EH */
+	ata_port_for_each_link(link, ap) {
+		struct ata_eh_context *ehc = &link->eh_context;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++)
-		ehc->classes[i] = ATA_DEV_UNKNOWN;
+		/* skip EH if possible. */
+		if (ata_eh_skip_recovery(link))
+			ehc->i.action = 0;
 
-	/* reset */
-	if (ehc->i.action & ATA_EH_RESET_MASK) {
-		ata_eh_freeze_port(ap);
+		/* do we need to reset? */
+		if (ehc->i.action & ATA_EH_RESET_MASK)
+			reset = 1;
 
-		rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
-				  softreset, hardreset, postreset);
-		if (rc) {
-			ata_port_printk(ap, KERN_ERR,
-					"reset failed, giving up\n");
-			goto out;
+		ata_link_for_each_dev(dev, link)
+			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
+	}
+
+	/* reset */
+	if (reset) {
+		/* if PMP is attached, this function only deals with
+		 * downstream links, port should stay thawed.
+		 */
+		if (!ap->nr_pmp_links)
+			ata_eh_freeze_port(ap);
+
+		ata_port_for_each_link(link, ap) {
+			struct ata_eh_context *ehc = &link->eh_context;
+
+			if (!(ehc->i.action & ATA_EH_RESET_MASK))
+				continue;
+
+			rc = ata_eh_reset(link, ata_link_nr_vacant(link),
+					  prereset, softreset, hardreset,
+					  postreset);
+			if (rc) {
+				ata_link_printk(link, KERN_ERR,
+						"reset failed, giving up\n");
+				goto out;
+			}
 		}
 
-		ata_eh_thaw_port(ap);
+		if (!ap->nr_pmp_links)
+			ata_eh_thaw_port(ap);
 	}
 
-	/* revalidate existing devices and attach new ones */
-	rc = ata_eh_revalidate_and_attach(ap, &dev);
-	if (rc)
-		goto dev_fail;
+	/* the rest */
+	ata_port_for_each_link(link, ap) {
+		struct ata_eh_context *ehc = &link->eh_context;
 
-	/* configure transfer mode if necessary */
-	if (ehc->i.flags & ATA_EHI_SETMODE) {
-		rc = ata_set_mode(ap, &dev);
+		/* revalidate existing devices and attach new ones */
+		rc = ata_eh_revalidate_and_attach(link, &dev);
 		if (rc)
 			goto dev_fail;
-		ehc->i.flags &= ~ATA_EHI_SETMODE;
-	}
-
-	goto out;
 
- dev_fail:
-	ehc->tries[dev->devno]--;
+		/* if PMP got attached, return, pmp EH will take care of it */
+		if (link->device->class == ATA_DEV_PMP) {
+			ehc->i.action = 0;
+			return 0;
+		}
 
-	switch (rc) {
-	case -ENODEV:
-		/* device missing or wrong IDENTIFY data, schedule probing */
-		ehc->i.probe_mask |= (1 << dev->devno);
-	case -EINVAL:
-		/* give it just one more chance */
-		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
-	case -EIO:
-		if (ehc->tries[dev->devno] == 1) {
-			/* This is the last chance, better to slow
-			 * down than lose it.
-			 */
-			sata_down_spd_limit(ap);
-			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+		/* configure transfer mode if necessary */
+		if (ehc->i.flags & ATA_EHI_SETMODE) {
+			rc = ata_set_mode(link, &dev);
+			if (rc)
+				goto dev_fail;
+			ehc->i.flags &= ~ATA_EHI_SETMODE;
 		}
-	}
 
-	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
-		/* disable device if it has used up all its chances */
-		ata_dev_disable(dev);
+		if (ehc->i.action & ATA_EHI_LPM)
+			ata_link_for_each_dev(dev, link)
+				ata_dev_enable_pm(dev, ap->pm_policy);
 
-		/* detach if offline */
-		if (ata_port_offline(ap))
-			ata_eh_detach_dev(dev);
+		/* this link is okay now */
+		ehc->i.flags = 0;
+		continue;
 
-		/* probe if requested */
-		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
-		    !(ehc->did_probe_mask & (1 << dev->devno))) {
-			ata_eh_detach_dev(dev);
-			ata_dev_init(dev);
+dev_fail:
+		nr_failed_devs++;
+		if (ata_eh_handle_dev_fail(dev, rc))
+			nr_disabled_devs++;
 
-			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
-			ehc->did_probe_mask |= (1 << dev->devno);
-			ehc->i.action |= ATA_EH_SOFTRESET;
+		if (ap->pflags & ATA_PFLAG_FROZEN) {
+			/* PMP reset requires working host port.
+			 * Can't retry if it's frozen.
+			 */
+			if (ap->nr_pmp_links)
+				goto out;
+			break;
 		}
-	} else {
-		/* soft didn't work?  be haaaaard */
-		if (ehc->i.flags & ATA_EHI_DID_RESET)
-			ehc->i.action |= ATA_EH_HARDRESET;
-		else
-			ehc->i.action |= ATA_EH_SOFTRESET;
 	}
 
-	if (ata_port_nr_enabled(ap)) {
-		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
-				"devices, retrying in 5 secs\n");
-		ssleep(5);
-	} else {
-		/* no device left, repeat fast */
-		msleep(500);
-	}
+	if (nr_failed_devs) {
+		if (nr_failed_devs != nr_disabled_devs) {
+			ata_port_printk(ap, KERN_WARNING, "failed to recover "
+					"some devices, retrying in 5 secs\n");
+			ssleep(5);
+		} else {
+			/* no device left to recover, repeat fast */
+			msleep(500);
+		}
 
-	goto retry;
+		goto retry;
+	}
 
  out:
-	if (rc) {
-		for (i = 0; i < ATA_MAX_DEVICES; i++)
-			ata_dev_disable(&ap->device[i]);
-	}
+	if (rc && r_failed_link)
+		*r_failed_link = link;
 
 	DPRINTK("EXIT, rc=%d\n", rc);
 	return rc;
@@ -2073,7 +2626,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
  *	LOCKING:
  *	None.
  */
-static void ata_eh_finish(struct ata_port *ap)
+void ata_eh_finish(struct ata_port *ap)
 {
 	int tag;
 
@@ -2088,8 +2641,15 @@ static void ata_eh_finish(struct ata_port *ap)
 			/* FIXME: Once EH migration is complete,
 			 * generate sense data in this function,
 			 * considering both err_mask and tf.
+			 *
+			 * There's no point in retrying invalid
+			 * (detected by libata) and non-IO device
+			 * errors (rejected by device).  Finish them
+			 * immediately.
 			 */
-			if (qc->err_mask & AC_ERR_INVALID)
+			if ((qc->err_mask & AC_ERR_INVALID) ||
+			    (!(qc->flags & ATA_QCFLAG_IO) &&
+			     qc->err_mask == AC_ERR_DEV))
 				ata_eh_qc_complete(qc);
 			else
 				ata_eh_qc_retry(qc);
@@ -2103,6 +2663,10 @@ static void ata_eh_finish(struct ata_port *ap)
 			}
 		}
 	}
+
+	/* make sure nr_active_links is zero after EH */
+	WARN_ON(ap->nr_active_links);
+	ap->nr_active_links = 0;
 }
 
 /**
@@ -2122,9 +2686,19 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
 	       ata_postreset_fn_t postreset)
 {
+	struct ata_device *dev;
+	int rc;
+
 	ata_eh_autopsy(ap);
 	ata_eh_report(ap);
-	ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
+
+	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
+			    NULL);
+	if (rc) {
+		ata_link_for_each_dev(dev, &ap->link)
+			ata_dev_disable(dev);
+	}
+
 	ata_eh_finish(ap);
 }
 
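For orientation (an illustration, not part of the patch): a simple non-PMP LLDD's ->error_handler can still just call ata_do_eh(), which after this change runs autopsy, report and recovery per link, and disables the host link's devices if recovery ultimately fails.  A minimal sketch using the stock libata reset methods; only the wrapper name is hypothetical.

#include <linux/libata.h>

static void my_drv_error_handler(struct ata_port *ap)
{
	/* prereset/softreset/hardreset/postreset now all take an ata_link */
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}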
@@ -2154,19 +2728,25 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
 
 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
 
+	/* tell ACPI we're suspending */
+	rc = ata_acpi_on_suspend(ap);
+	if (rc)
+		goto out;
+
 	/* suspend */
 	ata_eh_freeze_port(ap);
 
 	if (ap->ops->port_suspend)
 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
 
+ out:
 	/* report result */
 	spin_lock_irqsave(ap->lock, flags);
 
 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
 	if (rc == 0)
 		ap->pflags |= ATA_PFLAG_SUSPENDED;
-	else
+	else if (ap->pflags & ATA_PFLAG_FROZEN)
 		ata_port_schedule_eh(ap);
 
 	if (ap->pm_result) {
@@ -2207,6 +2787,9 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 	if (ap->ops->port_resume)
 		rc = ap->ops->port_resume(ap);
 
+	/* tell ACPI that we're resuming */
+	ata_acpi_on_resume(ap);
+
 	/* report result */
 	spin_lock_irqsave(ap->lock, flags);
 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
new file mode 100644
index 0000000..c0c4dbc
--- /dev/null
+++ b/drivers/ata/libata-pmp.c
@@ -0,0 +1,1191 @@
+/*
+ * libata-pmp.c - libata port multiplier support
+ *
+ * Copyright (c) 2007  SUSE Linux Products GmbH
+ * Copyright (c) 2007  Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include "libata.h"
+
+/**
+ *	sata_pmp_read - read PMP register
+ *	@link: link to read PMP register for
+ *	@reg: register to read
+ *	@r_val: resulting value
+ *
+ *	Read PMP register.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *pmp_dev = ap->link.device;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(pmp_dev, &tf);
+	tf.command = ATA_CMD_PMP_READ;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.feature = reg;
+	tf.device = link->pmp;
+
+	err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+				     SATA_PMP_SCR_TIMEOUT);
+	if (err_mask)
+		return err_mask;
+
+	*r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
+	return 0;
+}
+
+/**
+ *	sata_pmp_write - write PMP register
+ *	@link: link to write PMP register for
+ *	@reg: register to write
+ *	@r_val: value to write
+ *
+ *	Write PMP register.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *pmp_dev = ap->link.device;
+	struct ata_taskfile tf;
+
+	ata_tf_init(pmp_dev, &tf);
+	tf.command = ATA_CMD_PMP_WRITE;
+	tf.protocol = ATA_PROT_NODATA;
+	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf.feature = reg;
+	tf.device = link->pmp;
+	tf.nsect = val & 0xff;
+	tf.lbal = (val >> 8) & 0xff;
+	tf.lbam = (val >> 16) & 0xff;
+	tf.lbah = (val >> 24) & 0xff;
+
+	return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+				 SATA_PMP_SCR_TIMEOUT);
+}
+
+/**
+ *	sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP
+ *	@qc: ATA command in question
+ *
+ *	A host which has command switching PMP support cannot issue
+ *	commands to multiple links simultaneously.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+
+	if (ap->excl_link == NULL || ap->excl_link == link) {
+		if (ap->nr_active_links == 0 || ata_link_active(link)) {
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+			return ata_std_qc_defer(qc);
+		}
+
+		ap->excl_link = link;
+	}
+
+	return ATA_DEFER_PORT;
+}
+
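Illustration, not part of the patch: a controller that can switch commands between fan-out links but cannot keep more than one link active at a time would point its ->qc_defer at the helper above, so libata holds back qcs aimed at a second link while another link still has commands in flight.  The ops structure below is a hypothetical fragment; only the .qc_defer hookup is the point.

static const struct ata_port_operations my_drv_pmp_ops = {
	/* ... the driver's usual callbacks go here ... */
	.qc_defer	= sata_pmp_qc_defer_cmd_switch,
};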
+/**
+ *	sata_pmp_scr_read - read PSCR
+ *	@link: ATA link to read PSCR for
+ *	@reg: PSCR to read
+ *	@r_val: resulting value
+ *
+ *	Read PSCR @reg into @r_val for @link, to be called from
+ *	ata_scr_read().
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
+{
+	unsigned int err_mask;
+
+	if (reg > SATA_PMP_PSCR_CONTROL)
+		return -EINVAL;
+
+	err_mask = sata_pmp_read(link, reg, r_val);
+	if (err_mask) {
+		ata_link_printk(link, KERN_WARNING, "failed to read SCR %d "
+				"(Emask=0x%x)\n", reg, err_mask);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ *	sata_pmp_scr_write - write PSCR
+ *	@link: ATA link to write PSCR for
+ *	@reg: PSCR to write
+ *	@val: value to be written
+ *
+ *	Write @val to PSCR @reg for @link, to be called from
+ *	ata_scr_write() and ata_scr_write_flush().
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
+{
+	unsigned int err_mask;
+
+	if (reg > SATA_PMP_PSCR_CONTROL)
+		return -EINVAL;
+
+	err_mask = sata_pmp_write(link, reg, val);
+	if (err_mask) {
+		ata_link_printk(link, KERN_WARNING, "failed to write SCR %d "
+				"(Emask=0x%x)\n", reg, err_mask);
+		return -EIO;
+	}
+	return 0;
+}
+
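Illustration, not part of the patch: callers keep using the generic sata_scr_read()/sata_scr_write() on fan-out links; for a non-host link those calls are expected to be routed to the two helpers above, which tunnel the register access through READ/WRITE PORT MULTIPLIER commands on the host link.  A small hypothetical check:

#include <linux/libata.h>

static int my_fanout_link_is_up(struct ata_link *link)
{
	u32 sstatus;

	/* reads PSCR 0 of the PMP port behind link->pmp */
	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return 0;

	return (sstatus & 0xf) == 0x3;	/* DET: device present, PHY online */
}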
+/**
+ *	sata_pmp_std_prereset - prepare PMP link for reset
+ *	@link: link to be reset
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	@link is about to be reset.  Initialize it.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	const unsigned long *timing = sata_ehc_deb_timing(ehc);
+	int rc;
+
+	/* force HRST? */
+	if (link->flags & ATA_LFLAG_NO_SRST)
+		ehc->i.action |= ATA_EH_HARDRESET;
+
+	/* handle link resume */
+	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
+	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
+		ehc->i.action |= ATA_EH_HARDRESET;
+
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
+
+	/* resume link */
+	rc = sata_link_resume(link, timing, deadline);
+	if (rc) {
+		/* phy resume failed */
+		ata_link_printk(link, KERN_WARNING, "failed to resume link "
+				"for reset (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* clear SError bits including .X which blocks the port when set */
+	rc = sata_scr_write(link, SCR_ERROR, 0xffffffff);
+	if (rc) {
+		ata_link_printk(link, KERN_ERR,
+				"failed to clear SError (errno=%d)\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ *	sata_pmp_std_hardreset - standard hardreset method for PMP link
+ *	@link: link to be reset
+ *	@class: resulting class of attached device
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Hardreset PMP port @link.  Note that this function doesn't
+ *	wait for BSY clearance.  There simply isn't a generic way to
+ *	wait for the event.  Instead, this function returns -EAGAIN,
+ *	telling libata-EH to follow up with softreset.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline)
+{
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	u32 tmp;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	/* do hardreset */
+	rc = sata_link_hardreset(link, timing, deadline);
+	if (rc) {
+		ata_link_printk(link, KERN_ERR,
+				"COMRESET failed (errno=%d)\n", rc);
+		goto out;
+	}
+
+	/* clear SError bits including .X which blocks the port when set */
+	rc = sata_scr_write(link, SCR_ERROR, 0xffffffff);
+	if (rc) {
+		ata_link_printk(link, KERN_ERR, "failed to clear SError "
+				"during hardreset (errno=%d)\n", rc);
+		goto out;
+	}
+
+	/* if device is present, follow up with srst to wait for !BSY */
+	if (ata_link_online(link))
+		rc = -EAGAIN;
+ out:
+	/* if SCR isn't accessible, we need to reset the PMP */
+	if (rc && rc != -EAGAIN && sata_scr_read(link, SCR_STATUS, &tmp))
+		rc = -ERESTART;
+
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	sata_pmp_std_postreset - standard postreset method for PMP link
+ *	@link: the target ata_link
+ *	@class: resulting class of attached device
+ *
+ *	This function is invoked after a successful reset.  Note that
+ *	the device might have been reset more than once using
+ *	different reset methods before postreset is invoked.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class)
+{
+	u32 serror;
+
+	DPRINTK("ENTER\n");
+
+	/* clear SError */
+	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
+		sata_scr_write(link, SCR_ERROR, serror);
+
+	/* print link status */
+	sata_print_link_status(link);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	sata_pmp_read_gscr - read GSCR block of SATA PMP
+ *	@dev: PMP device
+ *	@gscr: buffer to read GSCR block into
+ *
+ *	Read selected PMP GSCRs from the PMP at @dev.  This will serve
+ *	as configuration and identification info for the PMP.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
+{
+	static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 };
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) {
+		int reg = gscr_to_read[i];
+		unsigned int err_mask;
+
+		err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
+		if (err_mask) {
+			ata_dev_printk(dev, KERN_ERR, "failed to read PMP "
+				"GSCR[%d] (Emask=0x%x)\n", reg, err_mask);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static const char *sata_pmp_spec_rev_str(const u32 *gscr)
+{
+	u32 rev = gscr[SATA_PMP_GSCR_REV];
+
+	if (rev & (1 << 2))
+		return "1.1";
+	if (rev & (1 << 1))
+		return "1.0";
+	return "<unknown>";
+}
+
+static int sata_pmp_configure(struct ata_device *dev, int print_info)
+{
+	struct ata_port *ap = dev->link->ap;
+	u32 *gscr = dev->gscr;
+	unsigned int err_mask = 0;
+	const char *reason;
+	int nr_ports, rc;
+
+	nr_ports = sata_pmp_gscr_ports(gscr);
+
+	if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) {
+		rc = -EINVAL;
+		reason = "invalid nr_ports";
+		goto fail;
+	}
+
+	if ((ap->flags & ATA_FLAG_AN) &&
+	    (gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY))
+		dev->flags |= ATA_DFLAG_AN;
+
+	/* monitor SERR_PHYRDY_CHG on fan-out ports */
+	err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN,
+				  SERR_PHYRDY_CHG);
+	if (err_mask) {
+		rc = -EIO;
+		reason = "failed to write GSCR_ERROR_EN";
+		goto fail;
+	}
+
+	/* turn off notification till fan-out ports are reset and configured */
+	if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
+		gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
+
+		err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_FEAT_EN,
+					  gscr[SATA_PMP_GSCR_FEAT_EN]);
+		if (err_mask) {
+			rc = -EIO;
+			reason = "failed to write GSCR_FEAT_EN";
+			goto fail;
+		}
+	}
+
+	if (print_info) {
+		ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
+			       "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
+			       sata_pmp_spec_rev_str(gscr),
+			       sata_pmp_gscr_vendor(gscr),
+			       sata_pmp_gscr_devid(gscr),
+			       sata_pmp_gscr_rev(gscr),
+			       nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
+			       gscr[SATA_PMP_GSCR_FEAT]);
+
+		if (!(dev->flags & ATA_DFLAG_AN))
+			ata_dev_printk(dev, KERN_INFO,
+				"Asynchronous notification not supported, "
+				"hotplug won't\n         work on fan-out "
+				"ports. Use warm-plug instead.\n");
+	}
+
+	return 0;
+
+ fail:
+	ata_dev_printk(dev, KERN_ERR,
+		       "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
+		       reason, err_mask);
+	return rc;
+}
+
+static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
+{
+	struct ata_link *pmp_link = ap->pmp_link;
+	int i;
+
+	if (!pmp_link) {
+		pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
+				   GFP_NOIO);
+		if (!pmp_link)
+			return -ENOMEM;
+
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_link_init(ap, &pmp_link[i], i);
+
+		ap->pmp_link = pmp_link;
+	}
+
+	for (i = 0; i < nr_ports; i++) {
+		struct ata_link *link = &pmp_link[i];
+		struct ata_eh_context *ehc = &link->eh_context;
+
+		link->flags = 0;
+		ehc->i.probe_mask |= 1;
+		ehc->i.action |= ATA_EH_SOFTRESET;
+		ehc->i.flags |= ATA_EHI_RESUME_LINK;
+	}
+
+	return 0;
+}
+
+static void sata_pmp_quirks(struct ata_port *ap)
+{
+	u32 *gscr = ap->link.device->gscr;
+	u16 vendor = sata_pmp_gscr_vendor(gscr);
+	u16 devid = sata_pmp_gscr_devid(gscr);
+	struct ata_link *link;
+
+	if (vendor == 0x1095 && devid == 0x3726) {
+		/* sil3726 quirks */
+		ata_port_for_each_link(link, ap) {
+			/* SError.N needs a kick in the ass to get working */
+			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
+
+			/* class code report is unreliable */
+			if (link->pmp < 5)
+				link->flags |= ATA_LFLAG_ASSUME_ATA;
+
+			/* port 5 is for SEMB device and it doesn't like SRST */
+			if (link->pmp == 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_SEMB;
+		}
+	} else if (vendor == 0x1095 && devid == 0x4723) {
+		/* sil4723 quirks */
+		ata_port_for_each_link(link, ap) {
+			/* SError.N needs a kick in the ass to get working */
+			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
+
+			/* class code report is unreliable */
+			if (link->pmp < 2)
+				link->flags |= ATA_LFLAG_ASSUME_ATA;
+
+			/* the config device at port 2 locks up on SRST */
+			if (link->pmp == 2)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
+		}
+	} else if (vendor == 0x1095 && devid == 0x4726) {
+		/* sil4726 quirks */
+		ata_port_for_each_link(link, ap) {
+			/* SError.N needs a kick in the ass to get working */
+			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
+
+			/* class code report is unreliable */
+			if (link->pmp < 5)
+				link->flags |= ATA_LFLAG_ASSUME_ATA;
+
+			/* The config device, which can be either at
+			 * port 0 or 5, locks up on SRST.
+			 */
+			if (link->pmp == 0 || link->pmp == 5)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
+
+			/* Port 6 is for SEMB device which doesn't
+			 * like SRST either.
+			 */
+			if (link->pmp == 6)
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_SEMB;
+		}
+	} else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 ||
+					devid == 0x5734 || devid == 0x5744)) {
+		/* sil5723/5744 quirks */
+
+		/* sil5723/5744 has either two or three downstream
+		 * ports depending on operation mode.  The last port
+		 * is empty if an actual IO device is attached, and is
+		 * occupied by a pseudo configuration device otherwise.
+		 * Don't try hard to recover it.
+		 */
+		ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+	} else if (vendor == 0x11ab && devid == 0x4140) {
+		/* Marvell 88SM4140 quirks.  Fan-out ports require PHY
+		 * reset to work; other than that, it behaves very
+		 * nicely.
+		 */
+		ata_port_for_each_link(link, ap)
+			link->flags |= ATA_LFLAG_HRST_TO_RESUME;
+	}
+}
+
+/**
+ *	sata_pmp_attach - attach a SATA PMP device
+ *	@dev: SATA PMP device to attach
+ *
+ *	Configure and attach SATA PMP device @dev.  This function is
+ *	also responsible for allocating and initializing PMP links.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+int sata_pmp_attach(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	unsigned long flags;
+	struct ata_link *tlink;
+	int rc;
+
+	/* is it hanging off the right place? */
+	if (!(ap->flags & ATA_FLAG_PMP)) {
+		ata_dev_printk(dev, KERN_ERR,
+			       "host does not support Port Multiplier\n");
+		return -EINVAL;
+	}
+
+	if (!ata_is_host_link(link)) {
+		ata_dev_printk(dev, KERN_ERR,
+			       "Port Multipliers cannot be nested\n");
+		return -EINVAL;
+	}
+
+	if (dev->devno) {
+		ata_dev_printk(dev, KERN_ERR,
+			       "Port Multiplier must be the first device\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(link->pmp != 0);
+	link->pmp = SATA_PMP_CTRL_PORT;
+
+	/* read GSCR block */
+	rc = sata_pmp_read_gscr(dev, dev->gscr);
+	if (rc)
+		goto fail;
+
+	/* config PMP */
+	rc = sata_pmp_configure(dev, 1);
+	if (rc)
+		goto fail;
+
+	rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
+	if (rc) {
+		ata_dev_printk(dev, KERN_INFO,
+			       "failed to initialize PMP links\n");
+		goto fail;
+	}
+
+	/* attach it */
+	spin_lock_irqsave(ap->lock, flags);
+	WARN_ON(ap->nr_pmp_links);
+	ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr);
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	sata_pmp_quirks(ap);
+
+	if (ap->ops->pmp_attach)
+		ap->ops->pmp_attach(ap);
+
+	ata_port_for_each_link(tlink, ap)
+		sata_link_init_spd(tlink);
+
+	ata_acpi_associate_sata_port(ap);
+
+	return 0;
+
+ fail:
+	link->pmp = 0;
+	return rc;
+}
+
+/**
+ *	sata_pmp_detach - detach a SATA PMP device
+ *	@dev: SATA PMP device to detach
+ *
+ *	Detach SATA PMP device @dev.  This function is also
+ *	responsible for deconfiguring PMP links.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+static void sata_pmp_detach(struct ata_device *dev)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	struct ata_link *tlink;
+	unsigned long flags;
+
+	ata_dev_printk(dev, KERN_INFO, "Port Multiplier detaching\n");
+
+	WARN_ON(!ata_is_host_link(link) || dev->devno ||
+		link->pmp != SATA_PMP_CTRL_PORT);
+
+	if (ap->ops->pmp_detach)
+		ap->ops->pmp_detach(ap);
+
+	ata_port_for_each_link(tlink, ap)
+		ata_eh_detach_dev(tlink->device);
+
+	spin_lock_irqsave(ap->lock, flags);
+	ap->nr_pmp_links = 0;
+	link->pmp = 0;
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	ata_acpi_associate_sata_port(ap);
+}
+
+/**
+ *	sata_pmp_same_pmp - does the new GSCR match the configured PMP?
+ *	@dev: PMP device to compare against
+ *	@new_gscr: GSCR block of the new device
+ *
+ *	Compare @new_gscr against @dev and determine whether @dev is
+ *	the PMP described by @new_gscr.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	1 if @dev matches @new_gscr, 0 otherwise.
+ */
+static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
+{
+	const u32 *old_gscr = dev->gscr;
+	u16 old_vendor, new_vendor, old_devid, new_devid;
+	int old_nr_ports, new_nr_ports;
+
+	old_vendor = sata_pmp_gscr_vendor(old_gscr);
+	new_vendor = sata_pmp_gscr_vendor(new_gscr);
+	old_devid = sata_pmp_gscr_devid(old_gscr);
+	new_devid = sata_pmp_gscr_devid(new_gscr);
+	old_nr_ports = sata_pmp_gscr_ports(old_gscr);
+	new_nr_ports = sata_pmp_gscr_ports(new_gscr);
+
+	if (old_vendor != new_vendor) {
+		ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
+			       "vendor mismatch '0x%x' != '0x%x'\n",
+			       old_vendor, new_vendor);
+		return 0;
+	}
+
+	if (old_devid != new_devid) {
+		ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
+			       "device ID mismatch '0x%x' != '0x%x'\n",
+			       old_devid, new_devid);
+		return 0;
+	}
+
+	if (old_nr_ports != new_nr_ports) {
+		ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
+			       "nr_ports mismatch '0x%x' != '0x%x'\n",
+			       old_nr_ports, new_nr_ports);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ *	sata_pmp_revalidate - revalidate SATA PMP
+ *	@dev: PMP device to revalidate
+ *	@new_class: new class code
+ *
+ *	Re-read GSCR block and make sure @dev is still attached to the
+ *	port and properly configured.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
+{
+	struct ata_link *link = dev->link;
+	struct ata_port *ap = link->ap;
+	u32 *gscr = (void *)ap->sector_buf;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
+
+	if (!ata_dev_enabled(dev)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	/* wrong class? */
+	if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	/* read GSCR */
+	rc = sata_pmp_read_gscr(dev, gscr);
+	if (rc)
+		goto fail;
+
+	/* is the pmp still there? */
+	if (!sata_pmp_same_pmp(dev, gscr)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS);
+
+	rc = sata_pmp_configure(dev, 0);
+	if (rc)
+		goto fail;
+
+	ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
+
+	DPRINTK("EXIT, rc=0\n");
+	return 0;
+
+ fail:
+	ata_dev_printk(dev, KERN_ERR,
+		       "PMP revalidation failed (errno=%d)\n", rc);
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ *	sata_pmp_revalidate_quick - revalidate SATA PMP quickly
+ *	@dev: PMP device to revalidate
+ *
+ *	Make sure the attached PMP is accessible.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+static int sata_pmp_revalidate_quick(struct ata_device *dev)
+{
+	unsigned int err_mask;
+	u32 prod_id;
+
+	err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
+	if (err_mask) {
+		ata_dev_printk(dev, KERN_ERR, "failed to read PMP product ID "
+			       "(Emask=0x%x)\n", err_mask);
+		return -EIO;
+	}
+
+	if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
+		ata_dev_printk(dev, KERN_ERR, "PMP product ID mismatch\n");
+		/* something weird is going on, request full PMP recovery */
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ *	sata_pmp_eh_recover_pmp - recover PMP
+ *	@ap: ATA port PMP is attached to
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method
+ *	@hardreset: hardreset method
+ *	@postreset: postreset method (can be NULL)
+ *
+ *	Recover PMP attached to @ap.  Recovery procedure is somewhat
+ *	similar to that of ata_eh_recover() except that reset should
+ *	always be performed in hard->soft sequence and recovery
+ *	failure results in PMP detachment.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
+		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+	struct ata_link *link = &ap->link;
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev = link->device;
+	int tries = ATA_EH_PMP_TRIES;
+	int detach = 0, rc = 0;
+	int reval_failed = 0;
+
+	DPRINTK("ENTER\n");
+
+	if (dev->flags & ATA_DFLAG_DETACH) {
+		detach = 1;
+		goto fail;
+	}
+
+ retry:
+	ehc->classes[0] = ATA_DEV_UNKNOWN;
+
+	if (ehc->i.action & ATA_EH_RESET_MASK) {
+		struct ata_link *tlink;
+
+		ata_eh_freeze_port(ap);
+
+		/* reset */
+		ehc->i.action = ATA_EH_HARDRESET;
+		rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
+				  postreset);
+		if (rc) {
+			ata_link_printk(link, KERN_ERR,
+					"failed to reset PMP, giving up\n");
+			goto fail;
+		}
+
+		ata_eh_thaw_port(ap);
+
+		/* PMP is reset, SErrors cannot be trusted, scan all */
+		ata_port_for_each_link(tlink, ap)
+			ata_ehi_schedule_probe(&tlink->eh_context.i);
+	}
+
+	/* If revalidation is requested, revalidate and reconfigure;
+	 * otherwise, do quick revalidation.
+	 */
+	if (ehc->i.action & ATA_EH_REVALIDATE)
+		rc = sata_pmp_revalidate(dev, ehc->classes[0]);
+	else
+		rc = sata_pmp_revalidate_quick(dev);
+
+	if (rc) {
+		tries--;
+
+		if (rc == -ENODEV) {
+			ehc->i.probe_mask |= 1;
+			detach = 1;
+			/* give it just two more chances */
+			tries = min(tries, 2);
+		}
+
+		if (tries) {
+			int sleep = ehc->i.flags & ATA_EHI_DID_RESET;
+
+			/* consecutive revalidation failures? speed down */
+			if (reval_failed)
+				sata_down_spd_limit(link);
+			else
+				reval_failed = 1;
+
+			ata_dev_printk(dev, KERN_WARNING,
+				       "retrying hardreset%s\n",
+				       sleep ? " in 5 secs" : "");
+			if (sleep)
+				ssleep(5);
+			ehc->i.action |= ATA_EH_HARDRESET;
+			goto retry;
+		} else {
+			ata_dev_printk(dev, KERN_ERR, "failed to recover PMP "
+				       "after %d tries, giving up\n",
+				       ATA_EH_PMP_TRIES);
+			goto fail;
+		}
+	}
+
+	/* okay, PMP resurrected */
+	ehc->i.flags = 0;
+
+	DPRINTK("EXIT, rc=0\n");
+	return 0;
+
+ fail:
+	sata_pmp_detach(dev);
+	if (detach)
+		ata_eh_detach_dev(dev);
+	else
+		ata_dev_disable(dev);
+
+	DPRINTK("EXIT, rc=%d\n", rc);
+	return rc;
+}
+
+static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
+{
+	struct ata_link *link;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	ata_port_for_each_link(link, ap) {
+		if (!(link->flags & ATA_LFLAG_DISABLED))
+			continue;
+
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* Some PMPs require a hardreset sequence to get
+		 * SError.N working.
+		 */
+		if ((link->flags & ATA_LFLAG_HRST_TO_RESUME) &&
+		    (link->eh_context.i.flags & ATA_EHI_RESUME_LINK))
+			sata_link_hardreset(link, sata_deb_timing_normal,
+					    jiffies + ATA_TMOUT_INTERNAL_QUICK);
+
+		/* unconditionally clear SError.N */
+		rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
+		if (rc) {
+			ata_link_printk(link, KERN_ERR, "failed to clear "
+					"SError.N (errno=%d)\n", rc);
+			return rc;
+		}
+
+		spin_lock_irqsave(ap->lock, flags);
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return 0;
+}
+
+static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
+{
+	struct ata_port *ap = link->ap;
+	unsigned long flags;
+
+	if (link_tries[link->pmp] && --link_tries[link->pmp])
+		return 1;
+
+	/* disable this link */
+	if (!(link->flags & ATA_LFLAG_DISABLED)) {
+		ata_link_printk(link, KERN_WARNING,
+			"failed to recover link after %d tries, disabling\n",
+			ATA_EH_PMP_LINK_TRIES);
+
+		spin_lock_irqsave(ap->lock, flags);
+		link->flags |= ATA_LFLAG_DISABLED;
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+
+	ata_dev_disable(link->device);
+	link->eh_context.i.action = 0;
+
+	return 0;
+}
+
+/**
+ *	sata_pmp_eh_recover - recover PMP-enabled port
+ *	@ap: ATA port to recover
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method
+ *	@hardreset: hardreset method
+ *	@postreset: postreset method (can be NULL)
+ *	@pmp_prereset: PMP prereset method (can be NULL)
+ *	@pmp_softreset: PMP softreset method (can be NULL)
+ *	@pmp_hardreset: PMP hardreset method (can be NULL)
+ *	@pmp_postreset: PMP postreset method (can be NULL)
+ *
+ *	Drive EH recovery operation for PMP enabled port @ap.  This
+ *	function recovers host and PMP ports with proper retries and
+ *	fallbacks.  Actual recovery operations are performed using
+ *	ata_eh_recover() and sata_pmp_eh_recover_pmp().
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sata_pmp_eh_recover(struct ata_port *ap,
+		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
+		ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
+		ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
+{
+	int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
+	struct ata_link *pmp_link = &ap->link;
+	struct ata_device *pmp_dev = pmp_link->device;
+	struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned int err_mask;
+	u32 gscr_error, sntf;
+	int cnt, rc;
+
+	pmp_tries = ATA_EH_PMP_TRIES;
+	ata_port_for_each_link(link, ap)
+		link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
+
+ retry:
+	/* PMP attached? */
+	if (!ap->nr_pmp_links) {
+		rc = ata_eh_recover(ap, prereset, softreset, hardreset,
+				    postreset, NULL);
+		if (rc) {
+			ata_link_for_each_dev(dev, &ap->link)
+				ata_dev_disable(dev);
+			return rc;
+		}
+
+		if (pmp_dev->class != ATA_DEV_PMP)
+			return 0;
+
+		/* new PMP online */
+		ata_port_for_each_link(link, ap)
+			link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
+
+		/* fall through */
+	}
+
+	/* recover pmp */
+	rc = sata_pmp_eh_recover_pmp(ap, prereset, softreset, hardreset,
+				     postreset);
+	if (rc)
+		goto pmp_fail;
+
+	/* handle disabled links */
+	rc = sata_pmp_eh_handle_disabled_links(ap);
+	if (rc)
+		goto pmp_fail;
+
+	/* recover links */
+	rc = ata_eh_recover(ap, pmp_prereset, pmp_softreset, pmp_hardreset,
+			    pmp_postreset, &link);
+	if (rc)
+		goto link_fail;
+
+	/* Connection status might have changed while resetting other
+	 * links, check SATA_PMP_GSCR_ERROR before returning.
+	 */
+
+	/* clear SNotification */
+	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+	if (rc == 0)
+		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+	/* enable notification */
+	if (pmp_dev->flags & ATA_DFLAG_AN) {
+		pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
+
+		err_mask = sata_pmp_write(pmp_dev->link, SATA_PMP_GSCR_FEAT_EN,
+					  pmp_dev->gscr[SATA_PMP_GSCR_FEAT_EN]);
+		if (err_mask) {
+			ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
+				       "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
+			rc = -EIO;
+			goto pmp_fail;
+		}
+	}
+
+	/* check GSCR_ERROR */
+	err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
+	if (err_mask) {
+		ata_dev_printk(pmp_dev, KERN_ERR, "failed to read "
+			       "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask);
+		rc = -EIO;
+		goto pmp_fail;
+	}
+
+	cnt = 0;
+	ata_port_for_each_link(link, ap) {
+		if (!(gscr_error & (1 << link->pmp)))
+			continue;
+
+		if (sata_pmp_handle_link_fail(link, link_tries)) {
+			ata_ehi_hotplugged(&link->eh_context.i);
+			cnt++;
+		} else {
+			ata_link_printk(link, KERN_WARNING,
+				"PHY status changed but maxed out on retries, "
+				"giving up\n");
+			ata_link_printk(link, KERN_WARNING,
+				"Manually issue scan to resume this link\n");
+		}
+	}
+
+	if (cnt) {
+		ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some "
+				"ports, repeating recovery\n");
+		goto retry;
+	}
+
+	return 0;
+
+ link_fail:
+	if (sata_pmp_handle_link_fail(link, link_tries)) {
+		pmp_ehc->i.action |= ATA_EH_HARDRESET;
+		goto retry;
+	}
+
+	/* fall through */
+ pmp_fail:
+	/* Control always ends up here after detaching PMP.  Shut up
+	 * and return if we're unloading.
+	 */
+	if (ap->pflags & ATA_PFLAG_UNLOADING)
+		return rc;
+
+	if (!ap->nr_pmp_links)
+		goto retry;
+
+	if (--pmp_tries) {
+		ata_port_printk(ap, KERN_WARNING,
+				"failed to recover PMP, retrying in 5 secs\n");
+		pmp_ehc->i.action |= ATA_EH_HARDRESET;
+		ssleep(5);
+		goto retry;
+	}
+
+	ata_port_printk(ap, KERN_ERR,
+			"failed to recover PMP after %d tries, giving up\n",
+			ATA_EH_PMP_TRIES);
+	sata_pmp_detach(pmp_dev);
+	ata_dev_disable(pmp_dev);
+
+	return rc;
+}
+
+/**
+ *	sata_pmp_do_eh - do standard error handling for PMP-enabled host
+ *	@ap: host port to handle error for
+ *	@prereset: prereset method (can be NULL)
+ *	@softreset: softreset method
+ *	@hardreset: hardreset method
+ *	@postreset: postreset method (can be NULL)
+ *	@pmp_prereset: PMP prereset method (can be NULL)
+ *	@pmp_softreset: PMP softreset method (can be NULL)
+ *	@pmp_hardreset: PMP hardreset method (can be NULL)
+ *	@pmp_postreset: PMP postreset method (can be NULL)
+ *
+ *	Perform standard error handling sequence for PMP-enabled host
+ *	@ap.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ */
+void sata_pmp_do_eh(struct ata_port *ap,
+		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
+		ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
+		ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset)
+{
+	ata_eh_autopsy(ap);
+	ata_eh_report(ap);
+	sata_pmp_eh_recover(ap, prereset, softreset, hardreset, postreset,
+			    pmp_prereset, pmp_softreset, pmp_hardreset,
+			    pmp_postreset);
+	ata_eh_finish(ap);
+}
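The recovery path above bounds its work: up to ATA_EH_PMP_TRIES attempts, escalating to hardreset and sleeping five seconds between attempts before the PMP is finally detached and disabled.  A minimal userspace sketch of that control flow, kept separate from the patch itself; try_recover() and the constant below are invented stand-ins, not kernel interfaces:

#include <stdio.h>
#include <unistd.h>

#define PMP_TRIES 5

/* stand-in for "hardreset + revalidate"; fails twice, then succeeds */
static int try_recover(int attempt)
{
	return attempt < 2 ? -1 : 0;
}

int main(void)
{
	int tries = PMP_TRIES;
	int attempt = 0;

	while (tries--) {
		if (try_recover(attempt++) == 0) {
			printf("recovered after %d attempt(s)\n", attempt);
			return 0;
		}
		fprintf(stderr, "retrying hardreset in 5 secs\n");
		sleep(5);
	}
	fprintf(stderr, "giving up after %d tries\n", PMP_TRIES);
	return 1;
}

The kernel version additionally caps the remaining tries at two once -ENODEV is seen, so a PMP that has genuinely vanished only gets a short grace period.
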
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d97b2a9..68c9b63 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -45,7 +45,7 @@
 #include <scsi/scsi_transport.h>
 #include <linux/libata.h>
 #include <linux/hdreg.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "libata.h"
 
@@ -53,9 +53,9 @@
 
 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
 
-static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
+static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
 					const struct scsi_device *scsidev);
-static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
+static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
 					    const struct scsi_device *scsidev);
 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 			      unsigned int id, unsigned int lun);
@@ -71,11 +71,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 #define ALL_SUB_MPAGES 0xff
 
 
-static const u8 def_rw_recovery_mpage[] = {
+static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
 	RW_RECOVERY_MPAGE,
 	RW_RECOVERY_MPAGE_LEN - 2,
-	(1 << 7) |	/* AWRE, sat-r06 say it shall be 0 */
-	    (1 << 6),	/* ARRE (auto read reallocation) */
+	(1 << 7),	/* AWRE */
 	0,		/* read retry count */
 	0, 0, 0, 0,
 	0,		/* write retry count */
@@ -111,6 +110,74 @@ static struct scsi_transport_template ata_scsi_transport_template = {
 };
 
 
+static const struct {
+	enum link_pm	value;
+	const char	*name;
+} link_pm_policy[] = {
+	{ NOT_AVAILABLE, "max_performance" },
+	{ MIN_POWER, "min_power" },
+	{ MAX_PERFORMANCE, "max_performance" },
+	{ MEDIUM_POWER, "medium_power" },
+};
+
+static const char *ata_scsi_lpm_get(enum link_pm policy)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
+		if (link_pm_policy[i].value == policy)
+			return link_pm_policy[i].name;
+
+	return NULL;
+}
+
+static ssize_t ata_scsi_lpm_put(struct class_device *class_dev,
+	const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(class_dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	enum link_pm policy = 0;
+	int i;
+
+	/*
+	 * we are skipping array location 0 on purpose - this
+	 * is because a value of NOT_AVAILABLE is displayed
+	 * to the user as max_performance, but when the user
+	 * writes "max_performance", they actually want the
+	 * value to match MAX_PERFORMANCE.
+	 */
+	for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
+		const int len = strlen(link_pm_policy[i].name);
+		if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
+		   buf[len] == '\n') {
+			policy = link_pm_policy[i].value;
+			break;
+		}
+	}
+	if (!policy)
+		return -EINVAL;
+
+	ata_lpm_schedule(ap, policy);
+	return count;
+}
+
+static ssize_t
+ata_scsi_lpm_show(struct class_device *class_dev, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(class_dev);
+	struct ata_port *ap = ata_shost_to_port(shost);
+	const char *policy =
+		ata_scsi_lpm_get(ap->pm_policy);
+
+	if (!policy)
+		return -EINVAL;
+
+	return snprintf(buf, 23, "%s\n", policy);
+}
+CLASS_DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+		ata_scsi_lpm_show, ata_scsi_lpm_put);
+EXPORT_SYMBOL_GPL(class_device_attr_link_power_management_policy);
+
 static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
 				   void (*done)(struct scsi_cmnd *))
 {
@@ -229,7 +296,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 
 		scsi_cmd[1]  = (4 << 1); /* PIO Data-in */
 		scsi_cmd[2]  = 0x0e;     /* no off.line or cc, read from dev,
-		                            block count in sector count field */
+					    block count in sector count field */
 		data_dir = DMA_FROM_DEVICE;
 	} else {
 		scsi_cmd[1]  = (3 << 1); /* Non-data */
@@ -253,7 +320,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 	/* Good values for timeout and retries?  Values below
 	   from scsi_ioctl_send_command() for default case... */
 	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
-	                          sensebuf, (10*HZ), 5, 0);
+				  sensebuf, (10*HZ), 5, 0);
 
 	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
 		u8 *desc = sensebuf + 8;
@@ -264,18 +331,18 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
 			struct scsi_sense_hdr sshdr;
 			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
-			                      &sshdr);
-			if (sshdr.sense_key==0 &&
-			    sshdr.asc==0 && sshdr.ascq==0)
+					     &sshdr);
+			if (sshdr.sense_key == 0 &&
+			    sshdr.asc == 0 && sshdr.ascq == 0)
 				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
 		}
 
 		/* Send userspace a few ATA registers (same as drivers/ide) */
-		if (sensebuf[0] == 0x72 &&     /* format is "descriptor" */
-		    desc[0] == 0x09 ) {        /* code is "ATA Descriptor" */
-			args[0] = desc[13];    /* status */
-			args[1] = desc[3];     /* error */
-			args[2] = desc[5];     /* sector count (0:7) */
+		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
+		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
+			args[0] = desc[13];	/* status */
+			args[1] = desc[3];	/* error */
+			args[2] = desc[5];	/* sector count (0:7) */
 			if (copy_to_user(arg, args, sizeof(args)))
 				rc = -EFAULT;
 		}
@@ -351,8 +418,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
 			struct scsi_sense_hdr sshdr;
 			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
 						&sshdr);
-			if (sshdr.sense_key==0 &&
-				sshdr.asc==0 && sshdr.ascq==0)
+			if (sshdr.sense_key == 0 &&
+				sshdr.asc == 0 && sshdr.ascq == 0)
 				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
 		}
 
@@ -451,8 +518,8 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 		qc->scsidone = done;
 
 		if (cmd->use_sg) {
-			qc->__sg = (struct scatterlist *) cmd->request_buffer;
-			qc->n_elem = cmd->use_sg;
+			qc->__sg = scsi_sglist(cmd);
+			qc->n_elem = scsi_sg_count(cmd);
 		} else if (cmd->request_bufflen) {
 			qc->__sg = &qc->sgent;
 			qc->n_elem = 1;
@@ -755,6 +822,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
 {
 	sdev->use_10_for_rw = 1;
 	sdev->use_10_for_ms = 1;
+
+	/* Schedule policy is determined by the ->qc_defer() callback and
+	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
+	 * prevent SCSI midlayer from automatically deferring
+	 * requests.
+	 */
+	sdev->max_device_blocked = 1;
 }
 
 static void ata_scsi_dev_config(struct scsi_device *sdev,
@@ -768,10 +842,17 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 	 * Decrement max hw segments accordingly.
 	 */
 	if (dev->class == ATA_DEV_ATAPI) {
-		request_queue_t *q = sdev->request_queue;
+		struct request_queue *q = sdev->request_queue;
 		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
 	}
 
+	if (dev->flags & ATA_DFLAG_AN) {
+		struct scsi_device_shadow *shdev = sdev_shadow(sdev);
+
+		if (shdev)
+			set_bit(SDEV_EVT_MEDIA_CHANGE, shdev->supported_events);
+	}
+
 	if (dev->flags & ATA_DFLAG_NCQ) {
 		int depth;
 
@@ -800,8 +881,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 
 	ata_scsi_sdev_config(sdev);
 
-	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);
-
 	sdev->manage_start_stop = 1;
 
 	if (dev)
@@ -943,6 +1022,13 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 		goto invalid_fld;       /* LOEJ bit set not supported */
 	if (((cdb[4] >> 4) & 0xf) != 0)
 		goto invalid_fld;       /* power conditions not supported */
+
+	if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) {
+		/* the device lacks PM support, finish without doing anything */
+		scmd->result = SAM_STAT_GOOD;
+		return 1;
+	}
+
 	if (cdb[4] & 0x1) {
 		tf->nsect = 1;	/* 1 sector, lba=0 */
 
@@ -969,7 +1055,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
 		if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
 		    (system_state == SYSTEM_HALT ||
 		     system_state == SYSTEM_POWER_OFF)) {
-			static unsigned long warned = 0;
+			static unsigned long warned;
 
 			if (!test_and_set_bit(0, &warned)) {
 				ata_dev_printk(qc->dev, KERN_WARNING,
@@ -1034,6 +1120,9 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
 	else
 		tf->command = ATA_CMD_FLUSH;
 
+	/* flush is critical for IO integrity, consider it an IO command */
+	qc->flags |= ATA_QCFLAG_IO;
+
 	return 0;
 }
 
@@ -1357,29 +1446,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	u8 *cdb = cmd->cmnd;
- 	int need_sense = (qc->err_mask != 0);
-
-	/* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
-	 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
-	 * cache
-	 */
-	if (ap->ops->error_handler && !need_sense) {
-		switch (qc->tf.command) {
-		case ATA_CMD_SET_FEATURES:
-			if ((qc->tf.feature == SETFEATURES_WC_ON) ||
-			    (qc->tf.feature == SETFEATURES_WC_OFF)) {
-				ap->eh_info.action |= ATA_EH_REVALIDATE;
-				ata_port_schedule_eh(ap);
-			}
-			break;
-
-		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
-		case ATA_CMD_SET_MULTI: /* multi_count changed */
-			ap->eh_info.action |= ATA_EH_REVALIDATE;
-			ata_port_schedule_eh(ap);
-			break;
-		}
-	}
+	int need_sense = (qc->err_mask != 0);
 
 	/* For ATA pass thru (SAT) commands, generate a sense block if
 	 * user mandated it or if there's an error.  Note that if we
@@ -1389,7 +1456,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 	 * was no error, SK, ASC and ASCQ will all be zero.
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
- 	    ((cdb[2] & 0x20) || need_sense)) {
+	    ((cdb[2] & 0x20) || need_sense)) {
 		ata_gen_passthru_sense(qc);
 	} else {
 		if (!need_sense) {
@@ -1422,37 +1489,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 }
 
 /**
- *	ata_scmd_need_defer - Check whether we need to defer scmd
- *	@dev: ATA device to which the command is addressed
- *	@is_io: Is the command IO (and thus possibly NCQ)?
- *
- *	NCQ and non-NCQ commands cannot run together.  As upper layer
- *	only knows the queue depth, we are responsible for maintaining
- *	exclusion.  This function checks whether a new command can be
- *	issued to @dev.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- *	RETURNS:
- *	1 if deferring is needed, 0 otherwise.
- */
-static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
-{
-	struct ata_port *ap = dev->ap;
-	int is_ncq = is_io && ata_ncq_enabled(dev);
-
-	if (is_ncq) {
-		if (!ata_tag_valid(ap->active_tag))
-			return 0;
-	} else {
-		if (!ata_tag_valid(ap->active_tag) && !ap->sactive)
-			return 0;
-	}
-	return 1;
-}
-
-/**
  *	ata_scsi_translate - Translate then issue SCSI command to ATA device
  *	@dev: ATA device to which the command is addressed
  *	@cmd: SCSI command to execute
@@ -1483,14 +1519,12 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *),
 			      ata_xlat_func_t xlat_func)
 {
+	struct ata_port *ap = dev->link->ap;
 	struct ata_queued_cmd *qc;
-	int is_io = xlat_func == ata_scsi_rw_xlat;
+	int rc;
 
 	VPRINTK("ENTER\n");
 
-	if (unlikely(ata_scmd_need_defer(dev, is_io)))
-		goto defer;
-
 	qc = ata_scsi_qc_new(dev, cmd, done);
 	if (!qc)
 		goto err_mem;
@@ -1498,14 +1532,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	/* data is present; dma-map it */
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	    cmd->sc_data_direction == DMA_TO_DEVICE) {
-		if (unlikely(cmd->request_bufflen < 1)) {
+		if (unlikely(scsi_bufflen(cmd) < 1)) {
 			ata_dev_printk(dev, KERN_WARNING,
 				       "WARNING: zero len r/w req\n");
 			goto err_did;
 		}
 
 		if (cmd->use_sg)
-			ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
+			ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
 		else
 			ata_sg_init_one(qc, cmd->request_buffer,
 					cmd->request_bufflen);
@@ -1518,6 +1552,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	if (xlat_func(qc))
 		goto early_finish;
 
+	if (ap->ops->qc_defer) {
+		if ((rc = ap->ops->qc_defer(qc)))
+			goto defer;
+	}
+
 	/* select device, send command to hardware */
 	ata_qc_issue(qc);
 
@@ -1525,7 +1564,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	return 0;
 
 early_finish:
-        ata_qc_free(qc);
+	ata_qc_free(qc);
 	qc->scsidone(cmd);
 	DPRINTK("EXIT - early finish (good or error)\n");
 	return 0;
@@ -1539,8 +1578,12 @@ err_mem:
 	return 0;
 
 defer:
+	ata_qc_free(qc);
 	DPRINTK("EXIT - defer\n");
-	return SCSI_MLQUEUE_DEVICE_BUSY;
+	if (rc == ATA_DEFER_LINK)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	else
+		return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 /**
@@ -1562,11 +1605,10 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
 	u8 *buf;
 	unsigned int buflen;
 
-	if (cmd->use_sg) {
-		struct scatterlist *sg;
+	struct scatterlist *sg = scsi_sglist(cmd);
 
-		sg = (struct scatterlist *) cmd->request_buffer;
-		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	if (sg) {
+		buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 		buflen = sg->length;
 	} else {
 		buf = cmd->request_buffer;
@@ -1590,12 +1632,9 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
 
 static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
 {
-	if (cmd->use_sg) {
-		struct scatterlist *sg;
-
-		sg = (struct scatterlist *) cmd->request_buffer;
+	struct scatterlist *sg = scsi_sglist(cmd);
+	if (sg)
 		kunmap_atomic(buf - sg->offset, KM_IRQ0);
-	}
 }
 
 /**
@@ -1615,8 +1654,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
  */
 
 void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
-		        unsigned int (*actor) (struct ata_scsi_args *args,
-			     		   u8 *rbuf, unsigned int buflen))
+			unsigned int (*actor) (struct ata_scsi_args *args,
+					       u8 *rbuf, unsigned int buflen))
 {
 	u8 *rbuf;
 	unsigned int buflen, rc;
@@ -1817,6 +1856,62 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
 }
 
 /**
+ *	ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
+ *	@args: device IDENTIFY data / SCSI command of interest.
+ *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *	@buflen: Response buffer length.
+ *
+ *	Yields SAT-specified ATA VPD page.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+
+unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
+			      unsigned int buflen)
+{
+	u8 pbuf[60];
+	struct ata_taskfile tf;
+	unsigned int i;
+
+	if (!buflen)
+		return 0;
+
+	memset(&pbuf, 0, sizeof(pbuf));
+	memset(&tf, 0, sizeof(tf));
+
+	pbuf[1] = 0x89;			/* our page code */
+	pbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
+	pbuf[3] = (0x238 & 0xff);
+
+	memcpy(&pbuf[8], "linux   ", 8);
+	memcpy(&pbuf[16], "libata          ", 16);
+	memcpy(&pbuf[32], DRV_VERSION, 4);
+	ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4);
+
+	/* we don't store the ATA device signature, so we fake it */
+
+	tf.command = ATA_DRDY;		/* really, this is Status reg */
+	tf.lbal = 0x1;
+	tf.nsect = 0x1;
+
+	ata_tf_to_fis(&tf, 0, 1, &pbuf[36]);	/* TODO: PMP? */
+	pbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */
+
+	pbuf[56] = ATA_CMD_ID_ATA;
+
+	i = min(buflen, 60U);
+	memcpy(rbuf, &pbuf[0], i);
+	buflen -= i;
+
+	if (!buflen)
+		return 0;
+
+	memcpy(&rbuf[60], &args->id[0], min(buflen, 512U));
+	return 0;
+}
+
+/**
  *	ata_scsiop_noop - Command handler that simply returns success.
  *	@args: device IDENTIFY data / SCSI command of interest.
  *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
@@ -2109,7 +2204,7 @@ saving_not_supp:
  *	None.
  */
 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
-			        unsigned int buflen)
+				 unsigned int buflen)
 {
 	u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
 
@@ -2273,8 +2368,8 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
 		qc->tf.feature |= ATAPI_PKT_DMA;
 	} else {
 		qc->tf.protocol = ATA_PROT_ATAPI;
-		qc->tf.lbam = (8 * 1024) & 0xff;
-		qc->tf.lbah = (8 * 1024) >> 8;
+		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
+		qc->tf.lbah = 0;
 	}
 	qc->nbytes = SCSI_SENSE_BUFFERSIZE;
 
@@ -2383,6 +2478,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 	struct ata_device *dev = qc->dev;
 	int using_pio = (dev->flags & ATA_DFLAG_PIO);
 	int nodata = (scmd->sc_data_direction == DMA_NONE);
+	unsigned int nbytes;
 
 	memset(qc->cdb, 0, dev->cdb_len);
 	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
@@ -2396,20 +2492,55 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 	}
 
 	qc->tf.command = ATA_CMD_PACKET;
-	qc->nbytes = scmd->request_bufflen;
+	qc->nbytes = scsi_bufflen(scmd);
 
 	/* check whether ATAPI DMA is safe */
 	if (!using_pio && ata_check_atapi_dma(qc))
 		using_pio = 1;
 
+	/* Some controller variants snoop this value for Packet
+	 * transfers to do state machine and FIFO management.  Thus we
+	 * want to set it properly, even for DMA where it is
+	 * effectively meaningless.
+	 */
+	nbytes = min(qc->nbytes, (unsigned int)63 * 1024);
+
+	/* Most ATAPI devices which honor transfer chunk size don't
+	 * behave according to the spec when an odd chunk size matching
+	 * the transfer length is specified, i.e. when the number of
+	 * bytes to transfer is 2n+1.  According to the spec, the device
+	 * should indicate that 2n+1 bytes are going to be transferred
+	 * and then transfer 2n+2 bytes where the last byte is padding.
+	 *
+	 * In practice, this doesn't happen.  ATAPI devices first
+	 * indicate and transfer 2n bytes and then indicate and
+	 * transfer 2 bytes where the last byte is padding.
+	 *
+	 * This inconsistency confuses several controllers which
+	 * perform PIO using DMA such as Intel AHCIs and sil3124/32.
+	 * These controllers use the actual number of transferred bytes
+	 * to update the DMA pointer, and a transfer of 4n+2 bytes makes
+	 * those controllers push the DMA pointer by 4n+4 bytes because
+	 * SATA data FISes are aligned to 4 bytes.  This causes data
+	 * corruption and buffer overruns.
+	 *
+	 * Always setting nbytes to an even number solves this problem
+	 * because then ATAPI devices don't have to split data at 2n
+	 * boundaries.
+	 */
+	if (nbytes & 0x1)
+		nbytes++;
+
+	qc->tf.lbam = (nbytes & 0xFF);
+	qc->tf.lbah = (nbytes >> 8);
+
 	if (using_pio || nodata) {
 		/* no data, or PIO data xfer */
 		if (nodata)
 			qc->tf.protocol = ATA_PROT_ATAPI_NODATA;
 		else
 			qc->tf.protocol = ATA_PROT_ATAPI;
-		qc->tf.lbam = (8 * 1024) & 0xff;
-		qc->tf.lbah = (8 * 1024) >> 8;
 	} else {
 		/* DMA data xfer */
 		qc->tf.protocol = ATA_PROT_ATAPI_DMA;
@@ -2420,24 +2551,42 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 			qc->tf.feature |= ATAPI_DMADIR;
 	}
 
+
+	/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
+	   as ATAPI tape drives don't get this right otherwise */
 	return 0;
 }
 
-static struct ata_device * ata_find_dev(struct ata_port *ap, int id)
+static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
-	if (likely(id < ATA_MAX_DEVICES))
-		return &ap->device[id];
+	if (ap->nr_pmp_links == 0) {
+		if (likely(devno < ata_link_max_devices(&ap->link)))
+			return &ap->link.device[devno];
+	} else {
+		if (likely(devno < ap->nr_pmp_links))
+			return &ap->pmp_link[devno].device[0];
+	}
+
 	return NULL;
 }
 
-static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
-					const struct scsi_device *scsidev)
+static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
+					      const struct scsi_device *scsidev)
 {
+	int devno;
+
 	/* skip commands not addressed to targets we simulate */
-	if (unlikely(scsidev->channel || scsidev->lun))
-		return NULL;
+	if (ap->nr_pmp_links == 0) {
+		if (unlikely(scsidev->channel || scsidev->lun))
+			return NULL;
+		devno = scsidev->id;
+	} else {
+		if (unlikely(scsidev->id || scsidev->lun))
+			return NULL;
+		devno = scsidev->channel;
+	}
 
-	return ata_find_dev(ap, scsidev->id);
+	return ata_find_dev(ap, devno);
 }
 
 /**
@@ -2458,7 +2607,7 @@ static int ata_scsi_dev_enabled(struct ata_device *dev)
 	if (unlikely(!ata_dev_enabled(dev)))
 		return 0;
 
-	if (!atapi_enabled || (dev->ap->flags & ATA_FLAG_NO_ATAPI)) {
+	if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) {
 		if (unlikely(dev->class == ATA_DEV_ATAPI)) {
 			ata_dev_printk(dev, KERN_WARNING,
 				       "WARNING: ATAPI is %s, device ignored.\n",
@@ -2508,27 +2657,27 @@ static u8
 ata_scsi_map_proto(u8 byte1)
 {
 	switch((byte1 & 0x1e) >> 1) {
-		case 3:		/* Non-data */
-			return ATA_PROT_NODATA;
-
-		case 6:		/* DMA */
-		case 10:	/* UDMA Data-in */
-		case 11:	/* UDMA Data-Out */
-			return ATA_PROT_DMA;
-
-		case 4:		/* PIO Data-in */
-		case 5:		/* PIO Data-out */
-			return ATA_PROT_PIO;
-
-		case 0:		/* Hard Reset */
-		case 1:		/* SRST */
-		case 8:		/* Device Diagnostic */
-		case 9:		/* Device Reset */
-		case 7:		/* DMA Queued */
-		case 12:	/* FPDMA */
-		case 15:	/* Return Response Info */
-		default:	/* Reserved */
-			break;
+	case 3:		/* Non-data */
+		return ATA_PROT_NODATA;
+
+	case 6:		/* DMA */
+	case 10:	/* UDMA Data-in */
+	case 11:	/* UDMA Data-Out */
+		return ATA_PROT_DMA;
+
+	case 4:		/* PIO Data-in */
+	case 5:		/* PIO Data-out */
+		return ATA_PROT_PIO;
+
+	case 0:		/* Hard Reset */
+	case 1:		/* SRST */
+	case 8:		/* Device Diagnostic */
+	case 9:		/* Device Reset */
+	case 7:		/* DMA Queued */
+	case 12:	/* FPDMA */
+	case 15:	/* Return Response Info */
+	default:	/* Reserved */
+		break;
 	}
 
 	return ATA_PROT_UNKNOWN;
@@ -2557,10 +2706,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
 		goto invalid_fld;
 
-	if (cdb[1] & 0xe0)
-		/* PIO multi not supported yet */
-		goto invalid_fld;
-
 	/*
 	 * 12 and 16 byte CDBs use different offsets to
 	 * provide the various register values.
@@ -2624,7 +2769,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 			ata_dev_printk(dev, KERN_WARNING,
 				       "invalid multi_count %u ignored\n",
 				       multi_count);
-	}	
+	}
 
 	/* READ/WRITE LONG use a non-standard sect_size */
 	qc->sect_size = ATA_SECT_SIZE;
@@ -2635,7 +2780,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	case ATA_CMD_WRITE_LONG_ONCE:
 		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
 			goto invalid_fld;
-		qc->sect_size = scmd->request_bufflen;
+		qc->sect_size = scsi_bufflen(scmd);
 	}
 
 	/*
@@ -2665,10 +2810,10 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	 * TODO: find out if we need to do more here to
 	 *       cover scatter/gather case.
 	 */
-	qc->nbytes = scmd->request_bufflen;
+	qc->nbytes = scsi_bufflen(scmd);
 
-	/* request result TF */
-	qc->flags |= ATA_QCFLAG_RESULT_TF;
+	/* request result TF and be quiet about device error */
+	qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
 
 	return 0;
 
@@ -2750,28 +2895,49 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
 				      void (*done)(struct scsi_cmnd *),
 				      struct ata_device *dev)
 {
+	u8 scsi_op = scmd->cmnd[0];
+	ata_xlat_func_t xlat_func;
 	int rc = 0;
 
-	if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) {
-		DPRINTK("bad CDB len=%u, max=%u\n",
-			scmd->cmd_len, dev->cdb_len);
-		scmd->result = DID_ERROR << 16;
-		done(scmd);
-		return 0;
-	}
-
 	if (dev->class == ATA_DEV_ATA) {
-		ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
-							      scmd->cmnd[0]);
+		if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
+			goto bad_cdb_len;
 
-		if (xlat_func)
-			rc = ata_scsi_translate(dev, scmd, done, xlat_func);
-		else
-			ata_scsi_simulate(dev, scmd, done);
-	} else
-		rc = ata_scsi_translate(dev, scmd, done, atapi_xlat);
+		xlat_func = ata_get_xlat_func(dev, scsi_op);
+	} else {
+		if (unlikely(!scmd->cmd_len))
+			goto bad_cdb_len;
+
+		xlat_func = NULL;
+		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+			/* relay SCSI command to ATAPI device */
+			int len = COMMAND_SIZE(scsi_op);
+			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+				goto bad_cdb_len;
+
+			xlat_func = atapi_xlat;
+		} else {
+			/* ATA_16 passthru, treat as an ATA command */
+			if (unlikely(scmd->cmd_len > 16))
+				goto bad_cdb_len;
+
+			xlat_func = ata_get_xlat_func(dev, scsi_op);
+		}
+	}
+
+	if (xlat_func)
+		rc = ata_scsi_translate(dev, scmd, done, xlat_func);
+	else
+		ata_scsi_simulate(dev, scmd, done);
 
 	return rc;
+
+ bad_cdb_len:
+	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
+		scmd->cmd_len, scsi_op, dev->cdb_len);
+	scmd->result = DID_ERROR << 16;
+	done(scmd);
+	return 0;
 }
 
 /**
@@ -2839,6 +3005,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 {
 	struct ata_scsi_args args;
 	const u8 *scsicmd = cmd->cmnd;
+	u8 tmp8;
 
 	args.dev = dev;
 	args.id = dev->id;
@@ -2846,66 +3013,94 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	args.done = done;
 
 	switch(scsicmd[0]) {
-		/* no-op's, complete with success */
-		case SYNCHRONIZE_CACHE:
-		case REZERO_UNIT:
-		case SEEK_6:
-		case SEEK_10:
-		case TEST_UNIT_READY:
-		case FORMAT_UNIT:		/* FIXME: correct? */
-		case SEND_DIAGNOSTIC:		/* FIXME: correct? */
-			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
-			break;
+	/* TODO: worth improving? */
+	case FORMAT_UNIT:
+		ata_scsi_invalid_field(cmd, done);
+		break;
 
-		case INQUIRY:
-			if (scsicmd[1] & 2)	           /* is CmdDt set?  */
-				ata_scsi_invalid_field(cmd, done);
-			else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
-				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
-			else if (scsicmd[2] == 0x00)
-				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
-			else if (scsicmd[2] == 0x80)
-				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
-			else if (scsicmd[2] == 0x83)
-				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
-			else
-				ata_scsi_invalid_field(cmd, done);
+	case INQUIRY:
+		if (scsicmd[1] & 2)	           /* is CmdDt set?  */
+			ata_scsi_invalid_field(cmd, done);
+		else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
+		else switch (scsicmd[2]) {
+		case 0x00:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
 			break;
-
-		case MODE_SENSE:
-		case MODE_SENSE_10:
-			ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+		case 0x80:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
 			break;
-
-		case MODE_SELECT:	/* unconditionally return */
-		case MODE_SELECT_10:	/* bad-field-in-cdb */
+		case 0x83:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
+			break;
+		case 0x89:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
+			break;
+		default:
 			ata_scsi_invalid_field(cmd, done);
 			break;
+		}
+		break;
+
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+		break;
 
-		case READ_CAPACITY:
+	case MODE_SELECT:	/* unconditionally return */
+	case MODE_SELECT_10:	/* bad-field-in-cdb */
+		ata_scsi_invalid_field(cmd, done);
+		break;
+
+	case READ_CAPACITY:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+		break;
+
+	case SERVICE_ACTION_IN:
+		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
 			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
-			break;
+		else
+			ata_scsi_invalid_field(cmd, done);
+		break;
 
-		case SERVICE_ACTION_IN:
-			if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
-				ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
-			else
-				ata_scsi_invalid_field(cmd, done);
-			break;
+	case REPORT_LUNS:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+		break;
 
-		case REPORT_LUNS:
-			ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
-			break;
+	case REQUEST_SENSE:
+		ata_scsi_set_sense(cmd, 0, 0, 0);
+		cmd->result = (DRIVER_SENSE << 24);
+		done(cmd);
+		break;
 
-		/* mandatory commands we haven't implemented yet */
-		case REQUEST_SENSE:
+	/* if we reach this, then writeback caching is disabled,
+	 * turning this into a no-op.
+	 */
+	case SYNCHRONIZE_CACHE:
+		/* fall through */
+
+	/* no-op's, complete with success */
+	case REZERO_UNIT:
+	case SEEK_6:
+	case SEEK_10:
+	case TEST_UNIT_READY:
+		ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
+		break;
 
-		/* all other commands */
-		default:
-			ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
-			/* "Invalid command operation code" */
-			done(cmd);
-			break;
+	case SEND_DIAGNOSTIC:
+		tmp8 = scsicmd[1] & ~(1 << 3);
+		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
+			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
+		else
+			ata_scsi_invalid_field(cmd, done);
+		break;
+
+	/* all other commands */
+	default:
+		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+		/* "Invalid command operation code" */
+		done(cmd);
+		break;
 	}
 }
 
@@ -2932,6 +3127,13 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 		shost->max_channel = 1;
 		shost->max_cmd_len = 16;
 
+		/* Schedule policy is determined by the ->qc_defer()
+		 * callback and it needs to see every deferred qc.
+		 * Set host_blocked to 1 to prevent SCSI midlayer from
+		 * automatically deferring requests.
+		 */
+		shost->max_host_blocked = 1;
+
 		rc = scsi_add_host(ap->scsi_host, ap->host->dev);
 		if (rc)
 			goto err_add;
@@ -2951,26 +3153,79 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 	return rc;
 }
 
-void ata_scsi_scan_host(struct ata_port *ap)
+void ata_scsi_scan_host(struct ata_port *ap, int sync)
 {
-	unsigned int i;
+	int tries = 5;
+	struct ata_device *last_failed_dev = NULL;
+	struct ata_link *link;
+	struct ata_device *dev;
 
 	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		struct scsi_device *sdev;
+ repeat:
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			struct scsi_device *sdev;
+			int channel = 0, id = 0;
 
-		if (!ata_dev_enabled(dev) || dev->sdev)
-			continue;
+			if (!ata_dev_enabled(dev) || dev->sdev)
+				continue;
 
-		sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
-		if (!IS_ERR(sdev)) {
-			dev->sdev = sdev;
-			scsi_device_put(sdev);
+			if (ata_is_host_link(link))
+				id = dev->devno;
+			else
+				channel = link->pmp;
+
+			sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
+						 NULL);
+			if (!IS_ERR(sdev)) {
+				dev->sdev = sdev;
+				scsi_device_put(sdev);
+			}
+		}
+	}
+
+	/* If we scanned while EH was in progress or an allocation
+	 * failure occurred, the scan would have failed silently.  Check
+	 * whether all devices are attached.
+	 */
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			if (ata_dev_enabled(dev) && !dev->sdev)
+				goto exit_loop;
 		}
 	}
+ exit_loop:
+	if (!link)
+		return;
+
+	/* we're missing some SCSI devices */
+	if (sync) {
+		/* If the caller requested a synchronous scan && we've made
+		 * any progress, sleep briefly and repeat.
+		 */
+		if (dev != last_failed_dev) {
+			msleep(100);
+			last_failed_dev = dev;
+			goto repeat;
+		}
+
+		/* We might be failing to detect the boot device; give it
+		 * a few more chances.
+		 */
+		if (--tries) {
+			msleep(100);
+			goto repeat;
+		}
+
+		ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
+				"failed without making any progress,\n"
+				"                  switching to async\n");
+	}
+
+	queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+			   round_jiffies_relative(HZ));
 }
 
 /**
@@ -3009,7 +3264,7 @@ int ata_scsi_offline_dev(struct ata_device *dev)
  */
 static void ata_scsi_remove_dev(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
+	struct ata_port *ap = dev->link->ap;
 	struct scsi_device *sdev;
 	unsigned long flags;
 
@@ -3056,6 +3311,42 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 	}
 }
 
+static void ata_scsi_handle_link_detach(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+
+	ata_link_for_each_dev(dev, link) {
+		unsigned long flags;
+
+		if (!(dev->flags & ATA_DFLAG_DETACHED))
+			continue;
+
+		spin_lock_irqsave(ap->lock, flags);
+		dev->flags &= ~ATA_DFLAG_DETACHED;
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		ata_scsi_remove_dev(dev);
+	}
+}
+
+/**
+ *	ata_scsi_media_change_notify - send media change event
+ *	@dev: Pointer to the disk device with media change event
+ *
+ *	Tell the block layer to send a media change notification
+ *	event.
+ *
+ * 	LOCKING:
+ * 	spin_lock_irqsave(host lock)
+ */
+void ata_scsi_media_change_notify(struct ata_device *dev)
+{
+	if (dev->sdev)
+		sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
+				     GFP_ATOMIC);
+}
+
 /**
  *	ata_scsi_hotplug - SCSI part of hotplug
  *	@work: Pointer to ATA port to perform SCSI hotplug on
@@ -3080,36 +3371,17 @@ void ata_scsi_hotplug(void *_data)
 
 	DPRINTK("ENTER\n");
 
-	/* unplug detached devices */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		unsigned long flags;
-
-		if (!(dev->flags & ATA_DFLAG_DETACHED))
-			continue;
-
-		spin_lock_irqsave(ap->lock, flags);
-		dev->flags &= ~ATA_DFLAG_DETACHED;
-		spin_unlock_irqrestore(ap->lock, flags);
-
-		ata_scsi_remove_dev(dev);
-	}
+	/* Unplug detached devices.  We cannot use link iterator here
+	 * because PMP links have to be scanned even if PMP is
+	 * currently not attached.  Iterate manually.
+	 */
+	ata_scsi_handle_link_detach(&ap->link);
+	if (ap->pmp_link)
+		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+			ata_scsi_handle_link_detach(&ap->pmp_link[i]);
 
 	/* scan for new ones */
-	ata_scsi_scan_host(ap);
-
-	/* If we scanned while EH was in progress, scan would have
-	 * failed silently.  Requeue if there are enabled but
-	 * unattached devices.
-	 */
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		if (ata_dev_enabled(dev) && !dev->sdev) {
-			queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
-				round_jiffies_relative(HZ));
-			break;
-		}
-	}
+	ata_scsi_scan_host(ap, 0);
 
 	DPRINTK("EXIT\n");
 }
@@ -3135,27 +3407,42 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 {
 	struct ata_port *ap = ata_shost_to_port(shost);
 	unsigned long flags;
-	int rc = 0;
+	int devno, rc = 0;
 
 	if (!ap->ops->error_handler)
 		return -EOPNOTSUPP;
 
-	if ((channel != SCAN_WILD_CARD && channel != 0) ||
-	    (lun != SCAN_WILD_CARD && lun != 0))
+	if (lun != SCAN_WILD_CARD && lun)
 		return -EINVAL;
 
+	if (ap->nr_pmp_links == 0) {
+		if (channel != SCAN_WILD_CARD && channel)
+			return -EINVAL;
+		devno = id;
+	} else {
+		if (id != SCAN_WILD_CARD && id)
+			return -EINVAL;
+		devno = channel;
+	}
+
 	spin_lock_irqsave(ap->lock, flags);
 
-	if (id == SCAN_WILD_CARD) {
-		ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
-		ap->eh_info.action |= ATA_EH_SOFTRESET;
+	if (devno == SCAN_WILD_CARD) {
+		struct ata_link *link;
+
+		ata_port_for_each_link(link, ap) {
+			struct ata_eh_info *ehi = &link->eh_info;
+			ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1;
+			ehi->action |= ATA_EH_SOFTRESET;
+		}
 	} else {
-		struct ata_device *dev = ata_find_dev(ap, id);
+		struct ata_device *dev = ata_find_dev(ap, devno);
 
 		if (dev) {
-			ap->eh_info.probe_mask |= 1 << dev->devno;
-			ap->eh_info.action |= ATA_EH_SOFTRESET;
-			ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
+			struct ata_eh_info *ehi = &dev->link->eh_info;
+			ehi->probe_mask |= 1 << dev->devno;
+			ehi->action |= ATA_EH_SOFTRESET;
+			ehi->flags |= ATA_EHI_RESUME_LINK;
 		} else
 			rc = -EINVAL;
 	}
@@ -3185,24 +3472,26 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 void ata_scsi_dev_rescan(void *_data)
 {
 	struct ata_port *ap = _data;
+	struct ata_link *link;
+	struct ata_device *dev;
 	unsigned long flags;
-	unsigned int i;
 
 	spin_lock_irqsave(ap->lock, flags);
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
-		struct scsi_device *sdev = dev->sdev;
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			struct scsi_device *sdev = dev->sdev;
 
-		if (!ata_dev_enabled(dev) || !sdev)
-			continue;
-		if (scsi_device_get(sdev))
-			continue;
+			if (!ata_dev_enabled(dev) || !sdev)
+				continue;
+			if (scsi_device_get(sdev))
+				continue;
 
-		spin_unlock_irqrestore(ap->lock, flags);
-		scsi_rescan_device(&(sdev->sdev_gendev));
-		scsi_device_put(sdev);
-		spin_lock_irqsave(ap->lock, flags);
+			spin_unlock_irqrestore(ap->lock, flags);
+			scsi_rescan_device(&(sdev->sdev_gendev));
+			scsi_device_put(sdev);
+			spin_lock_irqsave(ap->lock, flags);
+		}
 	}
 
 	spin_unlock_irqrestore(ap->lock, flags);
@@ -3330,7 +3619,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
 int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
 {
 	ata_scsi_sdev_config(sdev);
-	ata_scsi_dev_config(sdev, ap->device);
+	ata_scsi_dev_config(sdev, ap->link.device);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
@@ -3353,8 +3642,8 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
 
 	ata_scsi_dump_cdb(ap, cmd);
 
-	if (likely(ata_scsi_dev_enabled(ap->device)))
-		rc = __ata_scsi_queuecmd(cmd, done, ap->device);
+	if (likely(ata_scsi_dev_enabled(ap->link.device)))
+		rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
 		done(cmd);
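
All of the libata-scsi.c changes above follow one pattern: the flat
ap->device[ATA_MAX_DEVICES] array is gone and devices now hang off links
(the host link plus optional PMP links), walked with
ata_port_for_each_link()/ata_link_for_each_dev().  The standalone sketch
below shows the shape of that two-level walk; the structures and all
example_* names are simplified stand-ins invented for illustration, not
the real libata types.

/*
 * Standalone sketch of the port -> link -> device layout introduced by
 * this update.  Compiles with any C compiler; nothing here is libata.
 */
#include <stdio.h>

#define DEVICES_PER_LINK 2
#define MAX_PMP_LINKS    4	/* assumption: small illustrative value */

struct example_device { int devno; int enabled; };

struct example_link {
	int pmp;				/* PMP port number, 0 for the host link */
	struct example_device device[DEVICES_PER_LINK];
};

struct example_port {
	struct example_link link;		/* host link, always present */
	struct example_link pmp_link[MAX_PMP_LINKS];
	int nr_pmp_links;			/* 0 when no port multiplier */
};

/* Visit every device: host link first, then each attached PMP link,
 * mirroring the order ata_port_for_each_link()/ata_link_for_each_dev()
 * visit them in the hunks above. */
static void for_each_device(struct example_port *ap,
			    void (*fn)(struct example_link *, struct example_device *))
{
	int l, d;

	for (d = 0; d < DEVICES_PER_LINK; d++)
		fn(&ap->link, &ap->link.device[d]);

	for (l = 0; l < ap->nr_pmp_links; l++)
		for (d = 0; d < DEVICES_PER_LINK; d++)
			fn(&ap->pmp_link[l], &ap->pmp_link[l].device[d]);
}

static void print_dev(struct example_link *link, struct example_device *dev)
{
	printf("link pmp=%d dev%d enabled=%d\n", link->pmp, dev->devno, dev->enabled);
}

int main(void)
{
	struct example_port ap = { .nr_pmp_links = 2 };
	int l, d;

	for (d = 0; d < DEVICES_PER_LINK; d++)
		ap.link.device[d].devno = d;
	for (l = 0; l < MAX_PMP_LINKS; l++) {
		ap.pmp_link[l].pmp = l;
		for (d = 0; d < DEVICES_PER_LINK; d++)
			ap.pmp_link[l].device[d].devno = d;
	}

	for_each_device(&ap, print_dev);
	return 0;
}
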
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 88fb23b..1cdb102 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1,5 +1,5 @@
 /*
- *  libata-bmdma.c - helper library for PCI IDE BMDMA
+ *  libata-sff.c - helper library for PCI IDE BMDMA
  *
  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
@@ -64,46 +64,6 @@ u8 ata_irq_on(struct ata_port *ap)
 	return tmp;
 }
 
-u8 ata_dummy_irq_on (struct ata_port *ap) 	{ return 0; }
-
-/**
- *	ata_irq_ack - Acknowledge a device interrupt.
- *	@ap: Port on which interrupts are enabled.
- *
- *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
- *	or BUSY+DRQ clear).  Obtain dma status and port status from
- *	device.  Clear the interrupt.  Return port status.
- *
- *	LOCKING:
- */
-
-u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
-{
-	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
-	u8 host_stat = 0, post_stat = 0, status;
-
-	status = ata_busy_wait(ap, bits, 1000);
-	if (status & bits)
-		if (ata_msg_err(ap))
-			printk(KERN_ERR "abnormal status 0x%X\n", status);
-
-	if (ap->ioaddr.bmdma_addr) {
-		/* get controller status; clear intr, err bits */
-		host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-		iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
-			 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-
-		post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-	}
-	if (ata_msg_intr(ap))
-		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
-			__FUNCTION__,
-			host_stat, post_stat, status);
-	return status;
-}
-
-u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; }
-
 /**
  *	ata_tf_load - send taskfile registers to host controller
  *	@ap: Port to which output is sent
@@ -196,7 +156,7 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
 
-	tf->command = ata_check_status(ap);
+	tf->command = ata_chk_status(ap);
 	tf->feature = ioread8(ioaddr->error_addr);
 	tf->nsect = ioread8(ioaddr->nsect_addr);
 	tf->lbal = ioread8(ioaddr->lbal_addr);
@@ -211,6 +171,8 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
 		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
 		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
 	}
 }
 
@@ -286,7 +248,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_start (struct ata_queued_cmd *qc)
+void ata_bmdma_start(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	u8 dmactl;
@@ -295,7 +257,7 @@ void ata_bmdma_start (struct ata_queued_cmd *qc)
 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 
-	/* Strictly, one may wish to issue a readb() here, to
+	/* Strictly, one may wish to issue an ioread8() here, to
 	 * flush the mmio write.  However, control also passes
 	 * to the hardware at this point, and it will interrupt
 	 * us when we are to resume control.  So, in effect,
@@ -305,6 +267,9 @@ void ata_bmdma_start (struct ata_queued_cmd *qc)
 	 * is expected, so I think it is best to not add a readb()
 	 * without first all the MMIO ATA cards/mobos.
 	 * Or maybe I'm just being paranoid.
+	 *
+	 * FIXME: The posting of this write means I/O starts are
+	 * unnecessarily delayed for MMIO
 	 */
 }
 
@@ -440,7 +405,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 	unsigned long flags;
 	int thaw = 0;
 
-	qc = __ata_qc_from_tag(ap, ap->active_tag);
+	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
 		qc = NULL;
 
@@ -495,7 +460,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
 	ata_reset_fn_t hardreset;
 
 	hardreset = NULL;
-	if (sata_scr_valid(ap))
+	if (sata_scr_valid(&ap->link))
 		hardreset = sata_std_hardreset;
 
 	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
@@ -539,6 +504,23 @@ int ata_sff_port_start(struct ata_port *ap)
 
 #ifdef CONFIG_PCI
 
+#if 0
+static int ata_legacy_port_reserved(unsigned long res_start)
+{
+	struct resource *conflict, res;
+
+	res.start = res_start;
+	res.end = res_start + 8 - 1;
+	conflict = ____request_resource(&ioport_resource, &res);
+	while (conflict->child)
+		conflict = ____request_resource(conflict, &res);
+	if (!strcmp(conflict->name, "libata"))
+		return 1;	/* true */
+
+	return 0;		/* false */
+}
+#endif
+
 static int ata_resources_present(struct pci_dev *pdev, int port)
 {
 	int i;
@@ -571,6 +553,10 @@ int ata_pci_init_bmdma(struct ata_host *host)
 	struct pci_dev *pdev = to_pci_dev(gdev);
 	int i, rc;
 
+	/* No BAR4 allocation: No DMA */
+	if (pci_resource_start(pdev, 4) == 0)
+		return 0;
+
 	/* TODO: If we get no DMA mask we should fall back to PIO */
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
@@ -598,19 +584,26 @@ int ata_pci_init_bmdma(struct ata_host *host)
 		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
 		    (ioread8(bmdma + 2) & 0x80))
 			host->flags |= ATA_HOST_SIMPLEX;
+
+		ata_port_desc(ap, "bmdma 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
 	}
 
 	return 0;
 }
 
 /**
- *	ata_pci_init_native_host - acquire native ATA resources and init host
+ *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
  *	@host: target ATA host
  *
  *	Acquire native PCI ATA resources for @host and initialize the
  *	first two ports of @host accordingly.  Ports marked dummy are
  *	skipped and allocation failure makes the port dummy.
  *
+ *	Note that native PCI resources are valid even for legacy hosts
+ *	as we fix up pdev resources array early in boot, so this
+ *	function can be used for both native and legacy SFF hosts.
+ *
  *	LOCKING:
  *	Inherited from calling layer (may sleep).
  *
@@ -618,7 +611,7 @@ int ata_pci_init_bmdma(struct ata_host *host)
  *	0 if at least one port is initialized, -ENODEV if no port is
  *	available.
  */
-int ata_pci_init_native_host(struct ata_host *host)
+int ata_pci_init_sff_host(struct ata_host *host)
 {
 	struct device *gdev = host->dev;
 	struct pci_dev *pdev = to_pci_dev(gdev);
@@ -661,6 +654,10 @@ int ata_pci_init_native_host(struct ata_host *host)
 			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
 		ata_std_ports(&ap->ioaddr);
 
+		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, base),
+			(unsigned long long)pci_resource_start(pdev, base + 1));
+
 		mask |= 1 << i;
 	}
 
@@ -673,7 +670,7 @@ int ata_pci_init_native_host(struct ata_host *host)
 }
 
 /**
- *	ata_pci_prepare_native_host - helper to prepare native PCI ATA host
+ *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
  *	@pdev: target PCI device
  *	@ppi: array of port_info, must be enough for two ports
  *	@r_host: out argument for the initialized ATA host
@@ -687,9 +684,9 @@ int ata_pci_init_native_host(struct ata_host *host)
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-int ata_pci_prepare_native_host(struct pci_dev *pdev,
-				const struct ata_port_info * const * ppi,
-				struct ata_host **r_host)
+int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+			     const struct ata_port_info * const * ppi,
+			     struct ata_host **r_host)
 {
 	struct ata_host *host;
 	int rc;
@@ -705,7 +702,7 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev,
 		goto err_out;
 	}
 
-	rc = ata_pci_init_native_host(host);
+	rc = ata_pci_init_sff_host(host);
 	if (rc)
 		goto err_out;
 
@@ -730,237 +727,6 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev,
 	return rc;
 }
 
-struct ata_legacy_devres {
-	unsigned int	mask;
-	unsigned long	cmd_port[2];
-	void __iomem *	cmd_addr[2];
-	void __iomem *	ctl_addr[2];
-	unsigned int	irq[2];
-	void *		irq_dev_id[2];
-};
-
-static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
-{
-	int i;
-
-	for (i = 0; i < 2; i++) {
-		if (!legacy_dr->irq[i])
-			continue;
-
-		free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
-		legacy_dr->irq[i] = 0;
-		legacy_dr->irq_dev_id[i] = NULL;
-	}
-}
-
-static void ata_legacy_release(struct device *gdev, void *res)
-{
-	struct ata_legacy_devres *this = res;
-	int i;
-
-	ata_legacy_free_irqs(this);
-
-	for (i = 0; i < 2; i++) {
-		if (this->cmd_addr[i])
-			ioport_unmap(this->cmd_addr[i]);
-		if (this->ctl_addr[i])
-			ioport_unmap(this->ctl_addr[i]);
-		if (this->cmd_port[i])
-			release_region(this->cmd_port[i], 8);
-	}
-}
-
-static int ata_legacy_port_reserved(unsigned long res_start)
-{
-	struct resource *conflict, res;
-
-	res.start = res_start;
-	res.end = res_start + 8 - 1;
-	conflict = ____request_resource(&ioport_resource, &res);
-	while (conflict->child)
-		conflict = ____request_resource(conflict, &res);
-	if (!strcmp(conflict->name, "libata"))
-		return 1;	/* true */
-
-	return 0;		/* false */
-}
-
-static int ata_init_legacy_port(struct ata_port *ap,
-				struct ata_legacy_devres *legacy_dr)
-{
-	struct ata_host *host = ap->host;
-	int port_no = ap->port_no;
-	unsigned long cmd_port, ctl_port;
-
-	if (port_no == 0) {
-		cmd_port = ATA_PRIMARY_CMD;
-		ctl_port = ATA_PRIMARY_CTL;
-	} else {
-		cmd_port = ATA_SECONDARY_CMD;
-		ctl_port = ATA_SECONDARY_CTL;
-	}
-
-	/* request cmd_port */
-	if (request_region(cmd_port, 8, "libata") ||
-	    ata_legacy_port_reserved(cmd_port))
-		legacy_dr->cmd_port[port_no] = cmd_port;
-	else {
-		dev_printk(KERN_WARNING, host->dev,
-			   "0x%0lX IDE port busy\n", cmd_port);
-		return -EBUSY;
-	}
-
-	/* iomap cmd and ctl ports */
-	legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
-	legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
-	if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no]) {
-		dev_printk(KERN_WARNING, host->dev,
-			   "failed to map cmd/ctl ports\n");
-		return -ENOMEM;
-	}
-
-	/* init IO addresses */
-	ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
-	ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
-	ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
-	ata_std_ports(&ap->ioaddr);
-
-	return 0;
-}
-
-/**
- *	ata_init_legacy_host - acquire legacy ATA resources and init ATA host
- *	@host: target ATA host
- *	@was_busy: out parameter, indicates whether any port was busy
- *
- *	Acquire legacy ATA resources for the first two ports of @host
- *	and initialize it accordingly.  Ports marked dummy are skipped
- *	and resource acquistion failure makes the port dummy.
- *
- *	LOCKING:
- *	Inherited from calling layer (may sleep).
- *
- *	RETURNS:
- *	0 if at least one port is initialized, -ENODEV if no port is
- *	available.
- */
-static int ata_init_legacy_host(struct ata_host *host, int *was_busy)
-{
-	struct device *gdev = host->dev;
-	struct ata_legacy_devres *legacy_dr;
-	int i, rc;
-
-	if (!devres_open_group(gdev, NULL, GFP_KERNEL))
-		return -ENOMEM;
-
-	rc = -ENOMEM;
-	legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
-				 GFP_KERNEL);
-	if (!legacy_dr)
-		goto err_out;
-	devres_add(gdev, legacy_dr);
-
-	for (i = 0; i < 2; i++) {
-		if (ata_port_is_dummy(host->ports[i]))
-			continue;
-
-		rc = ata_init_legacy_port(host->ports[i], legacy_dr);
-		if (rc == 0)
-			legacy_dr->mask |= 1 << i;
-		else {
-			if (rc == -EBUSY)
-				(*was_busy)++;
-			host->ports[i]->ops = &ata_dummy_port_ops;
-		}
-	}
-
-	if (!legacy_dr->mask) {
-		dev_printk(KERN_ERR, gdev, "no available legacy port\n");
-		return -ENODEV;
-	}
-
-	devres_remove_group(gdev, NULL);
-	return 0;
-
- err_out:
-	devres_release_group(gdev, NULL);
-	return rc;
-}
-
-/**
- *	ata_request_legacy_irqs - request legacy ATA IRQs
- *	@host: target ATA host
- *	@handler: array of IRQ handlers
- *	@irq_flags: array of IRQ flags
- *	@dev_id: array of IRQ dev_ids
- *
- *	Request legacy IRQs for non-dummy legacy ports in @host.  All
- *	IRQ parameters are passed as array to allow ports to have
- *	separate IRQ handlers.
- *
- *	LOCKING:
- *	Inherited from calling layer (may sleep).
- *
- *	RETURNS:
- *	0 on success, -errno otherwise.
- */
-static int ata_request_legacy_irqs(struct ata_host *host,
-				   irq_handler_t const *handler,
-				   const unsigned int *irq_flags,
-				   void * const *dev_id)
-{
-	struct device *gdev = host->dev;
-	struct ata_legacy_devres *legacy_dr;
-	int i, rc;
-
-	legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
-	BUG_ON(!legacy_dr);
-
-	for (i = 0; i < 2; i++) {
-		unsigned int irq;
-
-		/* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
-		if (i == 0)
-			irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
-		else
-			irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
-
-		if (!(legacy_dr->mask & (1 << i)))
-			continue;
-
-		if (!handler[i]) {
-			dev_printk(KERN_ERR, gdev,
-				   "NULL handler specified for port %d\n", i);
-			rc = -EINVAL;
-			goto err_out;
-		}
-
-		rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
-				 dev_id[i]);
-		if (rc) {
-			dev_printk(KERN_ERR, gdev,
-				"irq %u request failed (errno=%d)\n", irq, rc);
-			goto err_out;
-		}
-
-		/* record irq allocation in legacy_dr */
-		legacy_dr->irq[i] = irq;
-		legacy_dr->irq_dev_id[i] = dev_id[i];
-
-		/* only used to print info */
-		if (i == 0)
-			host->irq = irq;
-		else
-			host->irq2 = irq;
-	}
-
-	return 0;
-
- err_out:
-	ata_legacy_free_irqs(legacy_dr);
-	return rc;
-}
-
 /**
  *	ata_pci_init_one - Initialize/register PCI IDE host controller
  *	@pdev: Controller to be initialized
@@ -1045,35 +811,11 @@ int ata_pci_init_one(struct pci_dev *pdev,
 #endif
 	}
 
-	/* alloc and init host */
-	host = ata_host_alloc_pinfo(dev, ppi, 2);
-	if (!host) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "failed to allocate ATA host\n");
-		rc = -ENOMEM;
+	/* prepare host */
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+	if (rc)
 		goto err_out;
-	}
-
-	if (!legacy_mode) {
-		rc = ata_pci_init_native_host(host);
-		if (rc)
-			goto err_out;
-	} else {
-		int was_busy = 0;
-
-		rc = ata_init_legacy_host(host, &was_busy);
-		if (was_busy)
-			pcim_pin_device(pdev);
-		if (rc)
-			goto err_out;
-
-		/* request respective PCI regions, may fail */
-		rc = pci_request_region(pdev, 1, DRV_NAME);
-		rc = pci_request_region(pdev, 3, DRV_NAME);
-	}
 
-	/* init BMDMA, may fail */
-	ata_pci_init_bmdma(host);
 	pci_set_master(pdev);
 
 	/* start host and request IRQ */
@@ -1084,17 +826,34 @@ int ata_pci_init_one(struct pci_dev *pdev,
 	if (!legacy_mode) {
 		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
 				      IRQF_SHARED, DRV_NAME, host);
-		host->irq = pdev->irq;
+		if (rc)
+			goto err_out;
+
+		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
+		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
 	} else {
-		irq_handler_t handler[2] = { host->ops->irq_handler,
-					     host->ops->irq_handler };
-		unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
-		void *dev_id[2] = { host, host };
+		if (!ata_port_is_dummy(host->ports[0])) {
+			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
+					      pi->port_ops->irq_handler,
+					      IRQF_SHARED, DRV_NAME, host);
+			if (rc)
+				goto err_out;
+
+			ata_port_desc(host->ports[0], "irq %d",
+				      ATA_PRIMARY_IRQ(pdev));
+		}
+
+		if (!ata_port_is_dummy(host->ports[1])) {
+			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
+					      pi->port_ops->irq_handler,
+					      IRQF_SHARED, DRV_NAME, host);
+			if (rc)
+				goto err_out;
 
-		rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
+			ata_port_desc(host->ports[1], "irq %d",
+				      ATA_SECONDARY_IRQ(pdev));
+		}
 	}
-	if (rc)
-		goto err_out;
 
 	/* register */
 	rc = ata_host_register(host, pi->sht);
@@ -1114,7 +873,7 @@ err_out:
  *	@pdev: PCI device
  *
  *	Some PCI ATA devices report simplex mode but in fact can be told to
- *	enter non simplex mode. This implements the neccessary logic to
+ *	enter non simplex mode. This implements the necessary logic to
  *	perform the task on such devices. Calling it on other devices will
  *	have -undefined- behaviour.
  */
@@ -1140,7 +899,7 @@ unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer
 	/* Filter out DMA modes if the device has been configured by
 	   the BIOS as PIO only */
 
-	if (adev->ap->ioaddr.bmdma_addr == 0)
+	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	return xfer_mask;
 }
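
The last libata-sff.c hunk keys the transfer-mode filter off the BMDMA
base: if no BMDMA region is mapped, the MWDMA and UDMA bits are stripped
from the proposed mask and only PIO remains.  A minimal standalone
sketch of that masking follows; the EX_* shifts and masks are made-up
stand-ins, not the libata definitions.

#include <stdio.h>

#define EX_SHIFT_PIO	0
#define EX_SHIFT_MWDMA	5
#define EX_SHIFT_UDMA	8

#define EX_MASK_PIO	(0x1fUL << EX_SHIFT_PIO)
#define EX_MASK_MWDMA	(0x07UL << EX_SHIFT_MWDMA)
#define EX_MASK_UDMA	(0x7fUL << EX_SHIFT_UDMA)

static unsigned long example_default_filter(void *bmdma_addr, unsigned long xfer_mask)
{
	/* No BMDMA registers mapped: the controller cannot do DMA at all,
	 * so keep only the PIO bits of the proposed mask. */
	if (bmdma_addr == NULL)
		xfer_mask &= ~(EX_MASK_MWDMA | EX_MASK_UDMA);
	return xfer_mask;
}

int main(void)
{
	unsigned long all = EX_MASK_PIO | EX_MASK_MWDMA | EX_MASK_UDMA;

	printf("with bmdma:    %#lx\n", example_default_filter((void *)0x1, all));
	printf("without bmdma: %#lx\n", example_default_filter(NULL, all));
	return 0;
}
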
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5bd1de1..e6ef0d1 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -29,6 +29,7 @@
 #define __LIBATA_H__
 
 #define DRV_NAME	"libata"
+#define DRV_VERSION	"3.00"	/* must be exactly four chars */
 
 struct ata_scsi_args {
 	struct ata_device	*dev;
@@ -53,9 +54,11 @@ enum {
 };
 
 extern unsigned int ata_print_id;
+extern struct workqueue_struct *ata_wq;
 extern struct workqueue_struct *ata_aux_wq;
 extern int atapi_enabled;
 extern int atapi_dmadir;
+extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
@@ -67,21 +70,23 @@ extern void ata_dev_disable(struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
 extern unsigned ata_exec_internal(struct ata_device *dev,
 				  struct ata_taskfile *tf, const u8 *cdb,
-				  int dma_dir, void *buf, unsigned int buflen);
+				  int dma_dir, void *buf, unsigned int buflen,
+				  unsigned long timeout);
 extern unsigned ata_exec_internal_sg(struct ata_device *dev,
 				     struct ata_taskfile *tf, const u8 *cdb,
 				     int dma_dir, struct scatterlist *sg,
-				     unsigned int n_elem);
+				     unsigned int n_elem, unsigned long timeout);
 extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
 extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
 			   unsigned int flags, u16 *id);
 extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
-extern int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags);
+extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+			      unsigned int readid_flags);
 extern int ata_dev_configure(struct ata_device *dev);
-extern int sata_down_spd_limit(struct ata_port *ap);
-extern int sata_set_spd_needed(struct ata_port *ap);
+extern int sata_down_spd_limit(struct ata_link *link);
+extern int sata_set_spd_needed(struct ata_link *link);
 extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
-extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
 extern void ata_sg_clean(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
@@ -92,30 +97,39 @@ extern void ata_dev_select(struct ata_port *ap, unsigned int device,
 extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
 extern int ata_flush_cache(struct ata_device *dev);
 extern void ata_dev_init(struct ata_device *dev);
+extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
+extern int sata_link_init_spd(struct ata_link *link);
 extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy);
+extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm);
 
 /* libata-acpi.c */
 #ifdef CONFIG_ATA_ACPI
-extern int ata_acpi_exec_tfs(struct ata_port *ap);
-extern int ata_acpi_push_id(struct ata_device *dev);
+extern void ata_acpi_associate_sata_port(struct ata_port *ap);
+extern void ata_acpi_associate(struct ata_host *host);
+extern void ata_acpi_dissociate(struct ata_host *host);
+extern int ata_acpi_on_suspend(struct ata_port *ap);
+extern void ata_acpi_on_resume(struct ata_port *ap);
+extern int ata_acpi_on_devcfg(struct ata_device *dev);
+extern void ata_acpi_on_disable(struct ata_device *dev);
 #else
-static inline int ata_acpi_exec_tfs(struct ata_port *ap)
-{
-	return 0;
-}
-static inline int ata_acpi_push_id(struct ata_device *dev)
-{
-	return 0;
-}
+static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
+static inline void ata_acpi_associate(struct ata_host *host) { }
+static inline void ata_acpi_dissociate(struct ata_host *host) { }
+static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
+static inline void ata_acpi_on_resume(struct ata_port *ap) { }
+static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
+static inline void ata_acpi_on_disable(struct ata_device *dev) { }
 #endif
 
 /* libata-scsi.c */
 extern int ata_scsi_add_hosts(struct ata_host *host,
 			      struct scsi_host_template *sht);
-extern void ata_scsi_scan_host(struct ata_port *ap);
+extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(void *);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 			       unsigned int buflen);
@@ -149,11 +163,32 @@ extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
 extern void ata_scsi_dev_rescan(void *);
 extern int ata_bus_probe(struct ata_port *ap);
 
+/* libata-pmp.c */
+extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_pmp_attach(struct ata_device *dev);
+
 /* libata-eh.c */
 extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_port_wait_eh(struct ata_port *ap);
+extern void ata_eh_fastdrain_timerfn(unsigned long arg);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
+extern void ata_eh_detach_dev(struct ata_device *dev);
+extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
+			       unsigned int action);
+extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
+			unsigned int action);
+extern void ata_eh_autopsy(struct ata_port *ap);
+extern void ata_eh_report(struct ata_port *ap);
+extern int ata_eh_reset(struct ata_link *link, int classify,
+			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
+extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+			  ata_postreset_fn_t postreset,
+			  struct ata_link **r_failed_disk);
+extern void ata_eh_finish(struct ata_port *ap);
 
 /* libata-sff.c */
 extern u8 ata_irq_on(struct ata_port *ap);
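
The CONFIG_ATA_ACPI block in libata.h above uses the usual kernel stub
idiom: real prototypes when the option is enabled, empty static inlines
otherwise, so callers never need #ifdefs of their own.  Below is a
minimal standalone illustration of the same idiom; CONFIG_EXAMPLE_ACPI
and the example_* names are invented for this sketch and are not part of
libata.

#include <stdio.h>

struct example_port { int id; };

#ifdef CONFIG_EXAMPLE_ACPI
/* Real implementation would live in a separate .c file. */
int example_acpi_on_suspend(struct example_port *ap);
#else
static inline int example_acpi_on_suspend(struct example_port *ap)
{
	(void)ap;
	return 0;	/* nothing to do when the option is compiled out */
}
#endif

int main(void)
{
	struct example_port ap = { .id = 0 };

	/* The caller looks the same whether or not the option is enabled. */
	printf("suspend rc=%d\n", example_acpi_on_suspend(&ap));
	return 0;
}
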
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
new file mode 100644
index 0000000..e4542ab
--- /dev/null
+++ b/drivers/ata/pata_acpi.c
@@ -0,0 +1,397 @@
+/*
+ *	ACPI PATA driver
+ *
+ *	(c) 2007 Red Hat  <alan@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acnames.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acparser.h>
+#include <acpi/acexcep.h>
+#include <acpi/acmacros.h>
+#include <acpi/actypes.h>
+
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_acpi"
+#define DRV_VERSION	"0.2.3"
+
+struct pata_acpi {
+	struct ata_acpi_gtm gtm;
+	void *last;
+	unsigned long mask[2];
+};
+
+/**
+ *	pacpi_pre_reset	-	check for 40/80 pin
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Perform the PATA port setup we need.
+ */
+
+static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct pata_acpi *acpi = ap->private_data;
+	if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
+		return -ENODEV;
+
+	return ata_std_prereset(link, deadline);
+}
+
+/**
+ *	pacpi_cable_detect	-	cable type detection
+ *	@ap: port to detect
+ *
+ *	Perform device specific cable detection
+ */
+
+static int pacpi_cable_detect(struct ata_port *ap)
+{
+	struct pata_acpi *acpi = ap->private_data;
+
+	if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA))
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	pacpi_error_handler - Setup an error handler
+ *	@ap: Port to handle
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void pacpi_error_handler(struct ata_port *ap)
+{
+	return ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset,
+				  NULL, ata_std_postreset);
+}
+
+/* Welcome to ACPI, bring a bucket */
+static const unsigned int pio_cycle[7] = {
+	600, 383, 240, 180, 120, 100, 80
+};
+static const unsigned int mwdma_cycle[5] = {
+	480, 150, 120, 100, 80
+};
+static const unsigned int udma_cycle[7] = {
+	120, 80, 60, 45, 30, 20, 15
+};
+
+/**
+ *	pacpi_discover_modes	-	filter non ACPI modes
+ *	@adev: ATA device
+ *	@mask: proposed modes
+ *
+ *	Try the modes available and see which ones the ACPI method will
+ *	set up sensibly. From this we get a mask of ACPI modes we can use
+ */
+
+static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
+{
+	int unit = adev->devno;
+	struct pata_acpi *acpi = ap->private_data;
+	int i;
+	u32 t;
+	unsigned long mask = (0x7f << ATA_SHIFT_UDMA) | (0x7 << ATA_SHIFT_MWDMA) | (0x1F << ATA_SHIFT_PIO);
+
+	struct ata_acpi_gtm probe;
+
+	probe = acpi->gtm;
+
+	/* We always use the 0 slot for crap hardware */
+	if (!(probe.flags & 0x10))
+		unit = 0;
+
+	ata_acpi_gtm(ap, &probe);
+
+	/* Start by scanning for PIO modes */
+	for (i = 0; i < 7; i++) {
+		t = probe.drive[unit].pio;
+		if (t <= pio_cycle[i]) {
+			mask |= (2 << (ATA_SHIFT_PIO + i)) - 1;
+			break;
+		}
+	}
+
+	/* See if we have MWDMA or UDMA data. We don't bother with MWDMA
+	   if UDMA is available, as this means the BIOS set UDMA and our
+	   error changedown, if it kicks in, is UDMA to PIO anyway */
+	if (probe.flags & (1 << (2 * unit))) {
+		/* MWDMA */
+		for (i = 0; i < 5; i++) {
+			t = probe.drive[unit].dma;
+			if (t <= mwdma_cycle[i]) {
+				mask |= (2 << (ATA_SHIFT_MWDMA + i)) - 1;
+				break;
+			}
+		}
+	} else {
+		/* UDMA */
+		for (i = 0; i < 7; i++) {
+			t = probe.drive[unit].dma;
+			if (t <= udma_cycle[i]) {
+				mask |= (2 << (ATA_SHIFT_UDMA + i)) - 1;
+				break;
+			}
+		}
+	}
+	if (mask & (0xF8 << ATA_SHIFT_UDMA))
+		ap->cbl = ATA_CBL_PATA80;
+	return mask;
+}
+
+/**
+ *	pacpi_mode_filter	-	mode filter for ACPI
+ *	@adev: device
+ *	@mask: mask of valid modes
+ *
+ *	Filter the valid mode list according to our own specific rules, in
+ *	this case the list of discovered valid modes obtained by ACPI probing
+ */
+
+static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	struct pata_acpi *acpi = adev->link->ap->private_data;
+	return ata_pci_default_filter(adev, mask & acpi->mask[adev->devno]);
+}
+
+/**
+ *	pacpi_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ */
+
+static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int unit = adev->devno;
+	struct pata_acpi *acpi = ap->private_data;
+
+	if (!(acpi->gtm.flags & 0x10))
+		unit = 0;
+
+	/* Now stuff the nS values into the structure */
+	acpi->gtm.drive[unit].pio = pio_cycle[adev->pio_mode - XFER_PIO_0];
+	ata_acpi_stm(ap, &acpi->gtm);
+	/* See what mode we actually got */
+	ata_acpi_gtm(ap, &acpi->gtm);
+}
+
+/**
+ *	pacpi_set_dmamode	-	set initial DMA mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ */
+
+static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	int unit = adev->devno;
+	struct pata_acpi *acpi = ap->private_data;
+
+	if (!(acpi->gtm.flags & 0x10))
+		unit = 0;
+
+	/* Now stuff the nS values into the structure */
+	if (adev->dma_mode >= XFER_UDMA_0) {
+		acpi->gtm.drive[unit].dma = udma_cycle[adev->dma_mode - XFER_UDMA_0];
+		acpi->gtm.flags |= (1 << (2 * unit));
+	} else {
+		acpi->gtm.drive[unit].dma = mwdma_cycle[adev->dma_mode - XFER_MW_DMA_0];
+		acpi->gtm.flags &= ~(1 << (2 * unit));
+	}
+	ata_acpi_stm(ap, &acpi->gtm);
+	/* See what mode we actually got */
+	ata_acpi_gtm(ap, &acpi->gtm);
+}
+
+/**
+ *	pacpi_qc_issue_prot	-	command issue
+ *	@qc: command pending
+ *
+ *	Called when the libata layer is about to issue a command. We wrap
+ *	this interface so that we can load the correct ATA timings if
+ *	necessary.
+ */
+
+static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *adev = qc->dev;
+	struct pata_acpi *acpi = ap->private_data;
+
+	if (acpi->gtm.flags & 0x10)
+		return ata_qc_issue_prot(qc);
+
+	if (adev != acpi->last) {
+		pacpi_set_piomode(ap, adev);
+		if (adev->dma_mode)
+			pacpi_set_dmamode(ap, adev);
+		acpi->last = adev;
+	}
+	return ata_qc_issue_prot(qc);
+}
+
+/**
+ *	pacpi_port_start	-	port setup
+ *	@ap: ATA port being set up
+ *
+ *	Use the port_start hook to maintain private control structures
+ */
+
+static int pacpi_port_start(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct pata_acpi *acpi;
+
+	int ret;
+
+	if (ap->acpi_handle == NULL)
+		return -ENODEV;
+
+	acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
+	if (ap->private_data == NULL)
+		return -ENOMEM;
+	acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
+	acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
+	ret = ata_sff_port_start(ap);
+	if (ret < 0)
+		return ret;
+
+	return ret;
+}
+
+static struct scsi_host_template pacpi_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	/* Use standard CHS mapping rules */
+	.bios_param		= ata_std_bios_param,
+};
+
+static const struct ata_port_operations pacpi_ops = {
+	.set_piomode		= pacpi_set_piomode,
+	.set_dmamode		= pacpi_set_dmamode,
+	.mode_filter		= pacpi_mode_filter,
+
+	/* Task file is PCI ATA format, use helpers */
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= pacpi_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= pacpi_cable_detect,
+
+	/* BMDMA handling is PCI ATA format, use helpers */
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= pacpi_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	/* Timeout handling */
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	/* Generic PATA PCI ATA helpers */
+	.port_start		= pacpi_port_start,
+};
+
+
+/**
+ *	pacpi_init_one - Register ACPI ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in pacpi_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht		= &pacpi_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+		.pio_mask	= 0x1f,
+		.mwdma_mask	= 0x07,
+		.udma_mask 	= 0x7f,
+
+		.port_ops	= &pacpi_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id pacpi_pci_tbl[] = {
+	{ PCI_ANY_ID,		PCI_ANY_ID,			   PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+	{ }	/* terminate list */
+};
+
+static struct pci_driver pacpi_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= pacpi_pci_tbl,
+	.probe			= pacpi_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init pacpi_init(void)
+{
+	return pci_register_driver(&pacpi_pci_driver);
+}
+
+static void __exit pacpi_exit(void)
+{
+	pci_unregister_driver(&pacpi_pci_driver);
+}
+
+module_init(pacpi_init);
+module_exit(pacpi_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
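
pacpi_discover_modes() above turns the _GTM cycle times (in nanoseconds)
into a transfer-mode mask by comparing them against the pio_cycle,
mwdma_cycle and udma_cycle tables.  The standalone sketch below shows
one simplified reading of that idea for PIO only -- allow every mode
whose minimum cycle time the reported timing still meets; it is not a
literal transcription of the driver's loop, which ORs in a range and
breaks on the first matching entry.

#include <stdio.h>

/* Minimum cycle time in ns for PIO0..PIO6, values as in the patch. */
static const unsigned int pio_cycle[7] = { 600, 383, 240, 180, 120, 100, 80 };

static unsigned int pio_mask_for_cycle(unsigned int cycle_ns)
{
	unsigned int mask = 0;
	int i;

	/* Every mode whose minimum cycle is no shorter than the reported
	 * cycle is achievable; set one bit per achievable mode. */
	for (i = 0; i < 7; i++)
		if (cycle_ns <= pio_cycle[i])
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	printf("120 ns -> PIO mask %#x\n", pio_mask_for_cycle(120));	/* PIO0..PIO4 */
	printf("383 ns -> PIO mask %#x\n", pio_mask_for_cycle(383));	/* PIO0..PIO1 */
	return 0;
}
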
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 3c55a5f..756f5b0 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -34,7 +34,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.7.4"
+#define DRV_VERSION "0.7.5"
 
 /*
  *	Cable special cases
@@ -45,7 +45,14 @@ static struct dmi_system_id cable_dmi_table[] = {
 		.ident = "HP Pavilion N5430",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
+			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
+		},
+	},
+	{
+		.ident = "Toshiba Satelite S1800-814",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
 		},
 	},
 	{ }
@@ -56,6 +63,9 @@ static int ali_cable_override(struct pci_dev *pdev)
 	/* Fujitsu P2000 */
 	if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
 	   	return 1;
+	/* Mitac 8317 (Winbook-A) and relatives */
+	if (pdev->subsystem_vendor == 0x1071  && pdev->subsystem_device == 0x8317)
+		return 1;
 	/* Systems by DMI */
 	if (dmi_check_system(cable_dmi_table))
 		return 1;
@@ -275,6 +285,21 @@ static void ali_lock_sectors(struct ata_device *adev)
 	adev->max_sectors = 255;
 }
 
+/**
+ *	ali_check_atapi_dma	-	DMA check for most ALi controllers
+ *	@qc: Queued command to check
+ *
+ *	Called to decide whether commands should be sent by DMA or PIO
+ */
+
+static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* If it's not a media command, it's not worth it */
+	if (qc->nbytes < 2048)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
 static struct scsi_host_template ali_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -298,7 +323,6 @@ static struct scsi_host_template ali_sht = {
  */
 
 static struct ata_port_operations ali_early_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= ali_set_piomode,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
@@ -320,9 +344,8 @@ static struct ata_port_operations ali_early_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -330,8 +353,6 @@ static struct ata_port_operations ali_early_port_ops = {
  *	detect
  */
 static struct ata_port_operations ali_20_port_ops = {
-	.port_disable	= ata_port_disable,
-
 	.set_piomode	= ali_set_piomode,
 	.set_dmamode	= ali_set_dmamode,
 	.mode_filter	= ali_20_filter,
@@ -362,21 +383,20 @@ static struct ata_port_operations ali_20_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /*
  *	Port operations for DMA capable ALi with cable detect
  */
 static struct ata_port_operations ali_c2_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= ali_set_piomode,
 	.set_dmamode	= ali_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
+	.check_atapi_dma = ali_check_atapi_dma,
 	.check_status 	= ata_check_status,
 	.exec_command	= ata_exec_command,
 	.dev_select 	= ata_std_dev_select,
@@ -401,21 +421,20 @@ static struct ata_port_operations ali_c2_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /*
  *	Port operations for DMA capable ALi with cable detect and LBA48
  */
 static struct ata_port_operations ali_c5_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= ali_set_piomode,
 	.set_dmamode	= ali_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
+	.check_atapi_dma = ali_check_atapi_dma,
 	.check_status 	= ata_check_status,
 	.exec_command	= ata_exec_command,
 	.dev_select 	= ata_std_dev_select,
@@ -439,9 +458,8 @@ static struct ata_port_operations ali_c5_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 
@@ -455,23 +473,24 @@ static struct ata_port_operations ali_c5_port_ops = {
 
 static void ali_init_chipset(struct pci_dev *pdev)
 {
-	u8 rev, tmp;
+	u8 tmp;
 	struct pci_dev *north, *isa_bridge;
-
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+	u8 pdev_revision;
 
 	/*
 	 * The chipset revision selects the driver operations and
 	 * mode data.
 	 */
 
-	if (rev >= 0x20 && rev < 0xC2) {
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
+
+	if (pdev_revision >= 0x20 && pdev_revision < 0xC2) {
 		/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
 		pci_read_config_byte(pdev, 0x4B, &tmp);
 		/* Clear CD-ROM DMA write bit */
 		tmp &= 0x7F;
 		pci_write_config_byte(pdev, 0x4B, tmp);
-	} else if (rev >= 0xC2) {
+	} else if (pdev_revision >= 0xC2) {
 		/* Enable cable detection logic */
 		pci_read_config_byte(pdev, 0x4B, &tmp);
 		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
@@ -483,21 +502,21 @@ static void ali_init_chipset(struct pci_dev *pdev)
 		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
 		   Set the south bridge enable bit */
 		pci_read_config_byte(isa_bridge, 0x79, &tmp);
-		if (rev == 0xC2)
+		if (pdev_revision == 0xC2)
 			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
-		else if (rev > 0xC2 && rev < 0xC5)
+		else if (pdev_revision > 0xC2 && pdev_revision < 0xC5)
 			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
 	}
-	if (rev >= 0x20) {
+	if (pdev_revision >= 0x20) {
 		/*
 		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
 		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
 		 * via 0x54/55.
 		 */
 		pci_read_config_byte(pdev, 0x53, &tmp);
-		if (rev <= 0x20)
+		if (pdev_revision <= 0x20)
 			tmp &= ~0x02;
-		if (rev >= 0xc7)
+		if (pdev_revision >= 0xc7)
 			tmp |= 0x03;
 		else
 			tmp |= 0x01;	/* CD_ROM enable for DMA */
@@ -518,16 +537,17 @@ static void ali_init_chipset(struct pci_dev *pdev)
 
 static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	u8 pdev_revision;
 	static const struct ata_port_info info_early = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &ali_early_port_ops
 	};
 	/* Revision 0x20 added DMA */
 	static const struct ata_port_info info_20 = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &ali_20_port_ops
@@ -535,7 +555,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* Revision 0x20 with support logic added UDMA */
 	static const struct ata_port_info info_20_udma = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x07,	/* UDMA33 */
@@ -544,60 +564,58 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* Revision 0xC2 adds UDMA66 */
 	static const struct ata_port_info info_c2 = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x1f,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &ali_c2_port_ops
 	};
-	/* Revision 0xC3 is UDMA100 */
+	/* Revision 0xC3 is UDMA66 for now */
 	static const struct ata_port_info info_c3 = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &ali_c2_port_ops
 	};
-	/* Revision 0xC4 is UDMA133 */
+	/* Revision 0xC4 is UDMA100 */
 	static const struct ata_port_info info_c4 = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
+		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &ali_c2_port_ops
 	};
 	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
 	static const struct ata_port_info info_c5 = {
 		.sht = &ali_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA6,
 		.port_ops = &ali_c5_port_ops
 	};
 
 	const struct ata_port_info *ppi[] = { NULL, NULL };
-	u8 rev, tmp;
+	u8 tmp;
 	struct pci_dev *isa_bridge;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-
 	/*
 	 * The chipset revision selects the driver operations and
 	 * mode data.
 	 */
-
-	if (rev < 0x20) {
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
+	if (pdev_revision < 0x20) {
 		ppi[0] = &info_early;
-	} else if (rev < 0xC2) {
+	} else if (pdev_revision < 0xC2) {
         	ppi[0] = &info_20;
-	} else if (rev == 0xC2) {
+	} else if (pdev_revision == 0xC2) {
         	ppi[0] = &info_c2;
-	} else if (rev == 0xC3) {
+	} else if (pdev_revision == 0xC3) {
         	ppi[0] = &info_c3;
-	} else if (rev == 0xC4) {
+	} else if (pdev_revision == 0xC4) {
         	ppi[0] = &info_c4;
 	} else
         	ppi[0] = &info_c5;
@@ -605,7 +623,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	ali_init_chipset(pdev);
 
 	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-	if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
+	if (isa_bridge && pdev_revision >= 0x20 && pdev_revision < 0xC2) {
 		/* Are we paired with a UDMA capable chip */
 		pci_read_config_byte(isa_bridge, 0x5E, &tmp);
 		if ((tmp & 0x1E) == 0x12)
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index b439351..f1cd18e 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.3.10"
 
 /**
  *	timing_setup		-	shared timing computation and load
@@ -115,31 +115,33 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
 	}
 
 	/* UDMA timing */
-	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
+	if (at.udma)
+		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
 }
 
 /**
- *	amd_probe_init		-	perform reset handling
- *	@ap: ATA port
+ *	amd_pre_reset		-	perform reset handling
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Reset sequence checking enable bits to see which ports are
  *	active.
  */
 
-static int amd_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits amd_enable_bits[] = {
 		{ 0x40, 1, 0x02, 0x02 },
 		{ 0x40, 1, 0x01, 0x01 }
 	};
 
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 static void amd_error_handler(struct ata_port *ap)
@@ -221,25 +223,26 @@ static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 
 /**
  *	nv_probe_init	-	cable detection
- *	@ap: ATA port
+ *	@link: ATA link
  *
  *	Perform cable detection. The BIOS stores this in PCI config
  *	space for us.
  */
 
-static int nv_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits nv_enable_bits[] = {
 		{ 0x50, 1, 0x02, 0x02 },
 		{ 0x50, 1, 0x01, 0x01 }
 	};
 
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 static void nv_error_handler(struct ata_port *ap)
@@ -268,6 +271,9 @@ static int nv_cable_detect(struct ata_port *ap)
  	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
  	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
 		cbl = ATA_CBL_PATA80;
+	/* And a triple check across suspend/resume with ACPI around */
+	if (ata_acpi_cbl_80wire(ap))
+		cbl = ATA_CBL_PATA80;
 	return cbl;
 }
 
@@ -327,7 +333,6 @@ static struct scsi_host_template amd_sht = {
 };
 
 static struct ata_port_operations amd33_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= amd33_set_piomode,
 	.set_dmamode	= amd33_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -356,13 +361,11 @@ static struct ata_port_operations amd33_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations amd66_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= amd66_set_piomode,
 	.set_dmamode	= amd66_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -391,13 +394,11 @@ static struct ata_port_operations amd66_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations amd100_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= amd100_set_piomode,
 	.set_dmamode	= amd100_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -426,13 +427,11 @@ static struct ata_port_operations amd100_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations amd133_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= amd133_set_piomode,
 	.set_dmamode	= amd133_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -461,13 +460,11 @@ static struct ata_port_operations amd133_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations nv100_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= nv100_set_piomode,
 	.set_dmamode	= nv100_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -496,13 +493,11 @@ static struct ata_port_operations nv100_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations nv133_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= nv133_set_piomode,
 	.set_dmamode	= nv133_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -531,17 +526,17 @@ static struct ata_port_operations nv133_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	u8 pdev_revision;
 	static const struct ata_port_info info[10] = {
 		{	/* 0: AMD 7401 */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,	/* No SWDMA */
 			.udma_mask = 0x07,	/* UDMA 33 */
@@ -549,91 +544,90 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		},
 		{	/* 1: Early AMD7409 - no swdma */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x1f,	/* UDMA 66 */
+			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
 			.port_ops = &amd66_port_ops
 		},
 		{	/* 2: AMD 7409, no swdma errata */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x1f,	/* UDMA 66 */
+			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
 			.port_ops = &amd66_port_ops
 		},
 		{	/* 3: AMD 7411 */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x3f,	/* UDMA 100 */
+			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
 			.port_ops = &amd100_port_ops
 		},
 		{	/* 4: AMD 7441 */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x3f,	/* UDMA 100 */
+			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
 			.port_ops = &amd100_port_ops
 		},
 		{	/* 5: AMD 8111*/
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
+			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
 			.port_ops = &amd133_port_ops
 		},
 		{	/* 6: AMD 8111 UDMA 100 (Serenade) */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x3f,	/* UDMA 100, no swdma */
+			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
 			.port_ops = &amd133_port_ops
 		},
 		{	/* 7: Nvidia Nforce */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x3f,	/* UDMA 100 */
+			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
 			.port_ops = &nv100_port_ops
 		},
 		{	/* 8: Nvidia Nforce2 and later */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x7f,	/* UDMA 133, no swdma */
+			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
 			.port_ops = &nv133_port_ops
 		},
 		{	/* 9: AMD CS5536 (Geode companion) */
 			.sht = &amd_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x3f,	/* UDMA 100 */
+			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
 			.port_ops = &amd100_port_ops
 		}
 	};
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 	static int printed_version;
 	int type = id->driver_data;
-	u8 rev;
 	u8 fifo;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
 	pci_read_config_byte(pdev, 0x41, &fifo);
 
 	/* Check for AMD7409 without swdma errata and if found adjust type */
-	if (type == 1 && rev > 0x7)
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
+	if (type == 1 && pdev_revision > 0x7)
 		type = 2;
 
 	/* Check for AMD7411 */
@@ -693,6 +687,8 @@ static const struct pci_device_id amd[] = {
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE),	8 },
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE),	8 },
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
+	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
 
 	{ },
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 03b6ddd..d421831 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -2,6 +2,7 @@
  *    pata_artop.c - ARTOP ATA controller driver
  *
  *	(C) 2006 Red Hat <alan@redhat.com>
+ *	(C) 2007 Bartlomiej Zolnierkiewicz
  *
  *    Based in part on drivers/ide/pci/aec62xx.c
  *	Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
@@ -28,7 +29,7 @@
 #include <linux/ata.h>
 
 #define DRV_NAME	"pata_artop"
-#define DRV_VERSION	"0.4.3"
+#define DRV_VERSION	"0.4.4"
 
 /*
  *	The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
@@ -39,8 +40,9 @@
 
 static int clock = 0;
 
-static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	const struct pci_bits artop_enable_bits[] = {
 		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
@@ -50,7 +52,7 @@ static int artop6210_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -70,27 +72,28 @@ static void artop6210_error_handler(struct ata_port *ap)
 
 /**
  *	artop6260_pre_reset	-	check for 40/80 pin
- *	@ap: Port
+ *	@link: link
  *	@deadline: deadline jiffies for the operation
  *
  *	The ARTOP hardware reports the cable detect bits in register 0x49.
  *	Nothing complicated needed here.
  */
 
-static int artop6260_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits artop_enable_bits[] = {
 		{ 0x4AU, 1U, 0x02UL, 0x02UL },	/* port 0 */
 		{ 0x4AU, 1U, 0x04UL, 0x04UL },	/* port 1 */
 	};
 
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	/* Odd numbered device ids are the units with enable bits (the -R cards) */
 	if (pdev->device % 2 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -329,7 +332,6 @@ static struct scsi_host_template artop_sht = {
 };
 
 static const struct ata_port_operations artop6210_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= artop6210_set_piomode,
 	.set_dmamode		= artop6210_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -358,13 +360,11 @@ static const struct ata_port_operations artop6210_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_operations artop6260_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= artop6260_set_piomode,
 	.set_dmamode		= artop6260_set_dmamode,
 
@@ -391,9 +391,8 @@ static const struct ata_port_operations artop6260_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -416,7 +415,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 	static int printed_version;
 	static const struct ata_port_info info_6210 = {
 		.sht		= &artop_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask 	= ATA_UDMA2,
@@ -424,20 +423,28 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 	};
 	static const struct ata_port_info info_626x = {
 		.sht		= &artop_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask 	= ATA_UDMA4,
 		.port_ops	= &artop6260_ops,
 	};
-	static const struct ata_port_info info_626x_fast = {
+	static const struct ata_port_info info_628x = {
 		.sht		= &artop_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
 		.udma_mask 	= ATA_UDMA5,
 		.port_ops	= &artop6260_ops,
 	};
+	static const struct ata_port_info info_628x_fast = {
+		.sht		= &artop_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.udma_mask 	= ATA_UDMA6,
+		.port_ops	= &artop6260_ops,
+	};
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 
 	if (!printed_version++)
@@ -455,13 +462,13 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	else if (id->driver_data == 1)	/* 6260 */
 		ppi[0] = &info_626x;
-	else if (id->driver_data == 2)	{ /* 6260 or 6260 + fast */
+	else if (id->driver_data == 2)	{ /* 6280 or 6280 + fast */
 		unsigned long io = pci_resource_start(pdev, 4);
 		u8 reg;
 
-		ppi[0] = &info_626x;
+		ppi[0] = &info_628x;
 		if (inb(io) & 0x10)
-			ppi[0] = &info_626x_fast;
+			ppi[0] = &info_628x_fast;
 		/* Mac systems come up with some registers not set as we
 		   will need them */
 
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
new file mode 100644
index 0000000..67e574d
--- /dev/null
+++ b/drivers/ata/pata_at32.c
@@ -0,0 +1,446 @@
+/*
+ * AVR32 SMC/CFC PATA Driver
+ *
+ * Copyright (C) 2007 Atmel Norway
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/smc.h>
+
+#define DRV_NAME "pata_at32"
+#define DRV_VERSION "0.0.3"
+
+/*
+ * CompactFlash controller memory layout relative to the base address:
+ *
+ *	Attribute memory:  0000 0000 -> 003f ffff
+ *	Common memory:	   0040 0000 -> 007f ffff
+ *	I/O memory:	   0080 0000 -> 00bf ffff
+ *	True IDE Mode:	   00c0 0000 -> 00df ffff
+ *	Alt IDE Mode:	   00e0 0000 -> 00ff ffff
+ *
+ * Only the True IDE and Alt True IDE modes are needed for this driver.
+ *
+ *	True IDE mode	  => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
+ *	Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
+ */
+#define CF_IDE_OFFSET	  0x00c00000
+#define CF_ALT_IDE_OFFSET 0x00e00000
+#define CF_RES_SIZE	  2048
+
+/*
+ * Define DEBUG_BUS if you are debugging your own EBI -> PATA adaptor
+ * with a logic analyzer or similar.
+ */
+#undef DEBUG_BUS
+
+/*
+ * ATA PIO modes
+ *
+ *	Name	| Mb/s	| Min cycle time | Mask
+ *	--------+-------+----------------+--------
+ *	Mode 0	| 3.3	| 600 ns	 | 0x01
+ *	Mode 1	| 5.2	| 383 ns	 | 0x03
+ *	Mode 2	| 8.3	| 240 ns	 | 0x07
+ *	Mode 3	| 11.1	| 180 ns	 | 0x0f
+ *	Mode 4	| 16.7	| 120 ns	 | 0x1f
+ *
+ * Alter PIO_MASK below according to table to set maximal PIO mode.
+ */
+#define PIO_MASK (0x1f)
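+
+/*
+ * For example, limiting the port to PIO mode 2 would use a PIO_MASK of
+ * 0x07 from the table above; the default of 0x1f advertises modes 0-4.
+ */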
+
+/*
+ * Struct containing private information about device.
+ */
+struct at32_ide_info {
+	unsigned int		irq;
+	struct resource		res_ide;
+	struct resource		res_alt;
+	void __iomem		*ide_addr;
+	void __iomem		*alt_addr;
+	unsigned int		cs;
+	struct smc_config	smc;
+};
+
+/*
+ * Setup SMC for the given ATA timing.
+ */
+static int pata_at32_setup_timing(struct device *dev,
+				  struct at32_ide_info *info,
+				  const struct ata_timing *ata)
+{
+	struct smc_config *smc = &info->smc;
+	struct smc_timing timing;
+
+	int active;
+	int recover;
+
+	memset(&timing, 0, sizeof(struct smc_timing));
+
+	/* Total cycle time */
+	timing.read_cycle  = ata->cyc8b;
+
+	/* DIOR <= CFIOR timings */
+	timing.nrd_setup   = ata->setup;
+	timing.nrd_pulse   = ata->act8b;
+	timing.nrd_recover = ata->rec8b;
+
+	/* Convert nanosecond timing to clock cycles */
+	smc_set_timing(smc, &timing);
+
+	/* Add one extra cycle setup due to signal ringing */
+	smc->nrd_setup = smc->nrd_setup + 1;
+
+	active  = smc->nrd_setup + smc->nrd_pulse;
+	recover = smc->read_cycle - active;
+
+	/* Need at least two cycles recovery */
+	if (recover < 2)
+	  smc->read_cycle = active + 2;
+
+	/* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
+	smc->ncs_read_setup = 1;
+	smc->ncs_read_pulse = smc->read_cycle - 2;
+
+	/* Write timings same as read timings */
+	smc->write_cycle = smc->read_cycle;
+	smc->nwe_setup = smc->nrd_setup;
+	smc->nwe_pulse = smc->nrd_pulse;
+	smc->ncs_write_setup = smc->ncs_read_setup;
+	smc->ncs_write_pulse = smc->ncs_read_pulse;
+
+	/* Do some debugging output of ATA and SMC timings */
+	dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
+		ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
+
+	dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
+		smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
+		smc->ncs_read_setup, smc->ncs_read_pulse);
+
+	/* Finally, configure the SMC */
+	return smc_set_configuration(info->cs, smc);
+}
+
+/*
+ * Procedures for libATA.
+ */
+static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ata_timing timing;
+	struct at32_ide_info *info = ap->host->private_data;
+
+	int ret;
+
+	/* Compute ATA timing */
+	ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
+	if (ret) {
+		dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
+		return;
+	}
+
+	/* Setup SMC to ATA timing */
+	ret = pata_at32_setup_timing(ap->dev, info, &timing);
+	if (ret) {
+		dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
+		return;
+	}
+}
+
+static void pata_at32_irq_clear(struct ata_port *ap)
+{
+	/* No DMA controller yet */
+}
+
+static struct scsi_host_template at32_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations at32_port_ops = {
+	.set_piomode		= pata_at32_set_piomode,
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.exec_command		= ata_exec_command,
+	.check_status		= ata_check_status,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.data_xfer		= ata_data_xfer,
+
+	.irq_clear		= pata_at32_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	.port_start		= ata_sff_port_start,
+};
+
+static int __init pata_at32_init_one(struct device *dev,
+				     struct at32_ide_info *info)
+{
+	struct ata_host *host;
+	struct ata_port *ap;
+
+	host = ata_host_alloc(dev, 1);
+	if (!host)
+		return -ENOMEM;
+
+	ap = host->ports[0];
+
+	/* Setup ATA bindings */
+	ap->ops	     = &at32_port_ops;
+	ap->pio_mask = PIO_MASK;
+	ap->flags   |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS;
+
+	/*
+	 * Since all 8-bit taskfile transfers have to go on the lower
+	 * byte of the data bus and there is a bug in the SMC that
+	 * makes it impossible to alter the bus width during runtime,
+	 * we need to hardwire the address signals as follows:
+	 *
+	 *	A_IDE(2:0) <= A_EBI(3:1)
+	 *
+	 * This makes all addresses on the EBI even, thus all data
+	 * will be on the lower byte of the data bus.  All addresses
+	 * used by libATA need to be altered according to this.
+	 */
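+	/*
+	 * For example, the status register (taskfile offset 0x07) is read
+	 * at ide_addr + 0x0e, and the control/altstatus register (offset
+	 * 0x06) at alt_addr + 0x0c.
+	 */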
+	ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
+	ap->ioaddr.ctl_addr	  = info->alt_addr + (0x06 << 1);
+
+	ap->ioaddr.data_addr	  = info->ide_addr + (ATA_REG_DATA << 1);
+	ap->ioaddr.error_addr	  = info->ide_addr + (ATA_REG_ERR << 1);
+	ap->ioaddr.feature_addr	  = info->ide_addr + (ATA_REG_FEATURE << 1);
+	ap->ioaddr.nsect_addr	  = info->ide_addr + (ATA_REG_NSECT << 1);
+	ap->ioaddr.lbal_addr	  = info->ide_addr + (ATA_REG_LBAL << 1);
+	ap->ioaddr.lbam_addr	  = info->ide_addr + (ATA_REG_LBAM << 1);
+	ap->ioaddr.lbah_addr	  = info->ide_addr + (ATA_REG_LBAH << 1);
+	ap->ioaddr.device_addr	  = info->ide_addr + (ATA_REG_DEVICE << 1);
+	ap->ioaddr.status_addr	  = info->ide_addr + (ATA_REG_STATUS << 1);
+	ap->ioaddr.command_addr	  = info->ide_addr + (ATA_REG_CMD << 1);
+
+	/* Set info as private data of ATA host */
+	host->private_data = info;
+
+	/* Register ATA device and return */
+	return ata_host_activate(host, info->irq, ata_interrupt,
+				 IRQF_SHARED | IRQF_TRIGGER_RISING,
+				 &at32_sht);
+}
+
+/*
+ * This function may come in handy for people analyzing their own
+ * EBI -> PATA adaptors.
+ */
+#ifdef DEBUG_BUS
+
+static void __init pata_at32_debug_bus(struct device *dev,
+				       struct at32_ide_info *info)
+{
+	const int d1 = 0xff;
+	const int d2 = 0x00;
+
+	int i;
+
+	/* Write 8-bit values (registers) */
+	iowrite8(d1, info->alt_addr + (0x06 << 1));
+	iowrite8(d2, info->alt_addr + (0x06 << 1));
+
+	for (i = 0; i < 8; i++) {
+		iowrite8(d1, info->ide_addr + (i << 1));
+		iowrite8(d2, info->ide_addr + (i << 1));
+	}
+
+	/* Write 16 bit values (data) */
+	iowrite16(d1,	   info->ide_addr);
+	iowrite16(d1 << 8, info->ide_addr);
+
+	iowrite16(d1,	   info->ide_addr);
+	iowrite16(d1 << 8, info->ide_addr);
+}
+
+#endif
+
+static int __init pata_at32_probe(struct platform_device *pdev)
+{
+	const struct ata_timing initial_timing =
+		{XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+
+	struct device		 *dev = &pdev->dev;
+	struct at32_ide_info	 *info;
+	struct ide_platform_data *board = pdev->dev.platform_data;
+	struct resource		 *res;
+
+	int irq;
+	int ret;
+
+	if (!board)
+		return -ENXIO;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENXIO;
+
+	/* Retrieve IRQ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	/* Setup struct containing private information */
+	info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	memset(info, 0, sizeof(struct at32_ide_info));
+
+	info->irq = irq;
+	info->cs  = board->cs;
+
+	/* Request memory resources */
+	info->res_ide.start = res->start + CF_IDE_OFFSET;
+	info->res_ide.end   = info->res_ide.start + CF_RES_SIZE - 1;
+	info->res_ide.name  = "ide";
+	info->res_ide.flags = IORESOURCE_MEM;
+
+	ret = request_resource(res, &info->res_ide);
+	if (ret)
+		goto err_req_res_ide;
+
+	info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
+	info->res_alt.end   = info->res_alt.start + CF_RES_SIZE - 1;
+	info->res_alt.name  = "alt";
+	info->res_alt.flags = IORESOURCE_MEM;
+
+	ret = request_resource(res, &info->res_alt);
+	if (ret)
+		goto err_req_res_alt;
+
+	/* Setup non-timing elements of SMC */
+	info->smc.bus_width	 = 2; /* 16 bit data bus */
+	info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
+	info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
+	info->smc.nwait_mode	 = 3; /* NWAIT is in READY mode */
+	info->smc.byte_write	 = 0; /* Byte select access type */
+	info->smc.tdf_mode	 = 0; /* TDF optimization disabled */
+	info->smc.tdf_cycles	 = 0; /* No TDF wait cycles */
+
+	/* Setup SMC to ATA timing */
+	ret = pata_at32_setup_timing(dev, info, &initial_timing);
+	if (ret)
+		goto err_setup_timing;
+
+	/* Map ATA address space */
+	ret = -ENOMEM;
+	info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
+	info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
+	if (!info->ide_addr || !info->alt_addr)
+		goto err_ioremap;
+
+#ifdef DEBUG_BUS
+	pata_at32_debug_bus(dev, info);
+#endif
+
+	/* Setup and register ATA device */
+	ret = pata_at32_init_one(dev, info);
+	if (ret)
+		goto err_ata_device;
+
+	return 0;
+
+ err_ata_device:
+ err_ioremap:
+ err_setup_timing:
+	release_resource(&info->res_alt);
+ err_req_res_alt:
+	release_resource(&info->res_ide);
+ err_req_res_ide:
+	kfree(info);
+
+	return ret;
+}
+
+static int __exit pata_at32_remove(struct platform_device *pdev)
+{
+	struct ata_host *host = platform_get_drvdata(pdev);
+	struct at32_ide_info *info;
+
+	if (!host)
+		return 0;
+
+	info = host->private_data;
+	ata_host_detach(host);
+
+	if (!info)
+		return 0;
+
+	release_resource(&info->res_ide);
+	release_resource(&info->res_alt);
+
+	kfree(info);
+
+	return 0;
+}
+
+static struct platform_driver pata_at32_driver = {
+	.remove	       = __exit_p(pata_at32_remove),
+	.driver	       = {
+		.name  = "at32_ide",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pata_at32_init(void)
+{
+	return platform_driver_probe(&pata_at32_driver, pata_at32_probe);
+}
+
+static void __exit pata_at32_exit(void)
+{
+	platform_driver_unregister(&pata_at32_driver);
+}
+
+module_init(pata_at32_init);
+module_exit(pata_at32_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
+MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 8449146..9623f52 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -22,7 +22,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_atiixp"
-#define DRV_VERSION "0.4.5"
+#define DRV_VERSION "0.4.6"
 
 enum {
 	ATIIXP_IDE_PIO_TIMING	= 0x40,
@@ -33,8 +33,9 @@ enum {
 	ATIIXP_IDE_UDMA_MODE 	= 0x56
 };
 
-static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	static const struct pci_bits atiixp_enable_bits[] = {
 		{ 0x48, 1, 0x01, 0x00 },
 		{ 0x48, 1, 0x08, 0x00 }
@@ -44,7 +45,7 @@ static int atiixp_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 static void atiixp_error_handler(struct ata_port *ap)
@@ -172,6 +173,9 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  *
  *	When DMA begins we need to ensure that the UDMA control
  *	register for the channel is correctly set.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
  */
 
 static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
@@ -198,6 +202,9 @@ static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
  *
  *	DMA has completed. Clear the UDMA flag as the next operations will
  *	be PIO ones not UDMA data transfer.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
  */
 
 static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
@@ -232,7 +239,6 @@ static struct scsi_host_template atiixp_sht = {
 };
 
 static struct ata_port_operations atiixp_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= atiixp_set_piomode,
 	.set_dmamode	= atiixp_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -261,16 +267,15 @@ static struct ata_port_operations atiixp_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &atiixp_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x06,	/* No MWDMA0 support */
 		.udma_mask = 0x3F,
@@ -285,6 +290,7 @@ static const struct pci_device_id atiixp[] = {
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
+	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
 
 	{ },
 };
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
new file mode 100644
index 0000000..088a41f
--- /dev/null
+++ b/drivers/ata/pata_bf54x.c
@@ -0,0 +1,1631 @@
+/*
+ * File:         drivers/ata/pata_bf54x.c
+ * Author:       Sonic Zhang <sonic.zhang@analog.com>
+ *
+ * Created:
+ * Description:  PATA Driver for blackfin 54x
+ *
+ * Modified:
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <asm/dma.h>
+#include <asm/gpio.h>
+#include <asm/portmux.h>
+
+#define DRV_NAME		"pata-bf54x"
+#define DRV_VERSION		"0.9"
+
+#define ATA_REG_CTRL		0x0E
+#define ATA_REG_ALTSTATUS	ATA_REG_CTRL
+
+/* These are the offset of the controller's registers */
+#define ATAPI_OFFSET_CONTROL		0x00
+#define ATAPI_OFFSET_STATUS		0x04
+#define ATAPI_OFFSET_DEV_ADDR		0x08
+#define ATAPI_OFFSET_DEV_TXBUF		0x0c
+#define ATAPI_OFFSET_DEV_RXBUF		0x10
+#define ATAPI_OFFSET_INT_MASK		0x14
+#define ATAPI_OFFSET_INT_STATUS		0x18
+#define ATAPI_OFFSET_XFER_LEN		0x1c
+#define ATAPI_OFFSET_LINE_STATUS	0x20
+#define ATAPI_OFFSET_SM_STATE		0x24
+#define ATAPI_OFFSET_TERMINATE		0x28
+#define ATAPI_OFFSET_PIO_TFRCNT		0x2c
+#define ATAPI_OFFSET_DMA_TFRCNT		0x30
+#define ATAPI_OFFSET_UMAIN_TFRCNT	0x34
+#define ATAPI_OFFSET_UDMAOUT_TFRCNT	0x38
+#define ATAPI_OFFSET_REG_TIM_0		0x40
+#define ATAPI_OFFSET_PIO_TIM_0		0x44
+#define ATAPI_OFFSET_PIO_TIM_1		0x48
+#define ATAPI_OFFSET_MULTI_TIM_0	0x50
+#define ATAPI_OFFSET_MULTI_TIM_1	0x54
+#define ATAPI_OFFSET_MULTI_TIM_2	0x58
+#define ATAPI_OFFSET_ULTRA_TIM_0	0x60
+#define ATAPI_OFFSET_ULTRA_TIM_1	0x64
+#define ATAPI_OFFSET_ULTRA_TIM_2	0x68
+#define ATAPI_OFFSET_ULTRA_TIM_3	0x6c
+
+
+#define ATAPI_GET_CONTROL(base)\
+	bfin_read16(base + ATAPI_OFFSET_CONTROL)
+#define ATAPI_SET_CONTROL(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
+#define ATAPI_GET_STATUS(base)\
+	bfin_read16(base + ATAPI_OFFSET_STATUS)
+#define ATAPI_GET_DEV_ADDR(base)\
+	bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
+#define ATAPI_SET_DEV_ADDR(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
+#define ATAPI_GET_DEV_TXBUF(base)\
+	bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
+#define ATAPI_SET_DEV_TXBUF(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
+#define ATAPI_GET_DEV_RXBUF(base)\
+	bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
+#define ATAPI_SET_DEV_RXBUF(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
+#define ATAPI_GET_INT_MASK(base)\
+	bfin_read16(base + ATAPI_OFFSET_INT_MASK)
+#define ATAPI_SET_INT_MASK(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
+#define ATAPI_GET_INT_STATUS(base)\
+	bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
+#define ATAPI_SET_INT_STATUS(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
+#define ATAPI_GET_XFER_LEN(base)\
+	bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
+#define ATAPI_SET_XFER_LEN(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
+#define ATAPI_GET_LINE_STATUS(base)\
+	bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
+#define ATAPI_GET_SM_STATE(base)\
+	bfin_read16(base + ATAPI_OFFSET_SM_STATE)
+#define ATAPI_GET_TERMINATE(base)\
+	bfin_read16(base + ATAPI_OFFSET_TERMINATE)
+#define ATAPI_SET_TERMINATE(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
+#define ATAPI_GET_PIO_TFRCNT(base)\
+	bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
+#define ATAPI_GET_DMA_TFRCNT(base)\
+	bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
+#define ATAPI_GET_UMAIN_TFRCNT(base)\
+	bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
+#define ATAPI_GET_UDMAOUT_TFRCNT(base)\
+	bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
+#define ATAPI_GET_REG_TIM_0(base)\
+	bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
+#define ATAPI_SET_REG_TIM_0(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
+#define ATAPI_GET_PIO_TIM_0(base)\
+	bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
+#define ATAPI_SET_PIO_TIM_0(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
+#define ATAPI_GET_PIO_TIM_1(base)\
+	bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
+#define ATAPI_SET_PIO_TIM_1(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
+#define ATAPI_GET_MULTI_TIM_0(base)\
+	bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
+#define ATAPI_SET_MULTI_TIM_0(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
+#define ATAPI_GET_MULTI_TIM_1(base)\
+	bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
+#define ATAPI_SET_MULTI_TIM_1(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
+#define ATAPI_GET_MULTI_TIM_2(base)\
+	bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
+#define ATAPI_SET_MULTI_TIM_2(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
+#define ATAPI_GET_ULTRA_TIM_0(base)\
+	bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
+#define ATAPI_SET_ULTRA_TIM_0(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
+#define ATAPI_GET_ULTRA_TIM_1(base)\
+	bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
+#define ATAPI_SET_ULTRA_TIM_1(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
+#define ATAPI_GET_ULTRA_TIM_2(base)\
+	bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
+#define ATAPI_SET_ULTRA_TIM_2(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
+#define ATAPI_GET_ULTRA_TIM_3(base)\
+	bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
+#define ATAPI_SET_ULTRA_TIM_3(base, val)\
+	bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
+
+/**
+ * PIO Mode - Frequency compatibility
+ */
+/* mode: 0         1         2         3         4 */
+static const u32 pio_fsclk[] =
+{ 33333333, 33333333, 33333333, 33333333, 33333333 };
+
+/**
+ * MDMA Mode - Frequency compatibility
+ */
+/*               mode:      0         1         2        */
+static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
+
+/**
+ * UDMA Mode - Frequency compatibility
+ *
+ * UDMA5 - 100 MB/s   - SCLK  = 133 MHz
+ * UDMA4 - 66 MB/s    - SCLK >=  80 MHz
+ * UDMA3 - 44.4 MB/s  - SCLK >=  50 MHz
+ * UDMA2 - 33 MB/s    - SCLK >=  40 MHz
+ */
+/* mode: 0         1         2         3         4          5 */
+static const u32 udma_fsclk[] =
+{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
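+
+/*
+ * bfin_set_dmamode() below walks this table downwards until the entry
+ * fits the current SCLK: for example, a request for UDMA5 with
+ * SCLK = 80 MHz is downgraded to UDMA4, since udma_fsclk[5] (133 MHz)
+ * exceeds the available SCLK.
+ */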
+
+/**
+ * Register transfer timing table
+ */
+/*               mode:       0    1    2    3    4    */
+/* Cycle Time                     */
+static const u32 reg_t0min[]   = { 600, 383, 330, 180, 120 };
+/* DIOR/DIOW to end cycle         */
+static const u32 reg_t2min[]   = { 290, 290, 290, 70,  25  };
+/* DIOR/DIOW asserted pulse width */
+static const u32 reg_teocmin[] = { 290, 290, 290, 80,  70  };
+
+/**
+ * PIO timing table
+ */
+/*               mode:       0    1    2    3    4    */
+/* Cycle Time                     */
+static const u32 pio_t0min[]   = { 600, 383, 240, 180, 120 };
+/* Address valid to DIOR/DIORW    */
+static const u32 pio_t1min[]   = { 70,  50,  30,  30,  25  };
+/* DIOR/DIOW to end cycle         */
+static const u32 pio_t2min[]   = { 165, 125, 100, 80,  70  };
+/* DIOR/DIOW asserted pulse width */
+static const u32 pio_teocmin[] = { 165, 125, 100, 70,  25  };
+/* DIOW data hold                 */
+static const u32 pio_t4min[]   = { 30,  20,  15,  10,  10  };
+
+/* ******************************************************************
+ * Multiword DMA timing table
+ * ******************************************************************
+ */
+/*               mode:       0   1    2        */
+/* Cycle Time                     */
+static const u32 mdma_t0min[]  = { 480, 150, 120 };
+/* DIOR/DIOW asserted pulse width */
+static const u32 mdma_tdmin[]  = { 215, 80,  70  };
+/* DIOW data hold                 */
+static const u32 mdma_thmin[]  = { 20,  15,  10  };
+/* DIOR/DIOW to DMACK hold        */
+static const u32 mdma_tjmin[]  = { 20,  5,   5   };
+/* DIOR negated pulse width       */
+static const u32 mdma_tkrmin[] = { 50,  50,  25  };
+/* DIOW negated pulse width       */
+static const u32 mdma_tkwmin[] = { 215, 50,  25  };
+/* CS[1:0] valid to DIOR/DIOW     */
+static const u32 mdma_tmmin[]  = { 50,  30,  25  };
+/* DMACK to read data released    */
+static const u32 mdma_tzmax[]  = { 20,  25,  25  };
+
+/**
+ * Ultra DMA timing table
+ */
+/*               mode:         0    1    2    3    4    5       */
+static const u32 udma_tcycmin[]  = { 112, 73,  54,  39,  25,  17 };
+static const u32 udma_tdvsmin[]  = { 70,  48,  31,  20,  7,   5  };
+static const u32 udma_tenvmax[]  = { 70,  70,  70,  55,  55,  50 };
+static const u32 udma_trpmin[]   = { 160, 125, 100, 100, 100, 85 };
+static const u32 udma_tmin[]     = { 5,   5,   5,   5,   3,   3  };
+
+
+static const u32 udma_tmlimin = 20;
+static const u32 udma_tzahmin = 20;
+static const u32 udma_tenvmin = 20;
+static const u32 udma_tackmin = 20;
+static const u32 udma_tssmin = 50;
+
+/**
+ *
+ *	Function:       num_clocks_min
+ *
+ *	Description:
+ *	calculate number of SCLK cycles to meet minimum timing
+ */
+static unsigned short num_clocks_min(unsigned long tmin,
+				unsigned long fsclk)
+{
+	unsigned long tmp;
+	unsigned short result;
+
+	tmp = tmin * (fsclk/1000/1000) / 1000;
+	result = (unsigned short)tmp;
+	if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
+		result++;
+	}
+
+	return result;
+}
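+
+/*
+ * For example, with fsclk = 133333333 (133 MHz SCLK) and tmin = 120 ns,
+ * tmp = 120 * 133 / 1000 = 15; since 15 SCLK cycles (~112 ns) do not
+ * cover the requested 120 ns, the result is rounded up to 16 cycles.
+ */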
+
+/**
+ *	bfin_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose PIO timings we are configuring
+ *
+ *	Set PIO mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	int mode = adev->pio_mode - XFER_PIO_0;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	unsigned int fsclk = get_sclk();
+	unsigned short teoc_reg, t2_reg, teoc_pio;
+	unsigned short t4_reg, t2_pio, t1_reg;
+	unsigned short n0, n6, t6min = 5;
+
+	/* The most restrictive timing values are t6 and tc, the DIOW - data
+	 * hold.  If one SCLK pulse is longer than this minimum value then
+	 * register transfers cannot be supported at this frequency.
+	 */
+	n6 = num_clocks_min(t6min, fsclk);
+	if (mode >= 0 && mode <= 4 && n6 >= 1) {
+		pr_debug("set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
+		/* calculate the timing values for register transfers. */
+		while (mode > 0 && pio_fsclk[mode] > fsclk)
+			mode--;
+
+		/* DIOR/DIOW to end cycle time */
+		t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
+		/* DIOR/DIOW asserted pulse width */
+		teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
+		/* Cycle Time */
+		n0  = num_clocks_min(reg_t0min[mode], fsclk);
+
+		/* increase t2 until we meet the minimum cycle length */
+		if (t2_reg + teoc_reg < n0)
+			t2_reg = n0 - teoc_reg;
+
+		/* calculate the timing values for pio transfers. */
+
+		/* DIOR/DIOW to end cycle time */
+		t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
+		/* DIOR/DIOW asserted pulse width */
+		teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
+		/* Cycle Time */
+		n0  = num_clocks_min(pio_t0min[mode], fsclk);
+
+		/* increase t2 until we meet the minimum cycle length */
+		if (t2_pio + teoc_pio < n0)
+			t2_pio = n0 - teoc_pio;
+
+		/* Address valid to DIOR/DIORW */
+		t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
+
+		/* DIOW data hold */
+		t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
+
+		ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
+		ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
+		ATAPI_SET_PIO_TIM_1(base, teoc_pio);
+		if (mode > 2) {
+			ATAPI_SET_CONTROL(base,
+				ATAPI_GET_CONTROL(base) | IORDY_EN);
+		} else {
+			ATAPI_SET_CONTROL(base,
+				ATAPI_GET_CONTROL(base) & ~IORDY_EN);
+		}
+
+		/* Disable host ATAPI PIO interrupts */
+		ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
+			& ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
+		SSYNC();
+	}
+}
+
+/**
+ *	bfin_set_dmamode - Initialize host controller PATA DMA timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose DMA timings we are configuring
+ *	@udma: udma mode, 0 - 6
+ *
+ *	Set UDMA mode for device.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	int mode;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	unsigned long fsclk = get_sclk();
+	unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
+	unsigned short tm, td, tkr, tkw, teoc, th;
+	unsigned short n0, nf, tfmin = 5;
+	unsigned short nmin, tcyc;
+
+	mode = adev->dma_mode - XFER_UDMA_0;
+	if (mode >= 0 && mode <= 5) {
+		pr_debug("set udmamode: mode=%d\n", mode);
+		/* The most restrictive timing values are t6 and tc,
+		 * the DIOW - data hold. If one SCLK pulse is longer
+		 * than this minimum value then register
+		 * transfers cannot be supported at this frequency.
+		 */
+		while (mode > 0 && udma_fsclk[mode] > fsclk)
+			mode--;
+
+		nmin = num_clocks_min(udma_tmin[mode], fsclk);
+		if (nmin >= 1) {
+			/* calculate the timing values for Ultra DMA. */
+			tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
+			tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
+			tcyc_tdvs = 2;
+
+			/* increase tcyc - tdvs (tcyc_tdvs) until we meet
+			 * the minimum cycle length
+			 */
+			if (tdvs + tcyc_tdvs < tcyc)
+				tcyc_tdvs = tcyc - tdvs;
+
+			/* Now assign the values required for the timing
+			 * registers
+			 */
+			if (tcyc_tdvs < 2)
+				tcyc_tdvs = 2;
+
+			if (tdvs < 2)
+				tdvs = 2;
+
+			tack = num_clocks_min(udma_tackmin, fsclk);
+			tss = num_clocks_min(udma_tssmin, fsclk);
+			tmli = num_clocks_min(udma_tmlimin, fsclk);
+			tzah = num_clocks_min(udma_tzahmin, fsclk);
+			trp = num_clocks_min(udma_trpmin[mode], fsclk);
+			tenv = num_clocks_min(udma_tenvmin, fsclk);
+			if (tenv <= udma_tenvmax[mode]) {
+				ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
+				ATAPI_SET_ULTRA_TIM_1(base,
+					(tcyc_tdvs<<8 | tdvs));
+				ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
+				ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
+
+				/* Enable host ATAPI Ultra DMA interrupts */
+				ATAPI_SET_INT_MASK(base,
+					ATAPI_GET_INT_MASK(base)
+					| UDMAIN_DONE_MASK
+					| UDMAOUT_DONE_MASK
+					| UDMAIN_TERM_MASK
+					| UDMAOUT_TERM_MASK);
+			}
+		}
+	}
+
+	mode = adev->dma_mode - XFER_MW_DMA_0;
+	if (mode >= 0 && mode <= 2) {
+		pr_debug("set mdmamode: mode=%d\n", mode);
+		/* the most restrictive timing value is tf, the DMACK to
+		 * read data released. If one SCLK pulse is longer than
+		 * this maximum value then the MDMA mode
+		 * cannot be supported at this frequency.
+		 */
+		while (mode > 0 && mdma_fsclk[mode] > fsclk)
+			mode--;
+
+		nf = num_clocks_min(tfmin, fsclk);
+		if (nf >= 1) {
+			/* calculate the timing values for Multi-word DMA. */
+
+			/* DIOR/DIOW asserted pulse width */
+			td = num_clocks_min(mdma_tdmin[mode], fsclk);
+
+			/* DIOW negated pulse width */
+			tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
+
+			/* Cycle Time */
+			n0  = num_clocks_min(mdma_t0min[mode], fsclk);
+
+			/* increase tkw until we meet the minimum cycle length */
+			if (tkw + td < n0)
+				tkw = n0 - td;
+
+			/* DIOR negated pulse width - read */
+			tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
+			/* CS[1:0] valid to DIOR/DIOW */
+			tm = num_clocks_min(mdma_tmmin[mode], fsclk);
+			/* DIOR/DIOW to DMACK hold */
+			teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
+			/* DIOW Data hold */
+			th = num_clocks_min(mdma_thmin[mode], fsclk);
+
+			ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
+			ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
+			ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
+
+			/* Enable host ATAPI Multi DMA interrupts */
+			ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
+				| MULTI_DONE_MASK | MULTI_TERM_MASK);
+			SSYNC();
+		}
+	}
+	return;
+}
+
+/**
+ *
+ *    Function:       wait_complete
+ *
+ *    Description:    Waits for the interrupt from the device
+ *
+ */
+static inline void wait_complete(void __iomem *base, unsigned short mask)
+{
+	unsigned short status;
+	unsigned int i = 0;
+
+#define PATA_BF54X_WAIT_TIMEOUT		10000
+
+	for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
+		status = ATAPI_GET_INT_STATUS(base) & mask;
+		if (status)
+			break;
+	}
+
+	ATAPI_SET_INT_STATUS(base, mask);
+}
+
+/**
+ *
+ *    Function:       write_atapi_register
+ *
+ *    Description:    Writes to an ATA Device Register
+ *
+ */
+
+static void write_atapi_register(void __iomem *base,
+		unsigned long ata_reg, unsigned short value)
+{
+	/* Program the ATA_DEV_TXBUF register with write data (to be
+	 * written into the device).
+	 */
+	ATAPI_SET_DEV_TXBUF(base, value);
+
+	/* Program the ATA_DEV_ADDR register with address of the
+	 * device register (0x01 to 0x0F).
+	 */
+	ATAPI_SET_DEV_ADDR(base, ata_reg);
+
+	/* Program the ATA_CTRL register with dir set to write (1)
+	 */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
+
+	/* ensure PIO DMA is not set */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+	/* and start the transfer */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+	/* Wait for the interrupt to indicate the end of the transfer.
+	 * (We need to wait on and clear the ATA_DEV_INT interrupt status)
+	 */
+	wait_complete(base, PIO_DONE_INT);
+}
+
+/**
+ *
+ *	Function:       read_atapi_register
+ *
+ *	Description:    Reads from an ATA Device Register
+ *
+ */
+
+static unsigned short read_atapi_register(void __iomem *base,
+		unsigned long ata_reg)
+{
+	/* Program the ATA_DEV_ADDR register with address of the
+	 * device register (0x01 to 0x0F).
+	 */
+	ATAPI_SET_DEV_ADDR(base, ata_reg);
+
+	/* Program the ATA_CTRL register with dir set to read (0) and
+	 */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
+
+	/* ensure PIO DMA is not set */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+	/* and start the transfer */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+	/* Wait for the interrupt to indicate the end of the transfer.
+	 * (PIO_DONE interrupt is set and it doesn't seem to matter
+	 * that we don't clear it)
+	 */
+	wait_complete(base, PIO_DONE_INT);
+
+	/* Read back the data returned by the device from the
+	 * ATA_DEV_RXBUF register.
+	 */
+	return ATAPI_GET_DEV_RXBUF(base);
+}
+
+/**
+ *
+ *    Function:       write_atapi_data
+ *
+ *    Description:    Writes a block of data words to the ATA data register
+ *
+ */
+
+static void write_atapi_data(void __iomem *base,
+		int len, unsigned short *buf)
+{
+	int i;
+
+	/* Set transfer length to 1 */
+	ATAPI_SET_XFER_LEN(base, 1);
+
+	/* Program the ATA_DEV_ADDR register with address of the
+	 * ATA_REG_DATA
+	 */
+	ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
+
+	/* Program the ATA_CTRL register with dir set to write (1)
+	 */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
+
+	/* ensure PIO DMA is not set */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+	for (i = 0; i < len; i++) {
+		/* Program the ATA_DEV_TXBUF register with write data (to be
+		 * written into the device).
+		 */
+		ATAPI_SET_DEV_TXBUF(base, buf[i]);
+
+		/* and start the transfer */
+		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+		/* Wait for the interrupt to indicate the end of the transfer.
+		 * (We need to wait on and clear the ATA_DEV_INT
+		 * interrupt status)
+		 */
+		wait_complete(base, PIO_DONE_INT);
+	}
+}
+
+/**
+ *
+ *	Function:       read_atapi_data
+ *
+ *	Description:    Reads a block of data words from the ATA data register
+ *
+ */
+
+static void read_atapi_data(void __iomem *base,
+		int len, unsigned short *buf)
+{
+	int i;
+
+	/* Set transfer length to 1 */
+	ATAPI_SET_XFER_LEN(base, 1);
+
+	/* Program the ATA_DEV_ADDR register with address of the
+	 * ATA_REG_DATA
+	 */
+	ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
+
+	/* Program the ATA_CTRL register with dir set to read (0) and
+	 */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
+
+	/* ensure PIO DMA is not set */
+	ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+	for (i = 0; i < len; i++) {
+		/* and start the transfer */
+		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+		/* Wait for the interrupt to indicate the end of the transfer.
+		 * (PIO_DONE interrupt is set and it doesn't seem to matter
+		 * that we don't clear it)
+		 */
+		wait_complete(base, PIO_DONE_INT);
+
+		/* Read the data word returned by the device from the
+		 * ATA_DEV_RXBUF register.
+		 */
+		buf[i] = ATAPI_GET_DEV_RXBUF(base);
+	}
+}
+
+/**
+ *	bfin_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Note: Original code is ata_tf_load().
+ */
+
+static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr) {
+		if (tf->flags & ATA_TFLAG_LBA48) {
+			write_atapi_register(base, ATA_REG_FEATURE,
+						tf->hob_feature);
+			write_atapi_register(base, ATA_REG_NSECT,
+						tf->hob_nsect);
+			write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
+			write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
+			write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
+			pr_debug("hob: feat 0x%X nsect 0x%X, lba 0x%X "
+				 "0x%X 0x%X\n",
+				tf->hob_feature,
+				tf->hob_nsect,
+				tf->hob_lbal,
+				tf->hob_lbam,
+				tf->hob_lbah);
+		}
+
+		write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
+		write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
+		write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
+		write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
+		write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
+		pr_debug("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		write_atapi_register(base, ATA_REG_DEVICE, tf->device);
+		pr_debug("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+/**
+ *	bfin_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Note: Original code is ata_check_status().
+ */
+
+static u8 bfin_check_status(struct ata_port *ap)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	return read_atapi_register(base, ATA_REG_STATUS);
+}
+
+/**
+ *	bfin_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Note: Original code is ata_tf_read().
+ */
+
+static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	tf->command = bfin_check_status(ap);
+	tf->feature = read_atapi_register(base, ATA_REG_ERR);
+	tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
+	tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
+	tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
+	tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
+	tf->device = read_atapi_register(base, ATA_REG_DEVICE);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
+		tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
+		tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
+		tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
+		tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
+		tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
+	}
+}
+
+/**
+ *	bfin_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Note: Original code is ata_exec_command().
+ */
+
+static void bfin_exec_command(struct ata_port *ap,
+			      const struct ata_taskfile *tf)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	pr_debug("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+	write_atapi_register(base, ATA_REG_CMD, tf->command);
+	ata_pause(ap);
+}
+
+/**
+ *	bfin_check_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ */
+
+static u8 bfin_check_altstatus(struct ata_port *ap)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	return read_atapi_register(base, ATA_REG_ALTSTATUS);
+}
+
+/**
+ *	bfin_std_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	Note: Original code is ata_std_dev_select().
+ */
+
+static void bfin_std_dev_select(struct ata_port *ap, unsigned int device)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	write_atapi_register(base, ATA_REG_DEVICE, tmp);
+	ata_pause(ap);
+}
+
+/**
+ *	bfin_bmdma_setup - Set up IDE DMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_setup().
+ */
+
+static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	unsigned short config = WDSIZE_16;
+	struct scatterlist *sg;
+
+	pr_debug("in atapi dma setup\n");
+	/* Program the ATA_CTRL register with dir */
+	if (qc->tf.flags & ATA_TFLAG_WRITE) {
+		/* fill the ATAPI DMA controller */
+		set_dma_config(CH_ATAPI_TX, config);
+		set_dma_x_modify(CH_ATAPI_TX, 2);
+		ata_for_each_sg(sg, qc) {
+			set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
+			set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
+		}
+	} else {
+		config |= WNR;
+		/* fill the ATAPI DMA controller */
+		set_dma_config(CH_ATAPI_RX, config);
+		set_dma_x_modify(CH_ATAPI_RX, 2);
+		ata_for_each_sg(sg, qc) {
+			set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
+			set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
+		}
+	}
+}
+
+/**
+ *	bfin_bmdma_start - Start an IDE DMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	struct scatterlist *sg;
+
+	pr_debug("in atapi dma start\n");
+	if (!(ap->udma_mask || ap->mwdma_mask))
+		return;
+
+	/* start ATAPI DMA controller*/
+	if (qc->tf.flags & ATA_TFLAG_WRITE) {
+		/*
+		 * On blackfin arch, uncacheable memory is not
+		 * allocated with flag GFP_DMA. DMA buffer from
+		 * common kernel code should be flushed if WB
+		 * data cache is enabled. Otherwise, this loop
+		 * is an empty loop and optimized out.
+		 */
+		ata_for_each_sg(sg, qc) {
+			flush_dcache_range(sg_dma_address(sg),
+				sg_dma_address(sg) + sg_dma_len(sg));
+		}
+		enable_dma(CH_ATAPI_TX);
+		pr_debug("enable udma write\n");
+
+		/* Send ATA DMA write command */
+		bfin_exec_command(ap, &qc->tf);
+
+		/* set ATA DMA write direction */
+		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
+			| XFER_DIR));
+	} else {
+		enable_dma(CH_ATAPI_RX);
+		pr_debug("enable udma read\n");
+
+		/* Send ATA DMA read command */
+		bfin_exec_command(ap, &qc->tf);
+
+		/* set ATA DMA read direction */
+		ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
+			& ~XFER_DIR));
+	}
+
+	/* Reset all transfer count */
+	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
+
+		/* Set transfer length to buffer len */
+	ata_for_each_sg(sg, qc) {
+		ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
+	}
+
+	/* Enable ATA DMA operation*/
+	if (ap->udma_mask)
+		ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
+			| ULTRA_START);
+	else
+		ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
+			| MULTI_START);
+}
+
+/**
+ *	bfin_bmdma_stop - Stop IDE DMA transfer
+ *	@qc: Command we are ending DMA for
+ */
+
+static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+
+	pr_debug("in atapi dma stop\n");
+	if (!(ap->udma_mask || ap->mwdma_mask))
+		return;
+
+	/* stop ATAPI DMA controller*/
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		disable_dma(CH_ATAPI_TX);
+	else {
+		disable_dma(CH_ATAPI_RX);
+		if (ap->hsm_task_state & HSM_ST_LAST) {
+			/*
+			 * On blackfin arch, uncacheable memory is not
+			 * allocated with flag GFP_DMA. DMA buffer from
+			 * common kernel code should be invalidated if
+			 * data cache is enabled. Otherwise, this loop
+			 * is an empty loop and optimized out.
+			 */
+			ata_for_each_sg(sg, qc) {
+				invalidate_dcache_range(
+					sg_dma_address(sg),
+					sg_dma_address(sg)
+					+ sg_dma_len(sg));
+			}
+		}
+	}
+}
+
+/**
+ *	bfin_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	Note: Original code is ata_devchk().
+ */
+
+static unsigned int bfin_devchk(struct ata_port *ap,
+				unsigned int device)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	u8 nsect, lbal;
+
+	bfin_std_dev_select(ap, device);
+
+	write_atapi_register(base, ATA_REG_NSECT, 0x55);
+	write_atapi_register(base, ATA_REG_LBAL, 0xaa);
+
+	write_atapi_register(base, ATA_REG_NSECT, 0xaa);
+	write_atapi_register(base, ATA_REG_LBAL, 0x55);
+
+	write_atapi_register(base, ATA_REG_NSECT, 0x55);
+	write_atapi_register(base, ATA_REG_LBAL, 0xaa);
+
+	nsect = read_atapi_register(base, ATA_REG_NSECT);
+	lbal = read_atapi_register(base, ATA_REG_LBAL);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	bfin_bus_post_reset - PATA device post reset
+ *
+ *	Note: Original code is ata_bus_post_reset().
+ */
+
+static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	unsigned long timeout;
+
+	/* if device 0 was found in ata_devchk, wait for its
+	 * BSY bit to clear
+	 */
+	if (dev0)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* if device 1 was found in ata_devchk, wait for
+	 * register access, then wait for BSY to clear
+	 */
+	timeout = jiffies + ATA_TMOUT_BOOT;
+	while (dev1) {
+		u8 nsect, lbal;
+
+		bfin_std_dev_select(ap, 1);
+		nsect = read_atapi_register(base, ATA_REG_NSECT);
+		lbal = read_atapi_register(base, ATA_REG_LBAL);
+		if ((nsect == 1) && (lbal == 1))
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev1 = 0;
+			break;
+		}
+		msleep(50);	/* give drive a breather */
+	}
+	if (dev1)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* is all this really necessary? */
+	bfin_std_dev_select(ap, 0);
+	if (dev1)
+		bfin_std_dev_select(ap, 1);
+	if (dev0)
+		bfin_std_dev_select(ap, 0);
+}
+
+/**
+ *	bfin_bus_softreset - PATA device software reset
+ *
+ *	Note: Original code is ata_bus_softreset().
+ */
+
+static unsigned int bfin_bus_softreset(struct ata_port *ap,
+				       unsigned int devmask)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	/* software reset.  causes dev0 to be selected */
+	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+	udelay(20);
+	write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
+	udelay(20);
+	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+
+	/* spec mandates ">= 2ms" before checking status.
+	 * We wait 150ms, because that was the magic delay used for
+	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
+	 * between when the ATA command register is written, and then
+	 * status is checked.  Because waiting for "a while" before
+	 * checking status is fine, post SRST, we perform this magic
+	 * delay here as well.
+	 *
+	 * Old drivers/ide uses the 2mS rule and then waits for ready
+	 */
+	msleep(150);
+
+	/* Before we perform post reset processing we want to see if
+	 * the bus shows 0xFF because the odd clown forgets the D7
+	 * pulldown resistor.
+	 */
+	if (bfin_check_status(ap) == 0xFF)
+		return 0;
+
+	bfin_bus_post_reset(ap, devmask);
+
+	return 0;
+}
+
+/**
+ *	bfin_std_softreset - reset host port via ATA SRST
+ *	@ap: port to reset
+ *	@classes: resulting classes of attached devices
+ *
+ *	Note: Original code is ata_std_softreset().
+ */
+
+static int bfin_std_softreset(struct ata_link *link, unsigned int *classes,
+		unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	unsigned int devmask = 0, err_mask;
+	u8 err;
+
+	if (ata_link_offline(link)) {
+		classes[0] = ATA_DEV_NONE;
+		goto out;
+	}
+
+	/* determine if device 0/1 are present */
+	if (bfin_devchk(ap, 0))
+		devmask |= (1 << 0);
+	if (slave_possible && bfin_devchk(ap, 1))
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	bfin_std_dev_select(ap, 0);
+
+	/* issue bus reset */
+	err_mask = bfin_bus_softreset(ap, devmask);
+	if (err_mask) {
+		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+				err_mask);
+		return -EIO;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_dev_try_classify(&ap->link.device[0],
+				devmask & (1 << 0), &err);
+	if (slave_possible && err != 0x81)
+		classes[1] = ata_dev_try_classify(&ap->link.device[1],
+					devmask & (1 << 1), &err);
+
+ out:
+	return 0;
+}
+
+/**
+ *	bfin_bmdma_status - Read IDE DMA status
+ *	@ap: Port associated with this ATA transaction.
+ */
+
+static unsigned char bfin_bmdma_status(struct ata_port *ap)
+{
+	unsigned char host_stat = 0;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	unsigned short int_status = ATAPI_GET_INT_STATUS(base);
+
+	if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON)) {
+		host_stat |= ATA_DMA_ACTIVE;
+	}
+	if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT)) {
+		host_stat |= ATA_DMA_INTR;
+	}
+	if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT)) {
+		host_stat |= ATA_DMA_ERR;
+	}
+
+	return host_stat;
+}
+
+/**
+ *	bfin_data_xfer - Transfer data by PIO
+ *	@adev: device for this I/O
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@write_data: read/write
+ *
+ *	Note: Original code is ata_data_xfer().
+ */
+
+static void bfin_data_xfer(struct ata_device *adev, unsigned char *buf,
+			   unsigned int buflen, int write_data)
+{
+	struct ata_port *ap = adev->link->ap;
+	unsigned int words = buflen >> 1;
+	unsigned short *buf16 = (u16 *) buf;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data) {
+		write_atapi_data(base, words, buf16);
+	} else {
+		read_atapi_data(base, words, buf16);
+	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		unsigned short align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			write_atapi_data(base, 1, align_buf);
+		} else {
+			read_atapi_data(base, 1, align_buf);
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+}
+
+/**
+ *	bfin_irq_clear - Clear ATAPI interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Note: Original code is ata_bmdma_irq_clear().
+ */
+
+static void bfin_irq_clear(struct ata_port *ap)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	pr_debug("in atapi irq clear\n");
+
+	ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
+		| MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
+		| MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
+}
+
+/**
+ *	bfin_irq_on - Enable interrupts on a port.
+ *	@ap: Port on which interrupts are enabled.
+ *
+ *	Note: Original code is ata_irq_on().
+ */
+
+static unsigned char bfin_irq_on(struct ata_port *ap)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+	u8 tmp;
+
+	pr_debug("in atapi irq on\n");
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+	tmp = ata_wait_idle(ap);
+
+	bfin_irq_clear(ap);
+
+	return tmp;
+}
+
+/**
+ *	bfin_bmdma_freeze - Freeze DMA controller port
+ *	@ap: port to freeze
+ *
+ *	Note: Original code is ata_bmdma_freeze().
+ */
+
+static void bfin_bmdma_freeze(struct ata_port *ap)
+{
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	pr_debug("in atapi dma freeze\n");
+	ap->ctl |= ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+
+	/* Under certain circumstances, some controllers raise IRQ on
+	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
+	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
+	 */
+	ata_chk_status(ap);
+
+	bfin_irq_clear(ap);
+}
+
+/**
+ *	bfin_bmdma_thaw - Thaw DMA controller port
+ *	@ap: port to thaw
+ *
+ *	Note: Original code is ata_bmdma_thaw().
+ */
+
+void bfin_bmdma_thaw(struct ata_port *ap)
+{
+	bfin_check_status(ap);
+	bfin_irq_clear(ap);
+	bfin_irq_on(ap);
+}
+
+/**
+ *	bfin_std_postreset - standard postreset callback
+ *	@link: the target ata_link
+ *	@classes: classes of attached devices
+ *
+ *	Note: Original code is ata_std_postreset().
+ */
+
+static void bfin_std_postreset(struct ata_link *link, unsigned int *classes)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+	/* re-enable interrupts */
+	bfin_irq_on(ap);
+
+	/* is double-select really necessary? */
+	if (classes[0] != ATA_DEV_NONE)
+		bfin_std_dev_select(ap, 1);
+	if (classes[1] != ATA_DEV_NONE)
+		bfin_std_dev_select(ap, 0);
+
+	/* bail out if no device is present */
+	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+		return;
+	}
+
+	/* set up device control */
+	write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+}
+
+/**
+ *	bfin_error_handler - Stock error handler for DMA controller
+ *	@ap: port to handle error for
+ */
+
+static void bfin_error_handler(struct ata_port *ap)
+{
+	ata_bmdma_drive_eh(ap, ata_std_prereset, bfin_std_softreset, NULL,
+			   bfin_std_postreset);
+}
+
+static void bfin_port_stop(struct ata_port *ap)
+{
+	pr_debug("in atapi port stop\n");
+	if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+		free_dma(CH_ATAPI_RX);
+		free_dma(CH_ATAPI_TX);
+	}
+}
+
+static int bfin_port_start(struct ata_port *ap)
+{
+	pr_debug("in atapi port start\n");
+	if (!(ap->udma_mask || ap->mwdma_mask))
+		return 0;
+
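+	/* DMA needs both the RX and TX ATAPI channels; fall back to PIO otherwise */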
+	if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
+		if (request_dma(CH_ATAPI_TX,
+			"BFIN ATAPI TX DMA") >= 0)
+			return 0;
+
+		free_dma(CH_ATAPI_RX);
+	}
+
+	ap->udma_mask = 0;
+	ap->mwdma_mask = 0;
+	dev_err(ap->dev, "Unable to request ATAPI DMA!"
+		" Continue in PIO mode.\n");
+
+	return 0;
+}
+
+static struct scsi_host_template bfin_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= SG_NONE,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+#ifdef CONFIG_PM
+	.resume			= ata_scsi_device_resume,
+	.suspend		= ata_scsi_device_suspend,
+#endif
+};
+
+static const struct ata_port_operations bfin_pata_ops = {
+	.set_piomode		= bfin_set_piomode,
+	.set_dmamode		= bfin_set_dmamode,
+
+	.tf_load		= bfin_tf_load,
+	.tf_read		= bfin_tf_read,
+	.exec_command		= bfin_exec_command,
+	.check_status		= bfin_check_status,
+	.check_altstatus	= bfin_check_altstatus,
+	.dev_select		= bfin_std_dev_select,
+
+	.bmdma_setup		= bfin_bmdma_setup,
+	.bmdma_start		= bfin_bmdma_start,
+	.bmdma_stop		= bfin_bmdma_stop,
+	.bmdma_status		= bfin_bmdma_status,
+	.data_xfer		= bfin_data_xfer,
+
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.freeze			= bfin_bmdma_freeze,
+	.thaw			= bfin_bmdma_thaw,
+	.error_handler		= bfin_error_handler,
+	.post_internal_cmd	= bfin_bmdma_stop,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= bfin_irq_clear,
+	.irq_on			= bfin_irq_on,
+
+	.port_start		= bfin_port_start,
+	.port_stop		= bfin_port_stop,
+};
+
+static struct ata_port_info bfin_port_info[] = {
+	{
+		.sht		= &bfin_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS
+				| ATA_FLAG_MMIO
+				| ATA_FLAG_NO_LEGACY,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0,
+		.udma_mask	= 0,
+		.port_ops	= &bfin_pata_ops,
+	},
+};
+
+/**
+ *	bfin_reset_controller - initialize BF54x ATAPI controller.
+ */
+
+static int bfin_reset_controller(struct ata_host *host)
+{
+	void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
+	int count;
+	unsigned short status;
+
+	/* Disable all ATAPI interrupts */
+	ATAPI_SET_INT_MASK(base, 0);
+	SSYNC();
+
+	/* Assert the RESET signal for at least 25us */
+	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
+	udelay(30);
+
+	/* Negate the RESET signal for 2ms */
+	ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
+	msleep(2);
+
+	/* Wait on Busy flag to clear */
+	count = 10000000;
+	do {
+		status = read_atapi_register(base, ATA_REG_STATUS);
+	} while (count-- && (status & ATA_BUSY));
+
+	/* Enable only ATAPI Device interrupt */
+	ATAPI_SET_INT_MASK(base, 1);
+	SSYNC();
+
+	return (!count);
+}
+
+/**
+ *	atapi_io_port - define atapi peripheral port pins.
+ */
+static unsigned short atapi_io_port[] = {
+	P_ATAPI_RESET,
+	P_ATAPI_DIOR,
+	P_ATAPI_DIOW,
+	P_ATAPI_CS0,
+	P_ATAPI_CS1,
+	P_ATAPI_DMACK,
+	P_ATAPI_DMARQ,
+	P_ATAPI_INTRQ,
+	P_ATAPI_IORDY,
+	0
+};
+
+/**
+ *	bfin_atapi_probe	-	attach a bfin atapi interface
+ *	@pdev: platform device
+ *
+ *	Register a bfin atapi interface.
+ *
+ *	Platform devices are expected to contain 2 resources per port:
+ *
+ *		- I/O Base (IORESOURCE_IO)
+ *		- IRQ	   (IORESOURCE_IRQ)
+ *
+ */
+static int __devinit bfin_atapi_probe(struct platform_device *pdev)
+{
+	int board_idx = 0;
+	struct resource *res;
+	struct ata_host *host;
+	unsigned int fsclk = get_sclk();
+	int udma_mode = 5;
+	const struct ata_port_info *ppi[] =
+		{ &bfin_port_info[board_idx], NULL };
+
+	/*
+	 * Simple resource validation ..
+	 */
+	if (unlikely(pdev->num_resources != 2)) {
+		dev_err(&pdev->dev, "invalid number of resources\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the register base first
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+
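+	/* Shift out UDMA modes whose required clock exceeds the available sclk */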
+	while (bfin_port_info[board_idx].udma_mask > 0 && udma_fsclk[udma_mode] > fsclk) {
+		udma_mode--;
+		bfin_port_info[board_idx].udma_mask >>= 1;
+	}
+
+	/*
+	 * Now that that's out of the way, wire up the port..
+	 */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
+	if (!host)
+		return -ENOMEM;
+
+	host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
+
+	if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
+		dev_err(&pdev->dev, "Requesting Peripherals faild\n");
+		return -EFAULT;
+	}
+
+	if (bfin_reset_controller(host)) {
+		peripheral_free_list(atapi_io_port);
+		dev_err(&pdev->dev, "Fail to reset ATAPI device\n");
+		return -EFAULT;
+	}
+
+	if (ata_host_activate(host, platform_get_irq(pdev, 0),
+		ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
+		peripheral_free_list(atapi_io_port);
+		dev_err(&pdev->dev, "Fail to attach ATAPI device\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ *	bfin_atapi_remove	-	unplug a bfin atapi interface
+ *	@pdev: platform device
+ *
+ *	A bfin atapi device has been unplugged. Perform the needed
+ *	cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit bfin_atapi_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+
+	ata_host_detach(host);
+
+	peripheral_free_list(atapi_io_port);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	return 0;
+}
+
+int bfin_atapi_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+static struct platform_driver bfin_atapi_driver = {
+	.probe			= bfin_atapi_probe,
+	.remove			= __devexit_p(bfin_atapi_remove),
+	.driver = {
+		.name		= DRV_NAME,
+		.owner		= THIS_MODULE,
+#ifdef CONFIG_PM
+		.suspend	= bfin_atapi_suspend,
+		.resume		= bfin_atapi_resume,
+#endif
+	},
+};
+
+#define ATAPI_MODE_SIZE		10
+static char bfin_atapi_mode[ATAPI_MODE_SIZE];
+
+static int __init bfin_atapi_init(void)
+{
+	pr_info("register bfin atapi driver\n");
+
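+	/* Pick the transfer mode from the bfin_atapi_mode module parameter (UDMA by default) */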
+	switch (bfin_atapi_mode[0]) {
+	case 'p':
+	case 'P':
+		break;
+	case 'm':
+	case 'M':
+		bfin_port_info[0].mwdma_mask = ATA_MWDMA2;
+		break;
+	default:
+		bfin_port_info[0].udma_mask = ATA_UDMA5;
+	}
+
+	return platform_driver_register(&bfin_atapi_driver);
+}
+
+static void __exit bfin_atapi_exit(void)
+{
+	platform_driver_unregister(&bfin_atapi_driver);
+}
+
+module_init(bfin_atapi_init);
+module_exit(bfin_atapi_exit);
+/*
+ * ATAPI mode:
+ * pio/PIO
+ * udma/UDMA (default)
+ * mwdma/MWDMA
+ */
+module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 31cbf8d..43d198f 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -153,7 +153,7 @@ static int cmd640_port_start(struct ata_port *ap)
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	struct cmd640_reg *timing;
 
-	int ret = ata_port_start(ap);
+	int ret = ata_sff_port_start(ap);
 	if (ret < 0)
 		return ret;
 
@@ -184,7 +184,6 @@ static struct scsi_host_template cmd640_sht = {
 };
 
 static struct ata_port_operations cmd640_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cmd640_set_piomode,
 	.mode_filter	= ata_pci_default_filter,
 	.tf_load	= ata_tf_load,
@@ -213,7 +212,6 @@ static struct ata_port_operations cmd640_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= cmd640_port_start,
 };
@@ -251,7 +249,7 @@ static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &cmd640_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &cmd640_port_ops
 	};
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 320a5b1..7acbbd9 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.2.3"
+#define DRV_VERSION "0.2.5"
 
 /*
  * CMD64x specific registers definition.
@@ -88,14 +88,15 @@ static int cmd648_cable_detect(struct ata_port *ap)
 }
 
 /**
- *	cmd64x_set_piomode	-	set initial PIO mode data
+ *	cmd64x_set_piomode	-	set PIO and MWDMA timing
  *	@ap: ATA interface
  *	@adev: ATA device
+ *	@mode: mode
  *
- *	Called to do the PIO mode setup.
+ *	Called to do the PIO and MWDMA mode setup.
  */
 
-static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	struct ata_timing t;
@@ -117,8 +118,9 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	int arttim = arttim_port[ap->port_no][adev->devno];
 	int drwtim = drwtim_port[ap->port_no][adev->devno];
 
-
-	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
+	/* ata_timing_compute is smart and will produce timings for MWDMA
+	   that don't violate the drive's PIO capabilities. */
+	if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
 		printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
 		return;
 	}
@@ -168,6 +170,20 @@ static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
+ *	cmd64x_set_piomode	-	set initial PIO mode data
+ *	@ap: ATA interface
+ *	@adev: ATA device
+ *
+ *	Used when configuring the devices to set the PIO timings. All the
+ *	actual work is done by the PIO/MWDMA setting helper.
+ */
+
+static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	cmd64x_set_timing(ap, adev, adev->pio_mode);
+}
+
+/**
  *	cmd64x_set_dmamode	-	set initial DMA mode data
  *	@ap: ATA interface
  *	@adev: ATA device
@@ -180,9 +196,6 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 	static const u8 udma_data[] = {
 		0x30, 0x20, 0x10, 0x20, 0x10, 0x00
 	};
-	static const u8 mwdma_data[] = {
-		0x30, 0x20, 0x10
-	};
 
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	u8 regU, regD;
@@ -202,14 +215,16 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 	regU &= ~(0x05 << adev->devno);
 
 	if (adev->dma_mode >= XFER_UDMA_0) {
-		/* Merge thge timing value */
+		/* Merge the timing value */
 		regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
 		/* Merge the control bits */
 		regU |= 1 << adev->devno; /* UDMA on */
 		if (adev->dma_mode > 2)	/* 15nS timing */
 			regU |= 4 << adev->devno;
-	} else
-		regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift;
+	} else {
+		regU &= ~ (1 << adev->devno);	/* UDMA off */
+		cmd64x_set_timing(ap, adev, adev->dma_mode);
+	}
 
 	regD |= 0x20 << adev->devno;
 
@@ -269,7 +284,6 @@ static struct scsi_host_template cmd64x_sht = {
 };
 
 static struct ata_port_operations cmd64x_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cmd64x_set_piomode,
 	.set_dmamode	= cmd64x_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -298,13 +312,11 @@ static struct ata_port_operations cmd64x_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
 
 static struct ata_port_operations cmd646r1_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cmd64x_set_piomode,
 	.set_dmamode	= cmd64x_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -333,13 +345,11 @@ static struct ata_port_operations cmd646r1_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
 
 static struct ata_port_operations cmd648_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cmd64x_set_piomode,
 	.set_dmamode	= cmd64x_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -368,7 +378,6 @@ static struct ata_port_operations cmd648_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -380,47 +389,47 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	static const struct ata_port_info cmd_info[6] = {
 		{	/* CMD 643 - no UDMA */
 			.sht = &cmd64x_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.port_ops = &cmd64x_port_ops
 		},
 		{	/* CMD 646 with broken UDMA */
 			.sht = &cmd64x_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.port_ops = &cmd64x_port_ops
 		},
 		{	/* CMD 646 with working UDMA */
 			.sht = &cmd64x_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = ATA_UDMA1,
+			.udma_mask = ATA_UDMA2,
 			.port_ops = &cmd64x_port_ops
 		},
 		{	/* CMD 646 rev 1  */
 			.sht = &cmd64x_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.port_ops = &cmd646r1_port_ops
 		},
 		{	/* CMD 648 */
 			.sht = &cmd64x_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = ATA_UDMA2,
+			.udma_mask = ATA_UDMA4,
 			.port_ops = &cmd648_port_ops
 		},
 		{	/* CMD 649 */
 			.sht = &cmd64x_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = ATA_UDMA3,
+			.udma_mask = ATA_UDMA5,
 			.port_ops = &cmd648_port_ops
 		}
 	};
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 1aabe15..33f7f08 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -41,7 +41,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_cs5520"
-#define DRV_VERSION	"0.6.5"
+#define DRV_VERSION	"0.6.6"
 
 struct pio_clocks
 {
@@ -146,7 +146,7 @@ static struct scsi_host_template cs5520_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -158,7 +158,6 @@ static struct scsi_host_template cs5520_sht = {
 };
 
 static struct ata_port_operations cs5520_port_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= cs5520_set_piomode,
 	.set_dmamode		= cs5520_set_dmamode,
 
@@ -178,19 +177,20 @@ static struct ata_port_operations cs5520_port_ops = {
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
-	.qc_prep		= ata_qc_prep,
+	.qc_prep		= ata_dumb_qc_prep,
 	.qc_issue		= ata_qc_issue_prot,
 	.data_xfer		= ata_data_xfer,
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
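+	/* The CS5520 channels sit at the legacy ISA command/control port addresses */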
+	static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
+	static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
 	struct ata_port_info pi = {
 		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,
@@ -244,10 +244,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
 	}
 
 	/* Map IO ports and initialize host accordingly */
-	iomap[0] = devm_ioport_map(&pdev->dev, 0x1F0, 8);
-	iomap[1] = devm_ioport_map(&pdev->dev, 0x3F6, 1);
-	iomap[2] = devm_ioport_map(&pdev->dev, 0x170, 8);
-	iomap[3] = devm_ioport_map(&pdev->dev, 0x376, 1);
+	iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
+	iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
+	iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
+	iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
 	iomap[4] = pcim_iomap(pdev, 2, 0);
 
 	if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
@@ -260,6 +260,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
 	ioaddr->bmdma_addr = iomap[4];
 	ata_std_ports(ioaddr);
 
+	ata_port_desc(host->ports[0],
+		      "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
+	ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
+
 	ioaddr = &host->ports[1]->ioaddr;
 	ioaddr->cmd_addr = iomap[2];
 	ioaddr->ctl_addr = iomap[3];
@@ -267,6 +271,10 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
 	ioaddr->bmdma_addr = iomap[4] + 8;
 	ata_std_ports(ioaddr);
 
+	ata_port_desc(host->ports[1],
+		      "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
+	ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
+
 	/* activate the host */
 	pci_set_master(pdev);
 	rc = ata_host_start(host);
@@ -275,7 +283,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
 
 	for (i = 0; i < 2; i++) {
 		static const int irq[] = { 14, 15 };
-		struct ata_port *ap = host->ports[0];
+		struct ata_port *ap = host->ports[i];
 
 		if (ata_port_is_dummy(ap))
 			continue;
@@ -284,29 +292,13 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
 				      ata_interrupt, 0, DRV_NAME, host);
 		if (rc)
 			return rc;
+
+		ata_port_desc(ap, "irq %d", irq[i]);
 	}
 
 	return ata_host_register(host, &cs5520_sht);
 }
 
-/**
- *	cs5520_remove_one	-	device unload
- *	@pdev: PCI device being removed
- *
- *	Handle an unplug/unload event for a PCI device. Unload the
- *	PCI driver but do not use the default handler as we manage
- *	resources ourself and *MUST NOT* disable the device as it has
- *	other functions.
- */
-
-static void __devexit cs5520_remove_one(struct pci_dev *pdev)
-{
-	struct device *dev = pci_dev_to_dev(pdev);
-	struct ata_host *host = dev_get_drvdata(dev);
-
-	ata_host_detach(host);
-}
-
 #ifdef CONFIG_PM
 /**
  *	cs5520_reinit_one	-	device resume
@@ -363,7 +355,7 @@ static struct pci_driver cs5520_pci_driver = {
 	.name 		= DRV_NAME,
 	.id_table	= pata_cs5520,
 	.probe 		= cs5520_init_one,
-	.remove		= cs5520_remove_one,
+	.remove		= ata_pci_remove_one,
 #ifdef CONFIG_PM
 	.suspend	= cs5520_pci_device_suspend,
 	.resume		= cs5520_reinit_one,
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 848f030..c168893 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -35,7 +35,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME	"pata_cs5530"
-#define DRV_VERSION	"0.7.3"
+#define DRV_VERSION	"0.7.4"
 
 static void __iomem *cs5530_port_base(struct ata_port *ap)
 {
@@ -138,7 +138,7 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings if
- *	neccessary.  Specifically we have a problem that there is only
+ *	necessary.  Specifically we have a problem that there is only
  *	one MWDMA/UDMA bit.
  */
 
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -179,7 +179,6 @@ static struct scsi_host_template cs5530_sht = {
 };
 
 static struct ata_port_operations cs5530_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cs5530_set_piomode,
 	.set_dmamode	= cs5530_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -201,7 +200,7 @@ static struct ata_port_operations cs5530_port_ops = {
 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 	.cable_detect	= ata_cable_40wire,
 
-	.qc_prep 	= ata_qc_prep,
+	.qc_prep 	= ata_dumb_qc_prep,
 	.qc_issue	= cs5530_qc_issue_prot,
 
 	.data_xfer	= ata_data_xfer,
@@ -209,9 +208,8 @@ static struct ata_port_operations cs5530_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct dmi_system_id palmax_dmi_table[] = {
@@ -266,7 +264,7 @@ static int cs5530_init_chip(void)
 	}
 
 	pci_set_master(cs5530_0);
-	pci_set_mwi(cs5530_0);
+	pci_try_set_mwi(cs5530_0);
 
 	/*
 	 * Set PCI CacheLineSize to 16-bytes:
@@ -337,7 +335,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &cs5530_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x07,
@@ -346,7 +344,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* The docking connector doesn't do UDMA, and it seems not MWDMA */
 	static const struct ata_port_info info_palmax_secondary = {
 		.sht = &cs5530_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &cs5530_port_ops
 	};
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index aa3256f..0132453 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -25,7 +25,7 @@
  * Documentation:
  *	Available from AMD web site.
  * TODO
- *	Review errata to see if serializing is neccessary
+ *	Review errata to see if serializing is necessary
  */
 
 #include <linux/kernel.h>
@@ -176,7 +176,6 @@ static struct scsi_host_template cs5535_sht = {
 };
 
 static struct ata_port_operations cs5535_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cs5535_set_piomode,
 	.set_dmamode	= cs5535_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -206,9 +205,8 @@ static struct ata_port_operations cs5535_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -225,10 +223,10 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &cs5535_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x1f,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &cs5535_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
new file mode 100644
index 0000000..d753e56
--- /dev/null
+++ b/drivers/ata/pata_cs5536.c
@@ -0,0 +1,344 @@
+/*
+ * pata_cs5536.c	- CS5536 PATA for new ATA layer
+ *			  (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307	 USA
+ *
+ * Documentation:
+ *	Available from AMD web site.
+ *
+ * The IDE timing registers for the CS5536 live in the Geode Machine
+ * Specific Register file and not PCI config space.  Most BIOSes
+ * virtualize the PCI registers so the chip looks like a standard IDE
+ * controller.	Unfortunately not all implementations get this right.
+ * In particular some have problems with unaligned accesses to the
+ * virtualized PCI registers.  This driver always does full dword
+ * writes to work around the issue.  Also, in case of a bad BIOS this
+ * driver can be loaded with the "msr=1" parameter which forces using
+ * the Machine Specific Registers to configure the device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/libata.h>
+#include <scsi/scsi_host.h>
+#include <asm/msr.h>
+
+#define DRV_NAME	"pata_cs5536"
+#define DRV_VERSION	"0.0.6"
+
+enum {
+	CFG			= 0,
+	DTC			= 1,
+	CAST			= 2,
+	ETC			= 3,
+
+	MSR_IDE_BASE		= 0x51300000,
+	MSR_IDE_CFG		= (MSR_IDE_BASE + 0x10),
+	MSR_IDE_DTC		= (MSR_IDE_BASE + 0x12),
+	MSR_IDE_CAST		= (MSR_IDE_BASE + 0x13),
+	MSR_IDE_ETC		= (MSR_IDE_BASE + 0x14),
+
+	PCI_IDE_CFG		= 0x40,
+	PCI_IDE_DTC		= 0x48,
+	PCI_IDE_CAST		= 0x4c,
+	PCI_IDE_ETC		= 0x50,
+
+	IDE_CFG_CHANEN		= 0x2,
+	IDE_CFG_CABLE		= 0x10000,
+
+	IDE_D0_SHIFT		= 24,
+	IDE_D1_SHIFT		= 16,
+	IDE_DRV_MASK		= 0xff,
+
+	IDE_CAST_D0_SHIFT	= 6,
+	IDE_CAST_D1_SHIFT	= 4,
+	IDE_CAST_DRV_MASK	= 0x3,
+	IDE_CAST_CMD_MASK	= 0xff,
+	IDE_CAST_CMD_SHIFT	= 24,
+
+	IDE_ETC_NODMA		= 0x03,
+};
+
+static int use_msr;
+
+static const u32 msr_reg[4] = {
+	MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
+};
+
+static const u8 pci_reg[4] = {
+	PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
+};
+
+static inline int cs5536_read(struct pci_dev *pdev, int reg, int *val)
+{
+	if (unlikely(use_msr)) {
+		u32 dummy;
+
+		rdmsr(msr_reg[reg], *val, dummy);
+		return 0;
+	}
+
+	return pci_read_config_dword(pdev, pci_reg[reg], val);
+}
+
+static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
+{
+	if (unlikely(use_msr)) {
+		wrmsr(msr_reg[reg], val, 0);
+		return 0;
+	}
+
+	return pci_write_config_dword(pdev, pci_reg[reg], val);
+}
+
+/**
+ *	cs5536_cable_detect	-	detect cable type
+ *	@ap: Port to detect on
+ *
+ *	Perform cable detection for ATA66 capable cable. Return a libata
+ *	cable type.
+ */
+
+static int cs5536_cable_detect(struct ata_port *ap)
+{
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 cfg;
+
+	cs5536_read(pdev, CFG, &cfg);
+
+	if (cfg & (IDE_CFG_CABLE << ap->port_no))
+		return ATA_CBL_PATA80;
+	else
+		return ATA_CBL_PATA40;
+}
+
+/**
+ *	cs5536_set_piomode		-	PIO setup
+ *	@ap: ATA interface
+ *	@adev: device on the interface
+ */
+
+static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 drv_timings[5] = {
+		0x98, 0x55, 0x32, 0x21, 0x20,
+	};
+
+	static const u8 addr_timings[5] = {
+		0x2, 0x1, 0x0, 0x0, 0x0,
+	};
+
+	static const u8 cmd_timings[5] = {
+		0x99, 0x92, 0x90, 0x22, 0x20,
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *pair = ata_dev_pair(adev);
+	int mode = adev->pio_mode - XFER_PIO_0;
+	int cmdmode = mode;
+	int dshift = ap->port_no ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+	int cshift = ap->port_no ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
+	u32 dtc, cast, etc;
+
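+	/* Command timing is shared by both drives on the channel, so use the slower mode */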
+	if (pair)
+		cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
+
+	cs5536_read(pdev, DTC, &dtc);
+	cs5536_read(pdev, CAST, &cast);
+	cs5536_read(pdev, ETC, &etc);
+
+	dtc &= ~(IDE_DRV_MASK << dshift);
+	dtc |= drv_timings[mode] << dshift;
+
+	cast &= ~(IDE_CAST_DRV_MASK << cshift);
+	cast |= addr_timings[mode] << cshift;
+
+	cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
+	cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
+
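+	/* Programming PIO also forces the no-DMA setting for this drive in the ETC register */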
+	etc &= ~(IDE_DRV_MASK << dshift);
+	etc |= IDE_ETC_NODMA << dshift;
+
+	cs5536_write(pdev, DTC, dtc);
+	cs5536_write(pdev, CAST, cast);
+	cs5536_write(pdev, ETC, etc);
+}
+
+/**
+ *	cs5536_set_dmamode		-	DMA timing setup
+ *	@ap: ATA interface
+ *	@adev: Device being configured
+ *
+ */
+
+static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 udma_timings[6] = {
+		0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
+	};
+
+	static const u8 mwdma_timings[3] = {
+		0x67, 0x21, 0x20,
+	};
+
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 dtc, etc;
+	int mode = adev->dma_mode;
+	int dshift = ap->port_no ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+
+	if (mode >= XFER_UDMA_0) {
+		cs5536_read(pdev, ETC, &etc);
+
+		etc &= ~(IDE_DRV_MASK << dshift);
+		etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
+
+		cs5536_write(pdev, ETC, etc);
+	} else { /* MWDMA */
+		cs5536_read(pdev, DTC, &dtc);
+
+		dtc &= ~(IDE_DRV_MASK << dshift);
+		dtc |= mwdma_timings[mode - XFER_MW_DMA_0] << dshift;
+
+		cs5536_write(pdev, DTC, dtc);
+	}
+}
+
+static struct scsi_host_template cs5536_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+static struct ata_port_operations cs5536_port_ops = {
+	.set_piomode		= cs5536_set_piomode,
+	.set_dmamode		= cs5536_set_dmamode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= cs5536_cable_detect,
+
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	.port_start		= ata_port_start,
+};
+
+/**
+ *	cs5536_init_one
+ *	@dev: PCI device
+ *	@id: Entry in match table
+ *
+ */
+
+static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	static const struct ata_port_info info = {
+		.sht = &cs5536_sht,
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
+		.udma_mask = ATA_UDMA5,
+		.port_ops = &cs5536_port_ops,
+	};
+
+	const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+	u32 cfg;
+
+	if (use_msr)
+		printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
+
+	cs5536_read(dev, CFG, &cfg);
+
+	if ((cfg & IDE_CFG_CHANEN) == 0) {
+		printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
+		return -ENODEV;
+	}
+
+	return ata_pci_init_one(dev, ppi);
+}
+
+static const struct pci_device_id cs5536[] = {
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE), },
+	{ },
+};
+
+static struct pci_driver cs5536_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= cs5536,
+	.probe		= cs5536_init_one,
+	.remove		= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend	= ata_pci_device_suspend,
+	.resume		= ata_pci_device_resume,
+#endif
+};
+
+static int __init cs5536_init(void)
+{
+	return pci_register_driver(&cs5536_pci_driver);
+}
+
+static void __exit cs5536_exit(void)
+{
+	pci_unregister_driver(&cs5536_pci_driver);
+}
+
+MODULE_AUTHOR("Martin K. Petersen");
+MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5536);
+MODULE_VERSION(DRV_VERSION);
+module_param_named(msr, use_msr, int, 0644);
+MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
+
+module_init(cs5536_init);
+module_exit(cs5536_exit);
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index d41a769..fc5f9c4 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -128,7 +128,6 @@ static struct scsi_host_template cy82c693_sht = {
 };
 
 static struct ata_port_operations cy82c693_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= cy82c693_set_piomode,
 	.set_dmamode	= cy82c693_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -158,16 +157,15 @@ static struct ata_port_operations cy82c693_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &cy82c693_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &cy82c693_port_ops
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 079248a..043dcd3 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -26,25 +26,26 @@
 
 /**
  *	efar_pre_reset	-	Enable bits
- *	@ap: Port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Perform cable detection for the EFAR ATA interface. This is
  *	different to the PIIX arrangement
  */
 
-static int efar_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits efar_enable_bits[] = {
 		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
 		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
 	};
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -250,7 +251,6 @@ static struct scsi_host_template efar_sht = {
 };
 
 static const struct ata_port_operations efar_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= efar_set_piomode,
 	.set_dmamode		= efar_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -278,9 +278,8 @@ static const struct ata_port_operations efar_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -303,7 +302,7 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht		= &efar_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma1-2 */
 		.udma_mask 	= 0x0f, /* UDMA 66 */
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 0c9cb60..0713872 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -312,7 +312,6 @@ static struct scsi_host_template hpt36x_sht = {
  */
 
 static struct ata_port_operations hpt366_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt366_set_piomode,
 	.set_dmamode	= hpt366_set_dmamode,
 	.mode_filter	= hpt366_filter,
@@ -342,9 +341,8 @@ static struct ata_port_operations hpt366_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -393,10 +391,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info_hpt366 = {
 		.sht = &hpt36x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x1f,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &hpt366_port_ops
 	};
 	struct ata_port_info info = info_hpt366;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 6446735..46dc70e 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,12 +8,10 @@
  * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
  * Portions Copyright (C) 2003		Red Hat Inc
- * Portions Copyright (C) 2005-2006	MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2007	MontaVista Software, Inc.
  *
  * TODO
- *	PLL mode
- *	Look into engine reset on timeout errors. Should not be
- *		required.
+ *	Look into engine reset on timeout errors. Should not be required.
  */
 
 #include <linux/kernel.h>
@@ -26,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt37x"
-#define DRV_VERSION	"0.6.6"
+#define DRV_VERSION	"0.6.9"
 
 struct hpt_clock {
 	u8	xfer_speed;
@@ -297,7 +295,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
 
 static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
 {
-	if (adev->class != ATA_DEV_ATA) {
+	if (adev->class == ATA_DEV_ATA) {
 		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
 			mask &= ~ (0x1F << ATA_SHIFT_UDMA);
 	}
@@ -306,15 +304,16 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
 
 /**
  *	hpt37x_pre_reset	-	reset the hpt37x bus
- *	@ap: ATA port to reset
+ *	@link: ATA link to reset
  *	@deadline: deadline jiffies for the operation
  *
  *	Perform the initial reset handling for the 370/372 and 374 func 0
  */
 
-static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	u8 scr2, ata66;
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const struct pci_bits hpt37x_enable_bits[] = {
 		{ 0x50, 1, 0x04, 0x04 },
@@ -330,7 +329,7 @@ static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline)
 	/* Restore state */
 	pci_write_config_byte(pdev, 0x5B, scr2);
 
-	if (ata66 & (1 << ap->port_no))
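+	/* The cable sense bits are in reverse port order: bit 1 is port 0, bit 0 is port 1 */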
+	if (ata66 & (2 >> ap->port_no))
 		ap->cbl = ATA_CBL_PATA40;
 	else
 		ap->cbl = ATA_CBL_PATA80;
@@ -339,7 +338,7 @@ static int hpt37x_pre_reset(struct ata_port *ap, unsigned long deadline)
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -354,33 +353,31 @@ static void hpt37x_error_handler(struct ata_port *ap)
 	ata_bmdma_drive_eh(ap, hpt37x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
 }
 
-static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int hpt374_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits hpt37x_enable_bits[] = {
 		{ 0x50, 1, 0x04, 0x04 },
 		{ 0x54, 1, 0x04, 0x04 }
 	};
-	u16 mcr3, mcr6;
+	u16 mcr3;
 	u8 ata66;
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	unsigned int mcrbase = 0x50 + 4 * ap->port_no;
 
 	if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
 		return -ENOENT;
 
 	/* Do the extra channel work */
-	pci_read_config_word(pdev, 0x52, &mcr3);
-	pci_read_config_word(pdev, 0x56, &mcr6);
+	pci_read_config_word(pdev, mcrbase + 2, &mcr3);
 	/* Set bit 15 of 0x52 to enable TCBLID as input
-	   Set bit 15 of 0x56 to enable FCBLID as input
 	 */
-	pci_write_config_word(pdev, 0x52, mcr3 | 0x8000);
-	pci_write_config_word(pdev, 0x56, mcr6 | 0x8000);
+	pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000);
 	pci_read_config_byte(pdev, 0x5A, &ata66);
 	/* Reset TCBLID/FCBLID to output */
 	pci_write_config_word(pdev, 0x52, mcr3);
-	pci_write_config_word(pdev, 0x56, mcr6);
 
-	if (ata66 & (1 << ap->port_no))
+	if (ata66 & (2 >> ap->port_no))
 		ap->cbl = ATA_CBL_PATA40;
 	else
 		ap->cbl = ATA_CBL_PATA80;
@@ -389,7 +386,7 @@ static int hpt374_pre_reset(struct ata_port *ap, unsigned long deadline)
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -644,7 +641,6 @@ static struct scsi_host_template hpt37x_sht = {
  */
 
 static struct ata_port_operations hpt370_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt370_set_piomode,
 	.set_dmamode	= hpt370_set_dmamode,
 	.mode_filter	= hpt370_filter,
@@ -673,9 +669,8 @@ static struct ata_port_operations hpt370_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -683,7 +678,6 @@ static struct ata_port_operations hpt370_port_ops = {
  */
 
 static struct ata_port_operations hpt370a_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt370_set_piomode,
 	.set_dmamode	= hpt370_set_dmamode,
 	.mode_filter	= hpt370a_filter,
@@ -712,9 +706,8 @@ static struct ata_port_operations hpt370a_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -723,7 +716,6 @@ static struct ata_port_operations hpt370a_port_ops = {
  */
 
 static struct ata_port_operations hpt372_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt372_set_piomode,
 	.set_dmamode	= hpt372_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -752,9 +744,8 @@ static struct ata_port_operations hpt372_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /*
@@ -763,7 +754,6 @@ static struct ata_port_operations hpt372_port_ops = {
  */
 
 static struct ata_port_operations hpt374_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt372_set_piomode,
 	.set_dmamode	= hpt372_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -792,9 +782,8 @@ static struct ata_port_operations hpt374_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -852,6 +841,25 @@ static int hpt37x_calibrate_dpll(struct pci_dev *dev)
 	/* Never went stable */
 	return 0;
 }
+
+static u32 hpt374_read_freq(struct pci_dev *pdev)
+{
+	u32 freq;
+	unsigned long io_base = pci_resource_start(pdev, 4);
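+	/* On the second function the clock register is only reachable via function 0's BAR4 */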
+	if (PCI_FUNC(pdev->devfn) & 1) {
+		struct pci_dev *pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
+		/* Someone hot plugged the controller on us ? */
+		if (pdev_0 == NULL)
+			return 0;
+		io_base = pci_resource_start(pdev_0, 4);
+		freq = inl(io_base + 0x90);
+		pci_dev_put(pdev_0);
+	}
+	else
+		freq = inl(io_base + 0x90);
+	return freq;
+}
+
 /**
  *	hpt37x_init_one		-	Initialise an HPT37X/302
  *	@dev: PCI device
@@ -889,55 +897,55 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	/* HPT370 - UDMA100 */
 	static const struct ata_port_info info_hpt370 = {
 		.sht = &hpt37x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &hpt370_port_ops
 	};
 	/* HPT370A - UDMA100 */
 	static const struct ata_port_info info_hpt370a = {
 		.sht = &hpt37x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &hpt370a_port_ops
 	};
 	/* HPT370 - UDMA100 */
 	static const struct ata_port_info info_hpt370_33 = {
 		.sht = &hpt37x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x0f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &hpt370_port_ops
 	};
 	/* HPT370A - UDMA100 */
 	static const struct ata_port_info info_hpt370a_33 = {
 		.sht = &hpt37x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x0f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &hpt370a_port_ops
 	};
 	/* HPT371, 372 and friends - UDMA133 */
 	static const struct ata_port_info info_hpt372 = {
 		.sht = &hpt37x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt372_port_ops
 	};
-	/* HPT374 - UDMA133 */
+	/* HPT374 - UDMA100 */
 	static const struct ata_port_info info_hpt374 = {
 		.sht = &hpt37x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &hpt374_port_ops
 	};
 
@@ -1055,9 +1063,16 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		outb(0x0e, iobase + 0x9c);
 
 	/* Some devices do not let this value be accessed via PCI space
-	   according to the old driver */
+	   according to the old driver. In addition we must use the value
+	   from FN 0 on the HPT374 */
+
+	if (chip_table == &hpt374) {
+		freq = hpt374_read_freq(dev);
+		if (freq == 0)
+			return -ENODEV;
+	} else
+		freq = inl(iobase + 0x90);
 
-	freq = inl(iobase + 0x90);
 	if ((freq >> 12) != 0xABCDE) {
 		int i;
 		u8 sr;
@@ -1092,9 +1107,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		int dpll, adjust;
 
 		/* Compute DPLL */
-		dpll = 2;
-		if (port->udma_mask & 0xE0)
-			dpll = 3;
+		dpll = (port->udma_mask & 0xC0) ? 3 : 2;
 
 		f_low = (MHz[clock_slot] * 48) / MHz[dpll];
 		f_high = f_low + 2;
@@ -1103,20 +1116,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 
 		/* Select the DPLL clock. */
 		pci_write_config_byte(dev, 0x5b, 0x21);
-		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
 
 		for(adjust = 0; adjust < 8; adjust++) {
 			if (hpt37x_calibrate_dpll(dev))
 				break;
 			/* See if it'll settle at a fractionally different clock */
-			if ((adjust & 3) == 3) {
-				f_low --;
-				f_high ++;
-			}
-			pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+			if (adjust & 1)
+				f_low -= adjust >> 1;
+			else
+				f_high += adjust >> 1;
+			pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
 		}
 		if (adjust == 8) {
-			printk(KERN_WARNING "hpt37x: DPLL did not stabilize.\n");
+			printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
 			return -ENODEV;
 		}
 		if (dpll == 3)
@@ -1124,7 +1137,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		else
 			private_data = (void *)hpt37x_timings_50;
 
-		printk(KERN_INFO "hpt37x: Bus clock %dMHz, using DPLL.\n", MHz[dpll]);
+		printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n",
+		       MHz[clock_slot], MHz[dpll]);
 	} else {
 		private_data = (void *)chip_table->clocks[clock_slot];
 		/*
@@ -1137,7 +1151,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 			port = &info_hpt370_33;
 		if (clock_slot < 2 && port == &info_hpt370a)
 			port = &info_hpt370a_33;
-		printk(KERN_INFO "hpt37x: %s: Bus clock %dMHz.\n", chip_table->name, MHz[clock_slot]);
+		printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
+		       chip_table->name, MHz[clock_slot]);
 	}
 
 	/* Now kick off ATA set up */
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index e947433..9f1c084 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -8,7 +8,7 @@
  * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
  * Portions Copyright (C) 2003		Red Hat Inc
- * Portions Copyright (C) 2005-2006	MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2007	MontaVista Software, Inc.
  *
  *
  * TODO
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x2n"
-#define DRV_VERSION	"0.3.3"
+#define DRV_VERSION	"0.3.4"
 
 enum {
 	HPT_PCI_FAST	=	(1 << 31),
@@ -141,21 +141,22 @@ static int hpt3x2n_cable_detect(struct ata_port *ap)
 
 /**
  *	hpt3x2n_pre_reset	-	reset the hpt3x2n bus
- *	@ap: ATA port to reset
+ *	@link: ATA link to reset
  *	@deadline: deadline jiffies for the operation
  *
  *	Perform the initial reset handling for the 3x2n series controllers.
  *	Reset the hardware and state machine,
  */
 
-static int hpt3xn_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int hpt3xn_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	/* Reset the state machine */
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -360,7 +361,6 @@ static struct scsi_host_template hpt3x2n_sht = {
  */
 
 static struct ata_port_operations hpt3x2n_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt3x2n_set_piomode,
 	.set_dmamode	= hpt3x2n_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -390,9 +390,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -490,10 +489,10 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	/* HPT372N and friends - UDMA133 */
 	static const struct ata_port_info info = {
 		.sht = &hpt3x2n_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt3x2n_port_ops
 	};
 	struct ata_port_info port = info;
@@ -579,10 +578,12 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
 	}
 	if (adjust == 8) {
-		printk(KERN_WARNING "hpt3x2n: DPLL did not stabilize.\n");
+		printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n");
 		return -ENODEV;
 	}
 
+	printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
+	       pci_mhz);
 	/* Set our private data up. We only need a few flags so we use
 	   it directly */
 	port.private_data = NULL;
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 8ce5e23..cb8bdb6 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x3"
-#define DRV_VERSION	"0.4.3"
+#define DRV_VERSION	"0.5.3"
 
 /**
  *	hpt3x3_set_piomode		-	PIO setup
@@ -52,6 +52,7 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	pci_write_config_dword(pdev, 0x48, r2);
 }
 
+#if defined(CONFIG_PATA_HPT3X3_DMA)
 /**
  *	hpt3x3_set_dmamode		-	DMA timing setup
  *	@ap: ATA interface
@@ -59,6 +60,9 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
  *
  *	Set up the channel for MWDMA or UDMA modes. Much the same as with
  *	PIO, load the mode number and then set MWDMA or UDMA flag.
+ *
+ *	0x44 : bit 0-2 master mode, 3-5 slave mode, etc
+ *	0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc
  */
 
 static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
@@ -76,13 +80,26 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
 
 	if (adev->dma_mode >= XFER_UDMA_0)
-		r2 |= 0x01 << dn;	/* Ultra mode */
+		r2 |= (0x10 << dn);	/* Ultra mode */
 	else
-		r2 |= 0x10 << dn;	/* MWDMA */
+		r2 |= (0x01 << dn);	/* MWDMA */
 
 	pci_write_config_dword(pdev, 0x44, r1);
 	pci_write_config_dword(pdev, 0x48, r2);
 }
+#endif /* CONFIG_PATA_HPT3X3_DMA */
+
+/**
+ *	hpt3x3_atapi_dma	-	ATAPI DMA check
+ *	@qc: Queued command
+ *
+ *	Just say no - we don't do ATAPI DMA
+ */
+
+static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 1;
+}
 
 static struct scsi_host_template hpt3x3_sht = {
 	.module			= THIS_MODULE,
@@ -103,9 +120,10 @@ static struct scsi_host_template hpt3x3_sht = {
 };
 
 static struct ata_port_operations hpt3x3_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= hpt3x3_set_piomode,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
 	.set_dmamode	= hpt3x3_set_dmamode,
+#endif
 	.mode_filter	= ata_pci_default_filter,
 
 	.tf_load	= ata_tf_load,
@@ -124,6 +142,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
 	.bmdma_start 	= ata_bmdma_start,
 	.bmdma_stop	= ata_bmdma_stop,
 	.bmdma_status 	= ata_bmdma_status,
+	.check_atapi_dma= hpt3x3_atapi_dma,
 
 	.qc_prep 	= ata_qc_prep,
 	.qc_issue	= ata_qc_issue_prot,
@@ -133,9 +152,8 @@ static struct ata_port_operations hpt3x3_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -158,32 +176,83 @@ static void hpt3x3_init_chipset(struct pci_dev *dev)
 		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
 }
 
-
 /**
  *	hpt3x3_init_one		-	Initialise an HPT343/363
- *	@dev: PCI device
+ *	@pdev: PCI device
  *	@id: Entry in match table
  *
- *	Perform basic initialisation. The chip has a quirk that it won't
- *	function unless it is at XX00. The old ATA driver touched this up
- *	but we leave it for pci quirks to do properly.
+ *	Perform basic initialisation. We set the device up so we access all
+ *	ports via BAR4. This is necessary to work around errata.
  */
 
-static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht = &hpt3x3_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+		/* Further debug needed */
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x07,
+#endif
 		.port_ops = &hpt3x3_port_ops
 	};
+	/* Register offsets of taskfiles in BAR4 area */
+	static const u8 offset_cmd[2] = { 0x20, 0x28 };
+	static const u8 offset_ctl[2] = { 0x36, 0x3E };
 	const struct ata_port_info *ppi[] = { &info, NULL };
-
-	hpt3x3_init_chipset(dev);
-	/* Now kick off ATA set up */
-	return ata_pci_init_one(dev, ppi);
+	struct ata_host *host;
+	int i, rc;
+	void __iomem *base;
+
+	hpt3x3_init_chipset(pdev);
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+	/* acquire resources and fill host */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* Everything is relative to BAR4 if we set up this way */
+	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+	host->iomap = pcim_iomap_table(pdev);
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+
+	base = host->iomap[4];	/* Bus mastering base */
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
+
+		ioaddr->cmd_addr = base + offset_cmd[i];
+		ioaddr->altstatus_addr =
+		ioaddr->ctl_addr = base + offset_ctl[i];
+		ioaddr->scr_addr = NULL;
+		ata_std_ports(ioaddr);
+		ioaddr->bmdma_addr = base + 8 * i;
+
+		ata_port_pbar_desc(ap, 4, -1, "ioport");
+		ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
+	}
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &hpt3x3_sht);
 }
 
 #ifdef CONFIG_PM
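
For reference, the corrected hunk above programs config register 0x48 so that bit (4 + dn) selects UDMA and bit (0 + dn) selects MWDMA for drive index dn; the earlier code had the two masks swapped. A minimal sketch of that encoding (illustrative only, not part of the patch):

    /* Mirrors the fixed logic in hpt3x3_set_dmamode(): UDMA enable is
     * (0x10 << dn), MWDMA enable is (0x01 << dn), per the hunk above. */
    static u32 hpt3x3_dma_enable_bits(int dn, int want_udma)
    {
    	return want_udma ? (0x10u << dn) : (0x01u << dn);
    }
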
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index c791a46..842fe08 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -70,6 +70,8 @@ struct pata_icside_info {
 	unsigned int		mwdma_mask;
 	unsigned int		nr_ports;
 	const struct portinfo	*port[2];
+	unsigned long		raw_base;
+	unsigned long		raw_ioc_base;
 };
 
 #define ICS_TYPE_A3IN	0
@@ -330,17 +332,13 @@ static void ata_dummy_noret(struct ata_port *port)
 {
 }
 
-/*
- * We need to shut down unused ports to prevent spurious interrupts.
- * FIXME: the libata core doesn't call this function for PATA interfaces.
- */
-static void pata_icside_port_disable(struct ata_port *ap)
+static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
 {
+	struct ata_port *ap = link->ap;
 	struct pata_icside_state *state = ap->host->private_data;
 
-	ata_port_printk(ap, KERN_ERR, "disabling icside port\n");
-
-	ata_port_disable(ap);
+	if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
+		return ata_std_postreset(link, classes);
 
 	state->port[ap->port_no].disabled = 1;
 
@@ -356,26 +354,13 @@ static void pata_icside_port_disable(struct ata_port *ap)
 	}
 }
 
-static u8 pata_icside_irq_ack(struct ata_port *ap, unsigned int chk_drq)
+static void pata_icside_error_handler(struct ata_port *ap)
 {
-	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
-	u8 status;
-
-	status = ata_busy_wait(ap, bits, 1000);
-	if (status & bits)
-		if (ata_msg_err(ap))
-			printk(KERN_ERR "abnormal status 0x%X\n", status);
-
-	if (ata_msg_intr(ap))
-		printk(KERN_INFO "%s: irq ack: drv_stat 0x%X\n",
-			__FUNCTION__, status);
-
-	return status;
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
+			   pata_icside_postreset);
 }
 
 static struct ata_port_operations pata_icside_port_ops = {
-	.port_disable		= pata_icside_port_disable,
-
 	.set_dmamode		= pata_icside_set_dmamode,
 
 	.tf_load		= ata_tf_load,
@@ -397,12 +382,11 @@ static struct ata_port_operations pata_icside_port_ops = {
 
 	.freeze			= ata_bmdma_freeze,
 	.thaw			= ata_bmdma_thaw,
-	.error_handler		= ata_bmdma_error_handler,
+	.error_handler		= pata_icside_error_handler,
 	.post_internal_cmd	= pata_icside_bmdma_stop,
 
 	.irq_clear		= ata_dummy_noret,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= pata_icside_irq_ack,
 
 	.port_start		= pata_icside_port_start,
 
@@ -411,25 +395,34 @@ static struct ata_port_operations pata_icside_port_ops = {
 };
 
 static void __devinit
-pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base,
-			 const struct portinfo *info)
+pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
+			 struct pata_icside_info *info,
+			 const struct portinfo *port)
 {
-	void __iomem *cmd = base + info->dataoffset;
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	void __iomem *cmd = base + port->dataoffset;
 
 	ioaddr->cmd_addr	= cmd;
-	ioaddr->data_addr	= cmd + (ATA_REG_DATA    << info->stepping);
-	ioaddr->error_addr	= cmd + (ATA_REG_ERR     << info->stepping);
-	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << info->stepping);
-	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT   << info->stepping);
-	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL    << info->stepping);
-	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM    << info->stepping);
-	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH    << info->stepping);
-	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE  << info->stepping);
-	ioaddr->status_addr	= cmd + (ATA_REG_STATUS  << info->stepping);
-	ioaddr->command_addr	= cmd + (ATA_REG_CMD     << info->stepping);
-
-	ioaddr->ctl_addr	= base + info->ctrloffset;
+	ioaddr->data_addr	= cmd + (ATA_REG_DATA    << port->stepping);
+	ioaddr->error_addr	= cmd + (ATA_REG_ERR     << port->stepping);
+	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << port->stepping);
+	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT   << port->stepping);
+	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL    << port->stepping);
+	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM    << port->stepping);
+	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH    << port->stepping);
+	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE  << port->stepping);
+	ioaddr->status_addr	= cmd + (ATA_REG_STATUS  << port->stepping);
+	ioaddr->command_addr	= cmd + (ATA_REG_CMD     << port->stepping);
+
+	ioaddr->ctl_addr	= base + port->ctrloffset;
 	ioaddr->altstatus_addr	= ioaddr->ctl_addr;
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+		      info->raw_base + port->dataoffset,
+		      info->raw_base + port->ctrloffset);
+
+	if (info->raw_ioc_base)
+		ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
 }
 
 static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
@@ -450,6 +443,8 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
 	info->nr_ports = 1;
 	info->port[0] = &pata_icside_portinfo_v5;
 
+	info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);
+
 	return 0;
 }
 
@@ -484,19 +479,15 @@ static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
 	state->port[0].port_sel = sel;
 	state->port[1].port_sel = sel | 1;
 
-	/*
-	 * FIXME: work around libata's aversion to calling port_disable.
-	 * This permanently disables interrupts on port 0 - bad luck if
-	 * you have a drive on that port.
-	 */
-	state->port[0].disabled = 1;
-
 	info->base = easi_base;
 	info->irqops = &pata_icside_ops_arcin_v6;
 	info->nr_ports = 2;
 	info->port[0] = &pata_icside_portinfo_v6_1;
 	info->port[1] = &pata_icside_portinfo_v6_2;
 
+	info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
+	info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);
+
 	return icside_dma_init(info);
 }
 
@@ -530,10 +521,10 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
 
 		ap->pio_mask = 0x1f;
 		ap->mwdma_mask = info->mwdma_mask;
-		ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
+		ap->flags |= ATA_FLAG_SLAVE_POSS;
 		ap->ops = &pata_icside_port_ops;
 
-		pata_icside_setup_ioaddr(&ap->ioaddr, info->base, info->port[i]);
+		pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
 	}
 
 	return ata_host_activate(host, ec->irq, ata_interrupt, 0,
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 1f647b6..4320e79 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -17,7 +17,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_isapnp"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.2.2"
 
 static struct scsi_host_template isapnp_sht = {
 	.module			= THIS_MODULE,
@@ -38,7 +38,6 @@ static struct scsi_host_template isapnp_sht = {
 };
 
 static struct ata_port_operations isapnp_port_ops = {
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -58,9 +57,8 @@ static struct ata_port_operations isapnp_port_ops = {
 
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -77,14 +75,16 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
 	struct ata_host *host;
 	struct ata_port *ap;
 	void __iomem *cmd_addr, *ctl_addr;
-	int rc;
+	int irq = 0;
+	irq_handler_t handler = NULL;
 
 	if (pnp_port_valid(idev, 0) == 0)
 		return -ENODEV;
 
-	/* FIXME: Should selected polled PIO here not fail */
-	if (pnp_irq_valid(idev, 0) == 0)
-		return -ENODEV;
+	if (pnp_irq_valid(idev, 0)) {
+		irq = pnp_irq(idev, 0);
+		handler = ata_interrupt;
+	}
 
 	/* allocate host */
 	host = ata_host_alloc(&idev->dev, 1);
@@ -113,8 +113,12 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
 
 	ata_std_ports(&ap->ioaddr);
 
+	ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+		      (unsigned long long)pnp_port_start(idev, 0),
+		      (unsigned long long)pnp_port_start(idev, 1));
+
 	/* activate */
-	return ata_host_activate(host, pnp_irq(idev, 0), ata_interrupt, 0,
+	return ata_host_activate(host, irq, handler, 0,
 				 &isapnp_sht);
 }
 
@@ -140,6 +144,8 @@ static struct pnp_device_id isapnp_devices[] = {
 	{.id = ""}
 };
 
+MODULE_DEVICE_TABLE(pnp, isapnp_devices);
+
 static struct pnp_driver isapnp_driver = {
 	.name		= DRV_NAME,
 	.id_table	= isapnp_devices,
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 95b0bb6..1eda821 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -23,23 +23,24 @@
 
 /**
  *	it8213_pre_reset	-	check for 40/80 pin
- *	@ap: Port
+ *	@link: link
  *	@deadline: deadline jiffies for the operation
  *
  *	Filter out ports by the enable bits before doing the normal reset
  *	and probe.
  */
 
-static int it8213_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int it8213_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits it8213_enable_bits[] = {
 		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
 	};
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -260,7 +261,6 @@ static struct scsi_host_template it8213_sht = {
 };
 
 static const struct ata_port_operations it8213_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= it8213_set_piomode,
 	.set_dmamode		= it8213_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -288,9 +288,8 @@ static const struct ata_port_operations it8213_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -313,10 +312,10 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
 	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht		= &it8213_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask 	= 0x1f, /* UDMA 100 */
+		.udma_mask 	= ATA_UDMA4, /* FIXME: want UDMA 100? */
 		.port_ops	= &it8213_ops,
 	};
 	/* Current IT8213 stuff is single port */
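
The it8213_pre_reset() conversion above follows the prereset signature change that recurs throughout this update (jmicron, marvell, mpiix, ns87410, oldpiix): the hook now receives an ata_link and reaches the port through link->ap before deferring to ata_std_prereset(). A minimal sketch of the shape, with a hypothetical enable-bit check standing in for each driver's PCI config test:

    static int example_pre_reset(struct ata_link *link, unsigned long deadline)
    {
    	struct ata_port *ap = link->ap;

    	if (!example_port_enabled(ap))	/* hypothetical per-driver gate */
    		return -ENOENT;		/* port disabled: skip probing it */

    	return ata_std_prereset(link, deadline);
    }
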
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index b3456d7..d8c4f4e 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -2,6 +2,7 @@
  * pata_it821x.c 	- IT821x PATA for new ATA layer
  *			  (C) 2005 Red Hat Inc
  *			  Alan Cox <alan@redhat.com>
+ *			  (C) 2007 Bartlomiej Zolnierkiewicz
  *
  * based upon
  *
@@ -79,7 +80,7 @@
 
 
 #define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.6"
+#define DRV_VERSION "0.3.8"
 
 struct it821x_dev
 {
@@ -104,7 +105,7 @@ struct it821x_dev
 
 /*
  *	We allow users to force the card into non raid mode without
- *	flashing the alternative BIOS. This is also neccessary right now
+ *	flashing the alternative BIOS. This is also necessary right now
  *	for embedded platforms that cannot run a PC BIOS but are using this
  *	device.
  */
@@ -382,7 +383,7 @@ static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
  *	@ap: ATA port
  *	@device: Device number (not pointer)
  *
- *	Device selection hook. If neccessary perform clock switching
+ *	Device selection hook. If necessary perform clock switching
  */
 
 static void it821x_passthru_dev_select(struct ata_port *ap,
@@ -390,7 +391,7 @@ static void it821x_passthru_dev_select(struct ata_port *ap,
 {
 	struct it821x_dev *itdev = ap->private_data;
 	if (itdev && device != itdev->last_device) {
-		struct ata_device *adev = &ap->device[device];
+		struct ata_device *adev = &ap->link.device[device];
 		it821x_program(ap, adev, itdev->pio[adev->devno]);
 		itdev->last_device = device;
 	}
@@ -449,7 +450,7 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
 
 /**
  *	it821x_smart_set_mode	-	mode setting
- *	@ap: interface to set up
+ *	@link: interface to set up
  *	@unused: device that failed (error only)
  *
  *	Use a non standard set_mode function. We don't want to be tuned.
@@ -458,25 +459,18 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
  *	and respect them.
  */
 
-static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused)
+static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused)
 {
-	int dma_enabled = 0;
-	int i;
+	struct ata_device *dev;
 
-	/* Bits 5 and 6 indicate if DMA is active on master/slave */
-	/* It is possible that BMDMA isn't allocated */
-	if (ap->ioaddr.bmdma_addr)
-		dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 			/* We don't really care */
 			dev->pio_mode = XFER_PIO_0;
 			dev->dma_mode = XFER_MW_DMA_0;
 			/* We do need the right mode information for DMA or PIO
 			   and this comes from the current configuration flags */
-			if (dma_enabled & (1 << (5 + i))) {
+			if (ata_id_has_dma(dev->id)) {
 				ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
 				dev->xfer_mode = XFER_MW_DMA_0;
 				dev->xfer_shift = ATA_SHIFT_MWDMA;
@@ -538,6 +532,10 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct it821x_dev *itdev = ap->private_data;
 
+	/* Only use dma for transfers to/from the media. */
+	if (qc->nbytes < 2048)
+		return -EOPNOTSUPP;
+
 	/* No ATAPI DMA in smart mode */
 	if (itdev->smart)
 		return -EOPNOTSUPP;
@@ -564,8 +562,9 @@ static int it821x_port_start(struct ata_port *ap)
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	struct it821x_dev *itdev;
 	u8 conf;
+	u8 pdev_revision;
 
-	int ret = ata_port_start(ap);
+	int ret = ata_sff_port_start(ap);
 	if (ret < 0)
 		return ret;
 
@@ -592,8 +591,8 @@ static int it821x_port_start(struct ata_port *ap)
 	itdev->want[1][1] = ATA_ANY;
 	itdev->last_device = -1;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &conf);
-	if (conf == 0x10) {
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
+	if (pdev_revision == 0x10) {
 		itdev->timing10 = 1;
 		/* Need to disable ATAPI DMA for this case */
 		if (!itdev->smart)
@@ -623,7 +622,6 @@ static struct scsi_host_template it821x_sht = {
 
 static struct ata_port_operations it821x_smart_port_ops = {
 	.set_mode	= it821x_smart_set_mode,
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.mode_filter	= ata_pci_default_filter,
@@ -653,13 +651,11 @@ static struct ata_port_operations it821x_smart_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= it821x_port_start,
 };
 
 static struct ata_port_operations it821x_passthru_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= it821x_passthru_set_piomode,
 	.set_dmamode	= it821x_passthru_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -690,12 +686,11 @@ static struct ata_port_operations it821x_passthru_port_ops = {
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_handler	= ata_interrupt,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= it821x_port_start,
 };
 
-static void __devinit it821x_disable_raid(struct pci_dev *pdev)
+static void it821x_disable_raid(struct pci_dev *pdev)
 {
 	/* Reset local CPU, and set BIOS not ready */
 	pci_write_config_byte(pdev, 0x5E, 0x01);
@@ -719,17 +714,17 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	static const struct ata_port_info info_smart = {
 		.sht = &it821x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &it821x_smart_port_ops
 	};
 	static const struct ata_port_info info_passthru = {
 		.sht = &it821x_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA6,
 		.port_ops = &it821x_passthru_port_ops
 	};
 
@@ -799,7 +794,7 @@ MODULE_VERSION(DRV_VERSION);
 
 
 module_param_named(noraid, it8212_noraid, int, S_IRUGO);
-MODULE_PARM_DESC(it8212_noraid, "Force card into bypass mode");
+MODULE_PARM_DESC(noraid, "Force card into bypass mode");
 
 module_init(it821x_init);
 module_exit(it821x_exit);
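
Note the contract that both it821x_check_atapi_dma() above and hpt3x3_atapi_dma() earlier rely on: any non-zero return from ->check_atapi_dma makes libata fall back to PIO for that ATAPI command, so returning 1 and returning -EOPNOTSUPP are equally effective refusals. A sketch of the hook at its simplest:

    static int example_check_atapi_dma(struct ata_queued_cmd *qc)
    {
    	return -EOPNOTSUPP;	/* decline DMA; libata will use PIO */
    }
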
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 8d2bc1e..fcd532a 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -1,13 +1,14 @@
 /*
  * ixp4xx PATA/Compact Flash driver
- * Copyright (c) 2006 Tower Technologies
+ * Copyright (C) 2006-07 Tower Technologies
  * Author: Alessandro Zummo <a.zummo@towertech.it>
  *
  * An ATA driver to handle a Compact Flash connected
  * to the ixp4xx expansion bus in TrueIDE mode. The CF
  * must have it chip selects connected to two CS lines
- * on the ixp4xx. The interrupt line is optional, if not
- * specified the driver will run in polling mode.
+ * on the ixp4xx. If the irq is not available, you might
+ * want to modify both this driver and libata to run in
+ * polling mode.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -23,14 +24,13 @@
 #include <scsi/scsi_host.h>
 
 #define DRV_NAME	"pata_ixp4xx_cf"
-#define DRV_VERSION	"0.1.3"
+#define DRV_VERSION	"0.2"
 
-static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error)
+static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
 {
-	int i;
+	struct ata_device *dev;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 			ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
 			dev->pio_mode = XFER_PIO_0;
@@ -42,21 +42,14 @@ static int ixp4xx_set_mode(struct ata_port *ap, struct ata_device **error)
 	return 0;
 }
 
-static void ixp4xx_phy_reset(struct ata_port *ap)
-{
-	ap->cbl = ATA_CBL_PATA40;
-	ata_port_probe(ap);
-	ata_bus_reset(ap);
-}
-
 static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
 				unsigned int buflen, int write_data)
 {
 	unsigned int i;
 	unsigned int words = buflen >> 1;
 	u16 *buf16 = (u16 *) buf;
-	struct ata_port *ap = adev->ap;
-	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+	struct ata_port *ap = adev->link->ap;
+	void __iomem *mmio = ap->ioaddr.data_addr;
 	struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
 
 	/* set the expansion bus in 16bit mode and restore
@@ -92,10 +85,6 @@ static void ixp4xx_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
 	*data->cs0_cfg |= 0x01;
 }
 
-static void ixp4xx_irq_clear(struct ata_port *ap)
-{
-}
-
 static struct scsi_host_template ixp4xx_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -115,34 +104,39 @@ static struct scsi_host_template ixp4xx_sht = {
 };
 
 static struct ata_port_operations ixp4xx_port_ops = {
-	.set_mode	= ixp4xx_set_mode,
-	.mode_filter	= ata_pci_default_filter,
-
-	.port_disable	= ata_port_disable,
-	.tf_load	= ata_tf_load,
-	.tf_read	= ata_tf_read,
-	.check_status 	= ata_check_status,
-	.exec_command	= ata_exec_command,
-	.dev_select 	= ata_std_dev_select,
-
-	.qc_prep 	= ata_qc_prep,
-	.qc_issue	= ata_qc_issue_prot,
-	.eng_timeout	= ata_eng_timeout,
-	.data_xfer	= ixp4xx_mmio_data_xfer,
-	.cable_detect	= ata_cable_40wire,
-
-	.irq_clear	= ixp4xx_irq_clear,
-	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
-
-	.port_start	= ata_port_start,
-
-	.phy_reset	= ixp4xx_phy_reset,
+	.set_mode		= ixp4xx_set_mode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.exec_command		= ata_exec_command,
+	.check_status 		= ata_check_status,
+	.dev_select 		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
+	.qc_prep 		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ixp4xx_mmio_data_xfer,
+	.cable_detect		= ata_cable_40wire,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	.port_start		= ata_port_start,
 };
 
-static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
-				struct ixp4xx_pata_data *data)
+static void ixp4xx_setup_port(struct ata_port *ap,
+			      struct ixp4xx_pata_data *data,
+			      unsigned long raw_cs0, unsigned long raw_cs1)
 {
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned long raw_cmd = raw_cs0;
+	unsigned long raw_ctl = raw_cs1 + 0x06;
+
 	ioaddr->cmd_addr	= data->cs0;
 	ioaddr->altstatus_addr	= data->cs1 + 0x06;
 	ioaddr->ctl_addr	= data->cs1 + 0x06;
@@ -168,7 +162,12 @@ static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
 	*(unsigned long *)&ioaddr->device_addr		^= 0x03;
 	*(unsigned long *)&ioaddr->status_addr		^= 0x03;
 	*(unsigned long *)&ioaddr->command_addr		^= 0x03;
+
+	raw_cmd ^= 0x03;
+	raw_ctl ^= 0x03;
 #endif
+
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
 }
 
 static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
@@ -178,7 +177,6 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
 	struct ata_host *host;
 	struct ata_port *ap;
 	struct ixp4xx_pata_data *data = pdev->dev.platform_data;
-	int rc;
 
 	cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -197,6 +195,9 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
 	data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
 	data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
 
+	if (!data->cs0 || !data->cs1)
+		return -ENOMEM;
+
 	irq = platform_get_irq(pdev, 0);
 	if (irq)
 		set_irq_type(irq, IRQT_RISING);
@@ -211,11 +212,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
 	ap->pio_mask = 0x1f; /* PIO4 */
 	ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI;
 
-	/* run in polling mode if no irq has been assigned */
-	if (!irq)
-		ap->flags |= ATA_FLAG_PIO_POLLING;
-
-	ixp4xx_setup_port(&ap->ioaddr, data);
+	ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
 
 	dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 2af7ff8..5b8174d 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -29,7 +29,7 @@ typedef enum {
 
 /**
  *	jmicron_pre_reset	-	check for 40/80 pin
- *	@ap: Port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Perform the PATA port setup we need.
@@ -39,9 +39,9 @@ typedef enum {
  *	and setup here. We assume that has been done by init_one and the
  *	BIOS.
  */
-
-static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	u32 control;
 	u32 control5;
@@ -80,11 +80,10 @@ static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
 	 *	actually do our cable checking etc. Thankfully we don't need
 	 *	to do the plumbing for other cases.
 	 */
-	switch (port_map[port])
-	{
+	switch (port_map[port]) {
 	case PORT_PATA0:
-		if (control & (1 << 5))
-			return 0;
+		if ((control & (1 << 5)) == 0)
+			return -ENOENT;
 		if (control & (1 << 3))	/* 40/80 pin primary */
 			ap->cbl = ATA_CBL_PATA40;
 		else
@@ -93,7 +92,7 @@ static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
 	case PORT_PATA1:
 		/* Bit 21 is set if the port is enabled */
 		if ((control5 & (1 << 21)) == 0)
-			return 0;
+			return -ENOENT;
 		if (control5 & (1 << 19))	/* 40/80 pin secondary */
 			ap->cbl = ATA_CBL_PATA40;
 		else
@@ -103,7 +102,7 @@ static int jmicron_pre_reset(struct ata_port *ap, unsigned long deadline)
 		ap->cbl = ATA_CBL_SATA;
 		break;
 	}
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -141,8 +140,6 @@ static struct scsi_host_template jmicron_sht = {
 };
 
 static const struct ata_port_operations jmicron_ops = {
-	.port_disable		= ata_port_disable,
-
 	/* Task file is PCI ATA format, use helpers */
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -168,7 +165,6 @@ static const struct ata_port_operations jmicron_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	/* Generic PATA PCI ATA helpers */
 	.port_start		= ata_port_start,
@@ -193,11 +189,11 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
 {
 	static const struct ata_port_info info = {
 		.sht		= &jmicron_sht,
-		.flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags	= ATA_FLAG_SLAVE_POSS,
 
 		.pio_mask	= 0x1f,
 		.mwdma_mask	= 0x07,
-		.udma_mask 	= 0x3f,
+		.udma_mask 	= ATA_UDMA5,
 
 		.port_ops	= &jmicron_ops,
 	};
@@ -207,17 +203,8 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
 }
 
 static const struct pci_device_id jmicron_pci_tbl[] = {
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361,
-	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 361 },
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363,
-	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 363 },
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365,
-	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 365 },
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366,
-	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 366 },
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368,
-	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 368 },
-
+	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
 	{ }	/* terminate list */
 };
 
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index edffc25..7bed8d8 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -96,7 +96,7 @@ static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
 
 /**
  *	legacy_set_mode		-	mode setting
- *	@ap: IDE interface
+ *	@link: IDE link
  *	@unused: Device that failed when error is returned
  *
  *	Use a non standard set_mode function. We don't want to be tuned.
@@ -107,12 +107,11 @@ static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
  *	expand on this as per hdparm in the base kernel.
  */
 
-static int legacy_set_mode(struct ata_port *ap, struct ata_device **unused)
+static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
 {
-	int i;
+	struct ata_device *dev;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 			ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
 			dev->pio_mode = XFER_PIO_0;
@@ -151,7 +150,6 @@ static struct scsi_host_template legacy_sht = {
  */
 
 static struct ata_port_operations simple_port_ops = {
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -172,7 +170,6 @@ static struct ata_port_operations simple_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -180,7 +177,6 @@ static struct ata_port_operations simple_port_ops = {
 static struct ata_port_operations legacy_port_ops = {
 	.set_mode	= legacy_set_mode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -201,7 +197,6 @@ static struct ata_port_operations legacy_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -256,7 +251,7 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
 
 static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
 {
-	struct ata_port *ap = adev->ap;
+	struct ata_port *ap = adev->link->ap;
 	int slop = buflen & 3;
 	unsigned long flags;
 
@@ -296,7 +291,6 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
 static struct ata_port_operations pdc20230_port_ops = {
 	.set_piomode	= pdc20230_set_piomode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -317,7 +311,6 @@ static struct ata_port_operations pdc20230_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -352,7 +345,6 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
 static struct ata_port_operations ht6560a_port_ops = {
 	.set_piomode	= ht6560a_set_piomode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -373,7 +365,6 @@ static struct ata_port_operations ht6560a_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -419,7 +410,6 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
 static struct ata_port_operations ht6560b_port_ops = {
 	.set_piomode	= ht6560b_set_piomode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -440,7 +430,6 @@ static struct ata_port_operations ht6560b_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -541,7 +530,6 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev
 static struct ata_port_operations opti82c611a_port_ops = {
 	.set_piomode	= opti82c611a_set_piomode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -562,7 +550,6 @@ static struct ata_port_operations opti82c611a_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -675,7 +662,6 @@ static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
 static struct ata_port_operations opti82c46x_port_ops = {
 	.set_piomode	= opti82c46x_set_piomode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -696,7 +682,6 @@ static struct ata_port_operations opti82c46x_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_port_start,
 };
@@ -814,6 +799,8 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
 	ata_std_ports(&ap->ioaddr);
 	ap->private_data = ld;
 
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, ctrl);
+
 	ret = ata_host_activate(host, irq, ata_interrupt, 0, &legacy_sht);
 	if (ret)
 		goto fail;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index edbfe0d..9afc8a3 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -24,14 +24,15 @@
 
 /**
  *	marvell_pre_reset	-	check for 40/80 pin
- *	@ap: Port
+ *	@link: link
  *	@deadline: deadline jiffies for the operation
  *
  *	Perform the PATA port setup we need.
  */
 
-static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	u32 devices;
 	void __iomem *barp;
@@ -44,17 +45,17 @@ static int marvell_pre_reset(struct ata_port *ap, unsigned long deadline)
 		return -ENOMEM;
 	printk("BAR5:");
 	for(i = 0; i <= 0x0F; i++)
-		printk("%02X:%02X ", i, readb(barp + i));
+		printk("%02X:%02X ", i, ioread8(barp + i));
 	printk("\n");
 
-	devices = readl(barp + 0x0C);
+	devices = ioread32(barp + 0x0C);
 	pci_iounmap(pdev, barp);
 
 	if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
 	    (!(devices & 0x10)))	/* PATA enable ? */
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 static int marvell_cable_detect(struct ata_port *ap)
@@ -110,8 +111,6 @@ static struct scsi_host_template marvell_sht = {
 };
 
 static const struct ata_port_operations marvell_ops = {
-	.port_disable		= ata_port_disable,
-
 	/* Task file is PCI ATA format, use helpers */
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -138,10 +137,9 @@ static const struct ata_port_operations marvell_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	/* Generic PATA PCI ATA helpers */
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -163,22 +161,22 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
 {
 	static const struct ata_port_info info = {
 		.sht		= &marvell_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 
 		.pio_mask	= 0x1f,
 		.mwdma_mask	= 0x07,
-		.udma_mask 	= 0x3f,
+		.udma_mask 	= ATA_UDMA5,
 
 		.port_ops	= &marvell_ops,
 	};
 	static const struct ata_port_info info_sata = {
 		.sht		= &marvell_sht,
 		/* Slave possible as its magically mapped not real */
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 
 		.pio_mask	= 0x1f,
 		.mwdma_mask	= 0x07,
-		.udma_mask 	= 0x7f,
+		.udma_mask 	= ATA_UDMA6,
 
 		.port_ops	= &marvell_ops,
 	};
@@ -192,6 +190,8 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
 
 static const struct pci_device_id marvell_pci_tbl[] = {
 	{ PCI_DEVICE(0x11AB, 0x6101), },
+	{ PCI_DEVICE(0x11AB, 0x6121), },
+	{ PCI_DEVICE(0x11AB, 0x6123), },
 	{ PCI_DEVICE(0x11AB, 0x6145), },
 	{ }	/* terminate list */
 };
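
The udma_mask changes above (and in the it8213, it821x, jmicron and netcell hunks) swap raw hex bitmaps for the named cumulative masks from <linux/ata.h>; assuming the standard definitions, the values are identical:

    /* Each named mask sets one bit per supported mode up to the maximum:
     *   ATA_UDMA4 == 0x1f, ATA_UDMA5 == 0x3f, ATA_UDMA6 == 0x7f
     * i.e. mask(n) == (1 << (n + 1)) - 1.  Illustrative macro only: */
    #define UDMA_MASK_UP_TO(n)	((1u << ((n) + 1)) - 1)
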
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 368fac7..50c56e2 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -24,7 +24,7 @@
 
 
 #define DRV_NAME	"mpc52xx_ata"
-#define DRV_VERSION	"0.1.0ac2"
+#define DRV_VERSION	"0.1.2"
 
 
 /* Private structures used by the driver */
@@ -283,7 +283,6 @@ static struct scsi_host_template mpc52xx_ata_sht = {
 };
 
 static struct ata_port_operations mpc52xx_ata_port_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= mpc52xx_ata_set_piomode,
 	.dev_select		= mpc52xx_ata_dev_select,
 	.tf_load		= ata_tf_load,
@@ -299,17 +298,16 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.port_start		= ata_port_start,
 };
 
 static int __devinit
-mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv)
+mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
+		     unsigned long raw_ata_regs)
 {
 	struct ata_host *host;
 	struct ata_port *ap;
 	struct ata_ioports *aio;
-	int rc;
 
 	host = ata_host_alloc(dev, 1);
 	if (!host)
@@ -338,6 +336,8 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv)
 	aio->status_addr	= &priv->ata_regs->tf_command;
 	aio->command_addr	= &priv->ata_regs->tf_command;
 
+	ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
+
 	/* activate host */
 	return ata_host_activate(host, priv->ata_irq, ata_interrupt, 0,
 				 &mpc52xx_ata_sht);
@@ -434,7 +434,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
 	}
 
 	/* Register ourselves to libata */
-	rv = mpc52xx_ata_init_one(&op->dev, priv);
+	rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start);
 	if (rv) {
 		printk(KERN_ERR DRV_NAME ": "
 			"Error while registering to ATA layer\n");
@@ -467,13 +467,27 @@ mpc52xx_ata_remove(struct of_device *op)
 static int
 mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
 {
-	return 0;	/* FIXME : What to do here ? */
+	struct ata_host *host = dev_get_drvdata(&op->dev);
+
+	return ata_host_suspend(host, state);
 }
 
 static int
 mpc52xx_ata_resume(struct of_device *op)
 {
-	return 0;	/* FIXME : What to do here ? */
+	struct ata_host *host = dev_get_drvdata(&op->dev);
+	struct mpc52xx_ata_priv *priv = host->private_data;
+	int rv;
+
+	rv = mpc52xx_ata_hw_init(priv);
+	if (rv) {
+		printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+		return rv;
+	}
+
+	ata_host_resume(host);
+
+	return 0;
 }
 
 #endif
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 4ea4283..c0d9e0c 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -46,15 +46,16 @@ enum {
 	SECONDARY = (1 << 14)
 };
 
-static int mpiix_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
 
 	if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -128,7 +129,7 @@ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings if
- *	neccessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
  *	that, even if we get this wrong, cycles to the other device will
  *	be made PIO0.
  */
@@ -168,7 +169,6 @@ static struct scsi_host_template mpiix_sht = {
 };
 
 static struct ata_port_operations mpiix_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= mpiix_set_piomode,
 
 	.tf_load	= ata_tf_load,
@@ -189,9 +189,8 @@ static struct ata_port_operations mpiix_port_ops = {
 
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
@@ -202,7 +201,7 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	struct ata_port *ap;
 	void __iomem *cmd_addr, *ctl_addr;
 	u16 idetim;
-	int irq;
+	int cmd, ctl, irq;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
@@ -210,6 +209,7 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	host = ata_host_alloc(&dev->dev, 1);
 	if (!host)
 		return -ENOMEM;
+	ap = host->ports[0];
 
 	/* MPIIX has many functions which can be turned on or off according
 	   to other devices present. Make sure IDE is enabled before we try
@@ -221,25 +221,28 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 
 	/* See if it's primary or secondary channel... */
 	if (!(idetim & SECONDARY)) {
+		cmd = 0x1F0;
+		ctl = 0x3F6;
 		irq = 14;
-		cmd_addr = devm_ioport_map(&dev->dev, 0x1F0, 8);
-		ctl_addr = devm_ioport_map(&dev->dev, 0x3F6, 1);
 	} else {
+		cmd = 0x170;
+		ctl = 0x376;
 		irq = 15;
-		cmd_addr = devm_ioport_map(&dev->dev, 0x170, 8);
-		ctl_addr = devm_ioport_map(&dev->dev, 0x376, 1);
 	}
 
+	cmd_addr = devm_ioport_map(&dev->dev, cmd, 8);
+	ctl_addr = devm_ioport_map(&dev->dev, ctl, 1);
 	if (!cmd_addr || !ctl_addr)
 		return -ENOMEM;
 
+	ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl);
+
 	/* We do our own plumbing to avoid leaking special cases for whacko
 	   ancient hardware into the core code. There are two issues to
 	   worry about.  #1 The chip is a bridge so if in legacy mode and
 	   without BARs set fools the setup.  #2 If you pci_disable_device
 	   the MPIIX your box goes castors up */
 
-	ap = host->ports[0];
 	ap->ops = &mpiix_port_ops;
 	ap->pio_mask = 0x1F;
 	ap->flags |= ATA_FLAG_SLAVE_POSS;
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 81f5634..25c922a 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -40,8 +40,6 @@ static struct scsi_host_template netcell_sht = {
 };
 
 static const struct ata_port_operations netcell_ops = {
-	.port_disable		= ata_port_disable,
-
 	/* Task file is PCI ATA format, use helpers */
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -68,10 +66,9 @@ static const struct ata_port_operations netcell_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	/* Generic PATA PCI ATA helpers */
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -94,12 +91,12 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht		= &netcell_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		/* Actually we don't really care about these as the
 		   firmware deals with it */
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask 	= 0x3f, /* UDMA 133 */
+		.udma_mask 	= ATA_UDMA5, /* UDMA 133 */
 		.port_ops	= &netcell_ops,
 	};
 	const struct ata_port_info *port_info[] = { &info, NULL };
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index ea70ec7..9fe66fd 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -32,14 +32,15 @@
 
 /**
  *	ns87410_pre_reset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Check enabled ports
  */
 
-static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const struct pci_bits ns87410_enable_bits[] = {
 		{ 0x43, 1, 0x08, 0x08 },
@@ -49,7 +50,7 @@ static int ns87410_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -123,7 +124,7 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings if
- *	neccessary.
+ *	necessary.
  */
 
 static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
@@ -161,7 +162,6 @@ static struct scsi_host_template ns87410_sht = {
 };
 
 static struct ata_port_operations ns87410_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= ns87410_set_piomode,
 
 	.tf_load	= ata_tf_load,
@@ -184,16 +184,15 @@ static struct ata_port_operations ns87410_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &ns87410_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x0F,
 		.port_ops = &ns87410_port_ops
 	};
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
new file mode 100644
index 0000000..d0e2e50
--- /dev/null
+++ b/drivers/ata/pata_ns87415.c
@@ -0,0 +1,469 @@
+/*
+ *    pata_ns87415.c - NS87415 (non PARISC) PATA
+ *
+ *	(C) 2005 Red Hat <alan@redhat.com>
+ *
+ *    This is a fairly generic MWDMA controller. It has some limitations
+ *    as it requires timing reloads on PIO/DMA transitions but it is otherwise
+ *    fairly well designed.
+ *
+ *    This driver assumes the firmware has left the chip in a valid ST506
+ *    compliant state, either legacy IRQ 14/15 or native INTA shared. You
+ *    may need to add platform code if your system fails to do this.
+ *
+ *    The same cell appears in the 87560 controller used by some PARISC
+ *    systems. This has its own special mountain of errata.
+ *
+ *    TODO:
+ *	Test PARISC SuperIO
+ *	Get someone to test on SPARC
+ *	Implement lazy pio/dma switching for better performance
+ *	8bit shared timing.
+ *	See if we need to kill the FIFO for ATAPI
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME	"pata_ns87415"
+#define DRV_VERSION	"0.0.1"
+
+/**
+ *	ns87415_set_mode - Initialize host controller mode timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device whose timings we are configuring
+ *	@mode: Mode to set
+ *
+ *	Program the mode registers for this controller, channel and
+ *	device. Because the chip is quite an old design we have to do this
+ *	for PIO/DMA switches.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
+	int unit		= 2 * ap->port_no + adev->devno;
+	int timing		= 0x44 + 2 * unit;
+	unsigned long T		= 1000000000 / 33333;	/* PCI clocks */
+	struct ata_timing t;
+	u16 clocking;
+	u8 iordy;
+	u8 status;
+
+	/* Timing register format is 17 - low nybble read timing with
+	   the high nybble being 16 - x for recovery time in PCI clocks */
+
+	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
+
+	clocking = 17 - FIT(t.active, 2, 17);
+	clocking |= (16 - FIT(t.recover, 1, 16)) << 4;
+ 	/* Use the same timing for read and write bytes */
+	clocking |= (clocking << 8);
+	pci_write_config_word(dev, timing, clocking);
+
+	/* Set the IORDY enable versus DMA enable on or off properly */
+	pci_read_config_byte(dev, 0x42, &iordy);
+	iordy &= ~(1 << (4 + unit));
+	if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
+		iordy |= (1 << (4 + unit));
+
+	/* Paranoia: We shouldn't ever get here with busy write buffers
+	   but if so wait */
+
+	pci_read_config_byte(dev, 0x43, &status);
+	while (status & 0x03) {
+		udelay(1);
+		pci_read_config_byte(dev, 0x43, &status);
+	}
+	/* Flip the IORDY/DMA bits now we are sure the write buffers are
+	   clear */
+	pci_write_config_byte(dev, 0x42, iordy);
+
+	/* TODO: Set byte 54 command timing to the best 8bit
+	   mode shared by all four devices */
+}
+
+/**
+ *	ns87415_set_piomode - Initialize host controller PATA PIO timings
+ *	@ap: Port whose timings we are configuring
+ *	@adev: Device to program
+ *
+ *	Set PIO mode for device, in host controller PCI config space.
+ *
+ *	LOCKING:
+ *	None (inherited from caller).
+ */
+
+static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	ns87415_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ *	ns87415_bmdma_setup		-	Set up DMA
+ *	@qc: Command block
+ *
+ *	Set up for bus mastering DMA. We have to do this ourselves
+ *	rather than use the helper due to a chip erratum
+ */
+
+static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	/* Due to an erratum we need to write these bits to the wrong
+	   place - which does save us an I/O bizarrely */
+	dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ns87415_bmdma_start		-	Begin DMA transfer
+ *	@qc: Command block
+ *
+ *	Switch the timings for the chip and set up for a DMA transfer
+ *	before the DMA burst begins.
+ *
+ *	FIXME: We should do lazy switching on bmdma_start versus
+ *	ata_pio_data_xfer for better performance.
+ */
+
+static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
+{
+	ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
+	ata_bmdma_start(qc);
+}
+
+/**
+ *	ns87415_bmdma_stop		-	End DMA transfer
+ *	@qc: Command block
+ *
+ *	End DMA mode and switch the controller back into PIO mode
+ */
+
+static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	ata_bmdma_stop(qc);
+	ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+/**
+ *	ns87415_bmdma_irq_clear		-	Clear interrupt
+ *	@ap: Channel to clear
+ *
+ *	Erratum: Due to a chip bug, registers 02 and 0A bits 1 and 2 (the
+ *	error bits) are reset by writing to register 00 or 08.
+ */
+
+static void ns87415_bmdma_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	if (!mmio)
+		return;
+	iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR),
+			mmio + ATA_DMA_CMD);
+}
+
+/**
+ *	ns87415_check_atapi_dma		-	ATAPI DMA filter
+ *	@qc: Command block
+ *
+ *	Disable ATAPI DMA (for now). We may be able to do DMA if we
+ *	kill the prefetching. This isn't clear.
+ */
+
+static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return -EOPNOTSUPP;
+}
+
+#if defined(CONFIG_SUPERIO)
+
+/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
+ * Unfortunately, it's built-in on all Astro-based PA-RISC workstations
+ * which use the integrated NS87514 cell for CD-ROM support.
+ * i.e. we have to support it for CD-ROM installs.
+ * See drivers/parisc/superio.c for more gory details.
+ *
+ * Workarounds taken from drivers/ide/pci/ns87415.c
+ */
+
+#include <asm/superio.h>
+
+#define SUPERIO_IDE_MAX_RETRIES 25
+
+/**
+ *	ns87560_read_buggy	-	workaround buggy Super I/O chip
+ *	@port: Port to read
+ *
+ *	Work around chipset problems in the 87560 SuperIO chip
+ */
+
+static u8 ns87560_read_buggy(void __iomem *port)
+{
+	u8 tmp;
+	int retries = SUPERIO_IDE_MAX_RETRIES;
+	do {
+		tmp = ioread8(port);
+		if (tmp != 0)
+			return tmp;
+		udelay(50);
+	} while(retries-- > 0);
+	return tmp;
+}
+
+/**
+ *	ns87560_check_status
+ *	@ap: channel to check
+ *
+ *	Return the status of the channel working around the
+ *	87560 flaws.
+ */
+
+static u8 ns87560_check_status(struct ata_port *ap)
+{
+	return ns87560_read_buggy(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ns87560_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf. Work around the 87560 bugs.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ns87560_check_status(ap);
+	tf->feature = ioread8(ioaddr->error_addr);
+	tf->nsect = ioread8(ioaddr->nsect_addr);
+	tf->lbal = ioread8(ioaddr->lbal_addr);
+	tf->lbam = ioread8(ioaddr->lbam_addr);
+	tf->lbah = ioread8(ioaddr->lbah_addr);
+	tf->device = ns87560_read_buggy(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = ioread8(ioaddr->error_addr);
+		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+		iowrite8(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+	}
+}
+
+/**
+ *	ns87560_bmdma_status
+ *	@ap: channel to check
+ *
+ *	Return the DMA status of the channel working around the
+ *	87560 flaws.
+ */
+
+static u8 ns87560_bmdma_status(struct ata_port *ap)
+{
+	return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+
+static const struct ata_port_operations ns87560_pata_ops = {
+	.set_piomode		= ns87415_set_piomode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ns87560_tf_read,
+	.check_status		= ns87560_check_status,
+	.check_atapi_dma	= ns87415_check_atapi_dma,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= ns87415_bmdma_setup,
+	.bmdma_start		= ns87415_bmdma_start,
+	.bmdma_stop		= ns87415_bmdma_stop,
+	.bmdma_status		= ns87560_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ns87415_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	.port_start		= ata_sff_port_start,
+};
+
+#endif		/* 87560 SuperIO Support */
+
+
+static const struct ata_port_operations ns87415_pata_ops = {
+	.set_piomode		= ns87415_set_piomode,
+	.mode_filter		= ata_pci_default_filter,
+
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.check_status		= ata_check_status,
+	.check_atapi_dma	= ns87415_check_atapi_dma,
+	.exec_command		= ata_exec_command,
+	.dev_select		= ata_std_dev_select,
+
+	.freeze			= ata_bmdma_freeze,
+	.thaw			= ata_bmdma_thaw,
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd 	= ata_bmdma_post_internal_cmd,
+	.cable_detect		= ata_cable_40wire,
+
+	.bmdma_setup		= ns87415_bmdma_setup,
+	.bmdma_start		= ns87415_bmdma_start,
+	.bmdma_stop		= ns87415_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_prep		= ata_qc_prep,
+	.qc_issue		= ata_qc_issue_prot,
+	.data_xfer		= ata_data_xfer,
+
+	.irq_handler		= ata_interrupt,
+	.irq_clear		= ns87415_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+
+	.port_start		= ata_sff_port_start,
+};
+
+static struct scsi_host_template ns87415_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
+
+/**
+ *	ns87415_init_one - Register 87415 ATA PCI device with kernel services
+ *	@pdev: PCI device to register
+ *	@ent: Entry in ns87415_pci_tbl matching with @pdev
+ *
+ *	Called from kernel PCI layer.  We probe for combined mode (sigh),
+ *	and then hand over control to libata, for it to do the rest.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, or -ERRNO value.
+ */
+
+static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	static const struct ata_port_info info = {
+		.sht		= &ns87415_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.port_ops	= &ns87415_pata_ops,
+	};
+	const struct ata_port_info *ppi[] = { &info, NULL };
+#if defined(CONFIG_SUPERIO)
+	static const struct ata_port_info info87560 = {
+		.sht		= &ns87415_sht,
+		.flags		= ATA_FLAG_SLAVE_POSS,
+		.pio_mask	= 0x1f,	/* pio0-4 */
+		.mwdma_mask	= 0x07, /* mwdma0-2 */
+		.port_ops	= &ns87560_pata_ops,
+	};
+
+	if (PCI_SLOT(pdev->devfn) == 0x0E)
+		ppi[0] = &info87560;
+#endif
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			   "version " DRV_VERSION "\n");
+	/* Select 512 byte sectors */
+	pci_write_config_byte(pdev, 0x55, 0xEE);
+	/* Select PIO0 8bit clocking */
+	pci_write_config_byte(pdev, 0x54, 0xB7);
+	return ata_pci_init_one(pdev, ppi);
+}
+
+static const struct pci_device_id ns87415_pci_tbl[] = {
+	{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), },
+
+	{ }	/* terminate list */
+};
+
+static struct pci_driver ns87415_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= ns87415_pci_tbl,
+	.probe			= ns87415_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= ata_pci_device_suspend,
+	.resume			= ata_pci_device_resume,
+#endif
+};
+
+static int __init ns87415_init(void)
+{
+	return pci_register_driver(&ns87415_pci_driver);
+}
+
+static void __exit ns87415_exit(void)
+{
+	pci_unregister_driver(&ns87415_pci_driver);
+}
+
+module_init(ns87415_init);
+module_exit(ns87415_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
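[Editor's note on the SuperIO handling in ns87415_init_one() above: the 87560-specific
port operations are selected purely by the controller sitting in PCI slot 0x0E, where the
87560's IDE function lives.  A minimal userspace sketch of that slot test is below;
PCI_SLOT() is the standard <linux/pci.h> helper and the devfn value is made up.]

#include <stdio.h>

#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)	/* as in <linux/pci.h> */

int main(void)
{
	/* hypothetical devfn: device 0x0E, function 1 */
	unsigned int devfn = (0x0E << 3) | 1;

	if (PCI_SLOT(devfn) == 0x0E)
		printf("would use ns87560_pata_ops\n");
	return 0;
}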
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 29c23dd..44da09a 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -29,14 +29,15 @@
 
 /**
  *	oldpiix_pre_reset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Set up cable type and use generic probe init
  */
 
-static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const struct pci_bits oldpiix_enable_bits[] = {
 		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
@@ -46,7 +47,7 @@ static int oldpiix_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -199,7 +200,7 @@ static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings if
- *	neccessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
  *	that, even if we get this wrong, cycles to the other device will
  *	be made PIO0.
  */
@@ -237,7 +238,6 @@ static struct scsi_host_template oldpiix_sht = {
 };
 
 static const struct ata_port_operations oldpiix_pata_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= oldpiix_set_piomode,
 	.set_dmamode		= oldpiix_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -265,9 +265,8 @@ static const struct ata_port_operations oldpiix_pata_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -291,7 +290,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht		= &oldpiix_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma1-2 */
 		.port_ops	= &oldpiix_pata_ops,
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 1c44653..8f79447 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -46,14 +46,15 @@ enum {
 
 /**
  *	opti_pre_reset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Set up cable type and use generic probe init
  */
 
-static int opti_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int opti_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const struct pci_bits opti_enable_bits[] = {
 		{ 0x45, 1, 0x80, 0x00 },
@@ -63,7 +64,7 @@ static int opti_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -182,7 +183,6 @@ static struct scsi_host_template opti_sht = {
 };
 
 static struct ata_port_operations opti_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= opti_set_piomode,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
@@ -209,16 +209,15 @@ static struct ata_port_operations opti_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &opti_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &opti_port_ops
 	};
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 3093b02..f9b485a 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -47,14 +47,15 @@ static int pci_clock;	/* 0 = 33 1 = 25 */
 
 /**
  *	optidma_pre_reset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Set up cable type and use generic probe init
  */
 
-static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int optidma_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const struct pci_bits optidma_enable_bits = {
 		0x40, 1, 0x08, 0x00
@@ -63,7 +64,7 @@ static int optidma_pre_reset(struct ata_port *ap, unsigned long deadline)
 	if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -323,25 +324,26 @@ static u8 optidma_make_bits43(struct ata_device *adev)
 
 /**
  *	optidma_set_mode	-	mode setup
- *	@ap: port to set up
+ *	@link: link to set up
  *
  *	Use the standard setup to tune the chipset and then finalise the
  *	configuration by writing the nibble of extra bits of data into
  *	the chip.
  */
 
-static int optidma_set_mode(struct ata_port *ap, struct ata_device **r_failed)
+static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
 {
+	struct ata_port *ap = link->ap;
 	u8 r;
 	int nybble = 4 * ap->port_no;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	int rc  = ata_do_set_mode(ap, r_failed);
+	int rc  = ata_do_set_mode(link, r_failed);
 	if (rc == 0) {
 		pci_read_config_byte(pdev, 0x43, &r);
 
 		r &= (0x0F << nybble);
-		r |= (optidma_make_bits43(&ap->device[0]) +
-		     (optidma_make_bits43(&ap->device[0]) << 2)) << nybble;
+		r |= (optidma_make_bits43(&link->device[0]) +
+		     (optidma_make_bits43(&link->device[0]) << 2)) << nybble;
 		pci_write_config_byte(pdev, 0x43, r);
 	}
 	return rc;
@@ -366,7 +368,6 @@ static struct scsi_host_template optidma_sht = {
 };
 
 static struct ata_port_operations optidma_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= optidma_set_pio_mode,
 	.set_dmamode	= optidma_set_dma_mode,
 
@@ -396,13 +397,11 @@ static struct ata_port_operations optidma_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations optiplus_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= optiplus_set_pio_mode,
 	.set_dmamode	= optiplus_set_dma_mode,
 
@@ -432,9 +431,8 @@ static struct ata_port_operations optiplus_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -451,7 +449,7 @@ static int optiplus_with_udma(struct pci_dev *pdev)
 
 	/* Find function 1 */
 	dev1 = pci_get_device(0x1045, 0xC701, NULL);
-	if(dev1 == NULL)
+	if (dev1 == NULL)
 		return 0;
 
 	/* Rev must be >= 0x10 */
@@ -484,14 +482,14 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info_82c700 = {
 		.sht = &optidma_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &optidma_port_ops
 	};
 	static const struct ata_port_info info_82c700_udma = {
 		.sht = &optidma_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x07,
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index a56257c..fd36099 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -42,7 +42,7 @@
 
 
 #define DRV_NAME "pata_pcmcia"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.2"
 
 /*
  *	Private data structure to glue stuff together
@@ -56,7 +56,7 @@ struct ata_pcmcia_info {
 
 /**
  *	pcmcia_set_mode	-	PCMCIA specific mode setup
- *	@ap: Port
+ *	@link: link
  *	@r_failed_dev: Return pointer for failed device
  *
  *	Perform the tuning and setup of the devices and timings, which
@@ -65,17 +65,16 @@ struct ata_pcmcia_info {
  *	decode, which alas is embarrassingly common in the PC world
  */
 
-static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 {
-	struct ata_device *master = &ap->device[0];
-	struct ata_device *slave = &ap->device[1];
+	struct ata_device *master = &link->device[0];
+	struct ata_device *slave = &link->device[1];
 
 	if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
-		return ata_do_set_mode(ap, r_failed_dev);
+		return ata_do_set_mode(link, r_failed_dev);
 
 	if (memcmp(master->id + ATA_ID_FW_REV,  slave->id + ATA_ID_FW_REV,
-			   ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0)
-	{
+			   ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
 		/* Suspicious match, but could be two cards from
 		   the same vendor - check serial */
 		if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
@@ -84,7 +83,7 @@ static int pcmcia_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev
 			ata_dev_disable(slave);
 		}
 	}
-	return ata_do_set_mode(ap, r_failed_dev);
+	return ata_do_set_mode(link, r_failed_dev);
 }
 
 static struct scsi_host_template pcmcia_sht = {
@@ -107,7 +106,6 @@ static struct scsi_host_template pcmcia_sht = {
 
 static struct ata_port_operations pcmcia_port_ops = {
 	.set_mode	= pcmcia_set_mode,
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -127,7 +125,6 @@ static struct ata_port_operations pcmcia_port_ops = {
 
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
 	.port_start	= ata_sff_port_start,
 };
@@ -250,7 +247,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
 					goto next_entry;
 				io_base = pdev->io.BasePort1;
 				ctl_base = pdev->io.BasePort1 + 0x0e;
-			} else goto next_entry;
+			} else
+				goto next_entry;
 			/* If we've got this far, we're done */
 			break;
 		}
@@ -287,8 +285,8 @@ next_entry:
 		printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
 
 	/*
- 	 *	Having done the PCMCIA plumbing the ATA side is relatively
- 	 *	sane.
+	 *	Having done the PCMCIA plumbing the ATA side is relatively
+	 *	sane.
 	 */
 	ret = -ENOMEM;
 	host = ata_host_alloc(&pdev->dev, 1);
@@ -304,6 +302,8 @@ next_entry:
 	ap->ioaddr.ctl_addr = ctl_addr;
 	ata_std_ports(&ap->ioaddr);
 
+	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
+
 	/* activate */
 	ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_interrupt,
 				IRQF_SHARED, &pcmcia_sht);
@@ -353,6 +353,7 @@ static void pcmcia_remove_one(struct pcmcia_device *pdev)
 
 static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_FUNC_ID(4),
+	PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000),	/* Corsair */
 	PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000),	/* Hitachi */
 	PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000),	/* I-O Data CFA */
 	PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001),	/* Mitsubishi CFA */
@@ -362,7 +363,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000),	/* Toshiba */
 	PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
 	PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000),	/* Samsung */
- 	PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000),	/* Hitachi */
+	PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000),	/* Hitachi */
 	PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
 	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100),	/* Viking CFA */
 	PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200),	/* Lexar, Viking CFA */
@@ -378,10 +379,12 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID12("EXP   ", "CD-ROM", 0x0a5c52fd, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("EXP   ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
 	PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
+	PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
 	PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
 	PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
 	PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
 	PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index a5a601c..f4b8bfd 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -35,7 +35,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_pdc2027x"
-#define DRV_VERSION	"0.9"
+#define DRV_VERSION	"1.0"
 #undef PDC_DEBUG
 
 #ifdef PDC_DEBUG
@@ -69,7 +69,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
 static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
 static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask);
 static int pdc2027x_cable_detect(struct ata_port *ap);
-static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed);
+static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed);
 
 /*
  * ATA Timing Tables based on 133MHz controller clock.
@@ -110,7 +110,17 @@ static struct pdc2027x_udma_timing {
 };
 
 static const struct pci_device_id pdc2027x_pci_tbl[] = {
+#if 0
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 },
+#endif
 	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 },
+#if 0
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 },
+	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 },
+#endif
 
 	{ }	/* terminate list */
 };
@@ -141,7 +151,6 @@ static struct scsi_host_template pdc2027x_sht = {
 };
 
 static struct ata_port_operations pdc2027x_pata100_ops = {
-	.port_disable		= ata_port_disable,
 	.mode_filter		= ata_pci_default_filter,
 
 	.tf_load		= ata_tf_load,
@@ -167,13 +176,11 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static struct ata_port_operations pdc2027x_pata133_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= pdc2027x_set_piomode,
 	.set_dmamode		= pdc2027x_set_dmamode,
 	.set_mode		= pdc2027x_set_mode,
@@ -202,9 +209,8 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static struct ata_port_info pdc2027x_port_info[] = {
@@ -271,7 +277,7 @@ static int pdc2027x_cable_detect(struct ata_port *ap)
 	u32 cgcr;
 
 	/* check cable detect results */
-	cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL));
+	cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL));
 	if (cgcr & (1 << 26))
 		goto cbl40;
 
@@ -289,12 +295,12 @@ cbl40:
  */
 static inline int pdc2027x_port_enabled(struct ata_port *ap)
 {
-	return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
+	return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
 }
 
 /**
  *	pdc2027x_prereset - prereset for PATA host controller
- *	@ap: Target port
+ *	@link: Target link
  *	@deadline: deadline jiffies for the operation
  *
  *	Probeinit including cable detection.
@@ -303,12 +309,12 @@ static inline int pdc2027x_port_enabled(struct ata_port *ap)
  *	None (inherited from caller).
  */
 
-static int pdc2027x_prereset(struct ata_port *ap, unsigned long deadline)
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
 {
 	/* Check whether port enabled */
-	if (!pdc2027x_port_enabled(ap))
+	if (!pdc2027x_port_enabled(link->ap))
 		return -ENOENT;
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -346,7 +352,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
 	ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
 			  ATA_ID_PROD_LEN + 1);
 	/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
-	if(strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
+	if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
 		mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
 
 	return ata_pci_default_filter(adev, mask);
@@ -381,16 +387,16 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	/* Set the PIO timing registers using value table for 133MHz */
 	PDPRINTK("Set pio regs... \n");
 
-	ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
+	ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
 	ctcr0 &= 0xffff0000;
 	ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
 		(pdc2027x_pio_timing_tbl[pio].value1 << 8);
-	writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+	iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
 
-	ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
+	ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
 	ctcr1 &= 0x00ffffff;
 	ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
-	writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+	iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
 
 	PDPRINTK("Set pio regs done\n");
 
@@ -424,18 +430,18 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 			 * If tHOLD is '1', the hardware will add half clock for data hold time.
 			 * This code segment seems to be no effect. tHOLD will be overwritten below.
 			 */
-			ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
-			writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
+			ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+			iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
 		}
 
 		PDPRINTK("Set udma regs... \n");
 
-		ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
+		ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
 		ctcr1 &= 0xff000000;
 		ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
 			(pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
 			(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
-		writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+		iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
 
 		PDPRINTK("Set udma regs done\n");
 
@@ -447,13 +453,13 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 		unsigned int mdma_mode = dma_mode & 0x07;
 
 		PDPRINTK("Set mdma regs... \n");
-		ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
+		ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
 
 		ctcr0 &= 0x0000ffff;
 		ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
 			(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
 
-		writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+		iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
 		PDPRINTK("Set mdma regs done\n");
 
 		PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
@@ -464,24 +470,24 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 
 /**
  *	pdc2027x_set_mode - Set the timing registers back to correct values.
- *	@ap: Port to configure
+ *	@link: link to configure
  *	@r_failed: Returned device for failure
  *
  *	The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
  *	automatically. The values set by the hardware might be incorrect, under 133Mhz PLL.
  *	This function overwrites the possibly incorrect values set by the hardware to be correct.
  */
-static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed)
+static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed)
 {
-	int i;
-
-	i = ata_do_set_mode(ap, r_failed);
-	if (i < 0)
-		return i;
+	struct ata_port *ap = link->ap;
+	struct ata_device *dev;
+	int rc;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	rc = ata_do_set_mode(link, r_failed);
+	if (rc < 0)
+		return rc;
 
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 
 			pdc2027x_set_piomode(ap, dev);
@@ -490,9 +496,9 @@ static int pdc2027x_set_mode(struct ata_port *ap, struct ata_device **r_failed)
 			 * Enable prefetch if the device support PIO only.
 			 */
 			if (dev->xfer_shift == ATA_SHIFT_PIO) {
-				u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1));
+				u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
 				ctcr1 |= (1 << 25);
-				writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
+				iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
 
 				PDPRINTK("Turn on prefetch\n");
 			} else {
@@ -557,14 +563,12 @@ static long pdc_read_counter(struct ata_host *host)
 	u32 bccrl, bccrh, bccrlv, bccrhv;
 
 retry:
-	bccrl = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
-	bccrh = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
-	rmb();
+	bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
+	bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
 
 	/* Read the counter values again for verification */
-	bccrlv = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
-	bccrhv = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
-	rmb();
+	bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
+	bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
 
 	counter = (bccrh << 15) | bccrl;
 
@@ -613,7 +617,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
 	/* Show the current clock value of PLL control register
 	 * (maybe already configured by the firmware)
 	 */
-	pll_ctl = readw(mmio_base + PDC_PLL_CTL);
+	pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
 
 	PDPRINTK("pll_ctl[%X]\n", pll_ctl);
 #endif
@@ -653,8 +657,8 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
 
 	PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
 
-	writew(pll_ctl, mmio_base + PDC_PLL_CTL);
-	readw(mmio_base + PDC_PLL_CTL); /* flush */
+	iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
+	ioread16(mmio_base + PDC_PLL_CTL); /* flush */
 
 	/* Wait the PLL circuit to be stable */
 	mdelay(30);
@@ -664,7 +668,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
 	 *  Show the current clock value of PLL control register
 	 * (maybe configured by the firmware)
 	 */
-	pll_ctl = readw(mmio_base + PDC_PLL_CTL);
+	pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
 
 	PDPRINTK("pll_ctl[%X]\n", pll_ctl);
 #endif
@@ -683,31 +687,38 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
 	void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
 	u32 scr;
 	long start_count, end_count;
-	long pll_clock;
-
-	/* Read current counter value */
-	start_count = pdc_read_counter(host);
+	struct timeval start_time, end_time;
+	long pll_clock, usec_elapsed;
 
 	/* Start the test mode */
-	scr = readl(mmio_base + PDC_SYS_CTL);
+	scr = ioread32(mmio_base + PDC_SYS_CTL);
 	PDPRINTK("scr[%X]\n", scr);
-	writel(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
-	readl(mmio_base + PDC_SYS_CTL); /* flush */
+	iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
+	ioread32(mmio_base + PDC_SYS_CTL); /* flush */
+
+	/* Read current counter value */
+	start_count = pdc_read_counter(host);
+	do_gettimeofday(&start_time);
 
 	/* Let the counter run for 100 ms. */
 	mdelay(100);
 
 	/* Read the counter values again */
 	end_count = pdc_read_counter(host);
+	do_gettimeofday(&end_time);
 
 	/* Stop the test mode */
-	scr = readl(mmio_base + PDC_SYS_CTL);
+	scr = ioread32(mmio_base + PDC_SYS_CTL);
 	PDPRINTK("scr[%X]\n", scr);
-	writel(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
-	readl(mmio_base + PDC_SYS_CTL); /* flush */
+	iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
+	ioread32(mmio_base + PDC_SYS_CTL); /* flush */
 
 	/* calculate the input clock in Hz */
-	pll_clock = (start_count - end_count) * 10;
+	usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
+		(end_time.tv_usec - start_time.tv_usec);
+
+	pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
+		(100000000 / usec_elapsed);
 
 	PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
 	PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
@@ -732,9 +743,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 	 */
 	pll_clock = pdc_detect_pll_input_clock(host);
 
-	if (pll_clock < 0) /* counter overflow? Try again. */
-		pll_clock = pdc_detect_pll_input_clock(host);
-
 	dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
 
 	/* Adjust PLL control register */
@@ -778,12 +786,14 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
 static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
+	static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
+	static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
 	unsigned int board_idx = (unsigned int) ent->driver_data;
 	const struct ata_port_info *ppi[] =
 		{ &pdc2027x_port_info[board_idx], NULL };
 	struct ata_host *host;
 	void __iomem *mmio_base;
-	int rc;
+	int i, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -821,10 +831,15 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
 
 	mmio_base = host->iomap[PDC_MMIO_BAR];
 
-	pdc_ata_setup_port(&host->ports[0]->ioaddr, mmio_base + 0x17c0);
-	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x1000;
-	pdc_ata_setup_port(&host->ports[1]->ioaddr, mmio_base + 0x15c0);
-	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x1008;
+	for (i = 0; i < 2; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]);
+		ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i];
+
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd");
+	}
 
 	//pci_enable_intx(pdev);
 
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index d277246..bc7c2d5 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -9,7 +9,7 @@
  * First cut with LBA48/ATAPI
  *
  * TODO:
- *	Channel interlock/reset on both required
+ *	Channel interlock/reset on both required?
  */
 
 #include <linux/kernel.h>
@@ -22,7 +22,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_pdc202xx_old"
-#define DRV_VERSION "0.4.2"
+#define DRV_VERSION "0.4.3"
 
 static int pdc2026x_cable_detect(struct ata_port *ap)
 {
@@ -106,9 +106,9 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 		{ 0x20, 0x01 }
 	};
 	static u8 mdma_timing[3][2] = {
-		{ 0x60, 0x03 },
-		{ 0x60, 0x04 },
 		{ 0xe0, 0x0f },
+		{ 0x60, 0x04 },
+		{ 0x60, 0x03 },
 	};
 	u8 r_bp, r_cp;
 
@@ -139,6 +139,9 @@ static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  *
  *	In UDMA3 or higher we have to clock switch for the duration of the
  *	DMA transfer sequence.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
  */
 
 static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
@@ -187,6 +190,9 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
  *
  *	After a DMA completes we need to put the clock back to 33MHz for
  *	PIO timings.
+ *
+ *	Note: The host lock held by the libata layer protects
+ *	us from two channels both trying to set DMA bits at once
  */
 
 static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
@@ -206,7 +212,6 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
 		iowrite32(0, atapi_reg);
 		iowrite8(ioread8(clock) & ~sel66, clock);
 	}
-	/* Check we keep host level locking here */
 	/* Flip back to 33Mhz for PIO */
 	if (adev->dma_mode >= XFER_UDMA_2)
 		iowrite8(ioread8(clock) & ~sel66, clock);
@@ -247,7 +252,6 @@ static struct scsi_host_template pdc202xx_sht = {
 };
 
 static struct ata_port_operations pdc2024x_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= pdc202xx_set_piomode,
 	.set_dmamode	= pdc202xx_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -275,13 +279,11 @@ static struct ata_port_operations pdc2024x_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations pdc2026x_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= pdc202xx_set_piomode,
 	.set_dmamode	= pdc202xx_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -310,9 +312,8 @@ static struct ata_port_operations pdc2026x_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
@@ -320,7 +321,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
 	static const struct ata_port_info info[3] = {
 		{
 			.sht = &pdc202xx_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = ATA_UDMA2,
@@ -328,7 +329,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
 		},
 		{
 			.sht = &pdc202xx_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = ATA_UDMA4,
@@ -336,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
 		},
 		{
 			.sht = &pdc202xx_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = ATA_UDMA5,
@@ -350,9 +351,9 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
 		struct pci_dev *bridge = dev->bus->self;
 		/* Don't grab anything behind a Promise I2O RAID */
 		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
-			if( bridge->device == PCI_DEVICE_ID_INTEL_I960)
+			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
 				return -ENODEV;
-			if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
+			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
 				return -ENODEV;
 		}
 	}
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index cbb7866..ac03a90 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -1,7 +1,7 @@
 /*
  * Generic platform device PATA driver
  *
- * Copyright (C) 2006  Paul Mundt
+ * Copyright (C) 2006 - 2007  Paul Mundt
  *
  * Based on pata_pcmcia:
  *
@@ -22,7 +22,7 @@
 #include <linux/pata_platform.h>
 
 #define DRV_NAME "pata_platform"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.2"
 
 static int pio_mask = 1;
 
@@ -30,13 +30,11 @@ static int pio_mask = 1;
  * Provide our own set_mode() as we don't want to change anything that has
  * already been configured..
  */
-static int pata_platform_set_mode(struct ata_port *ap, struct ata_device **unused)
+static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused)
 {
-	int i;
-
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	struct ata_device *dev;
 
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 			/* We don't really care */
 			dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
@@ -71,7 +69,6 @@ static struct scsi_host_template pata_platform_sht = {
 static struct ata_port_operations pata_platform_port_ops = {
 	.set_mode		= pata_platform_set_mode,
 
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -91,7 +88,6 @@ static struct ata_port_operations pata_platform_port_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_dummy_ret0,
 };
@@ -124,27 +120,34 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
  *	Register a platform bus IDE interface. Such interfaces are PIO and we
  *	assume do not support IRQ sharing.
  *
- *	Platform devices are expected to contain 3 resources per port:
+ *	Platform devices are expected to contain at least 2 resources per port:
  *
  *		- I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
  *		- CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *
+ *	and optionally:
+ *
  *		- IRQ	   (IORESOURCE_IRQ)
  *
  *	If the base resources are both mem types, the ioremap() is handled
  *	here. For IORESOURCE_IO, it's assumed that there's no remapping
  *	necessary.
+ *
+ *	If no IRQ resource is present, PIO polling mode is used instead.
  */
 static int __devinit pata_platform_probe(struct platform_device *pdev)
 {
 	struct resource *io_res, *ctl_res;
 	struct ata_host *host;
 	struct ata_port *ap;
+	struct pata_platform_info *pp_info;
 	unsigned int mmio;
+	int irq;
 
 	/*
 	 * Simple resource validation ..
 	 */
-	if (unlikely(pdev->num_resources != 3)) {
+	if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
 		dev_err(&pdev->dev, "invalid number of resources\n");
 		return -EINVAL;
 	}
@@ -176,6 +179,13 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
 		(ctl_res->flags == IORESOURCE_MEM));
 
 	/*
+	 * And the IRQ
+	 */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		irq = 0;	/* no irq */
+
+	/*
 	 * Now that that's out of the way, wire up the port..
 	 */
 	host = ata_host_alloc(&pdev->dev, 1);
@@ -188,6 +198,14 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
 	ap->flags |= ATA_FLAG_SLAVE_POSS;
 
 	/*
+	 * Use polling mode if there's no IRQ
+	 */
+	if (!irq) {
+		ap->flags |= ATA_FLAG_PIO_POLLING;
+		ata_port_desc(ap, "no IRQ, using PIO polling");
+	}
+
+	/*
 	 * Handle the MMIO case
 	 */
 	if (mmio) {
@@ -208,11 +226,17 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
 
 	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
 
-	pata_platform_setup_port(&ap->ioaddr, pdev->dev.platform_data);
+	pp_info = pdev->dev.platform_data;
+	pata_platform_setup_port(&ap->ioaddr, pp_info);
+
+	ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
+		      (unsigned long long)io_res->start,
+		      (unsigned long long)ctl_res->start);
 
 	/* activate */
-	return ata_host_activate(host, platform_get_irq(pdev, 0), ata_interrupt,
-				 0, &pata_platform_sht);
+	return ata_host_activate(host, irq, irq ? ata_interrupt : NULL,
+				 pp_info ? pp_info->irq_flags : 0,
+				 &pata_platform_sht);
 }
 
 /**
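[Editor's note: with the pata_platform changes above, a board can now describe an IRQ-less
interface; omitting the IORESOURCE_IRQ entry makes the driver fall back to PIO polling.
The sketch below is hypothetical board code, assuming the pata_platform_info layout of this
kernel (ioport_shift/irq_flags); the addresses and names are made up.]

#include <linux/platform_device.h>
#include <linux/pata_platform.h>

static struct resource example_ide_resources[] = {
	[0] = {				/* command block, 8 byte-spaced registers */
		.start	= 0xb4000000,
		.end	= 0xb4000007,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {				/* control/altstatus register */
		.start	= 0xb400000e,
		.end	= 0xb400000e,
		.flags	= IORESOURCE_MEM,
	},
	/* no IORESOURCE_IRQ entry -> driver uses ATA_FLAG_PIO_POLLING */
};

static struct pata_platform_info example_ide_info = {
	.ioport_shift	= 0,		/* registers are byte-spaced */
};

static struct platform_device example_ide_device = {
	.name		= "pata_platform",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_ide_info,
	},
	.num_resources	= ARRAY_SIZE(example_ide_resources),
	.resource	= example_ide_resources,
};

[Registering this from board setup code with platform_device_register(&example_ide_device)
binds it to the "pata_platform" driver by name.]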
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 1998c19..7d4c696 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -126,7 +126,7 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
 
 static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
 {
-	struct ata_port *ap = adev->ap;
+	struct ata_port *ap = adev->link->ap;
 	int slop = buflen & 3;
 
 	if (ata_id_has_dword_io(adev->id)) {
@@ -170,7 +170,6 @@ static struct scsi_host_template qdi_sht = {
 };
 
 static struct ata_port_operations qdi6500_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= qdi6500_set_piomode,
 
 	.tf_load	= ata_tf_load,
@@ -192,13 +191,11 @@ static struct ata_port_operations qdi6500_port_ops = {
 
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations qdi6580_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= qdi6580_set_piomode,
 
 	.tf_load	= ata_tf_load,
@@ -220,9 +217,8 @@ static struct ata_port_operations qdi6580_port_ops = {
 
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -238,6 +234,7 @@ static struct ata_port_operations qdi6580_port_ops = {
 
 static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
 {
+	unsigned long ctl = io + 0x206;
 	struct platform_device *pdev;
 	struct ata_host *host;
 	struct ata_port *ap;
@@ -254,7 +251,7 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
 
 	ret = -ENOMEM;
 	io_addr = devm_ioport_map(&pdev->dev, io, 8);
-	ctl_addr = devm_ioport_map(&pdev->dev, io + 0x206, 1);
+	ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1);
 	if (!io_addr || !ctl_addr)
 		goto fail;
 
@@ -279,6 +276,8 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
 	ap->ioaddr.ctl_addr = ctl_addr;
 	ata_std_ports(&ap->ioaddr);
 
+	ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl);
+
 	/*
 	 *	Hook in a private data structure per channel
 	 */
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index ba96b54..8109b08 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -161,7 +161,7 @@ static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings if
- *	neccessary. Our logic also clears TIME0/TIME1 for the other device so
+ *	necessary. Our logic also clears TIME0/TIME1 for the other device so
  *	that, even if we get this wrong, cycles to the other device will
  *	be made PIO0.
  */
@@ -203,7 +203,6 @@ static struct scsi_host_template radisys_sht = {
 };
 
 static const struct ata_port_operations radisys_pata_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= radisys_set_piomode,
 	.set_dmamode		= radisys_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -231,9 +230,8 @@ static const struct ata_port_operations radisys_pata_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 
@@ -257,7 +255,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht		= &radisys_sht,
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags		= ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma1-2 */
 		.udma_mask	= 0x14, /* UDMA33/66 only */
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index a3488b4..ba8a31c 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -26,7 +26,7 @@
 
 /**
  *	rz1000_set_mode		-	mode setting function
- *	@ap: ATA interface
+ *	@link: ATA link
  *	@unused: returned device on set_mode failure
  *
  *	Use a non standard set_mode function. We don't want to be tuned. We
@@ -34,12 +34,11 @@
  *	whacked out.
  */
 
-static int rz1000_set_mode(struct ata_port *ap, struct ata_device **unused)
+static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
 {
-	int i;
+	struct ata_device *dev;
 
-	for (i = 0; i < ATA_MAX_DEVICES; i++) {
-		struct ata_device *dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (ata_dev_enabled(dev)) {
 			/* We don't really care */
 			dev->pio_mode = XFER_PIO_0;
@@ -74,7 +73,6 @@ static struct scsi_host_template rz1000_sht = {
 static struct ata_port_operations rz1000_port_ops = {
 	.set_mode	= rz1000_set_mode,
 
-	.port_disable	= ata_port_disable,
 	.tf_load	= ata_tf_load,
 	.tf_read	= ata_tf_read,
 	.check_status 	= ata_check_status,
@@ -100,9 +98,8 @@ static struct ata_port_operations rz1000_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int rz1000_fifo_disable(struct pci_dev *pdev)
@@ -133,7 +130,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
 	static int printed_version;
 	static const struct ata_port_info info = {
 		.sht = &rz1000_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &rz1000_port_ops
 	};
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 1233063..725a858 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -40,7 +40,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sc1200"
-#define DRV_VERSION	"0.2.5"
+#define DRV_VERSION	"0.2.6"
 
 #define SC1200_REV_A	0x00
 #define SC1200_REV_B1	0x01
@@ -156,7 +156,7 @@ static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
  *
  *	Called when the libata layer is about to issue a command. We wrap
  *	this interface so that we can load the correct ATA timings if
- *	neccessary.  Specifically we have a problem that there is only
+ *	necessary.  Specifically we have a problem that there is only
  *	one MWDMA/UDMA bit.
  */
 
@@ -185,7 +185,7 @@ static struct scsi_host_template sc1200_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -197,7 +197,6 @@ static struct scsi_host_template sc1200_sht = {
 };
 
 static struct ata_port_operations sc1200_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= sc1200_set_piomode,
 	.set_dmamode	= sc1200_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -219,7 +218,7 @@ static struct ata_port_operations sc1200_port_ops = {
 	.bmdma_stop	= ata_bmdma_stop,
 	.bmdma_status 	= ata_bmdma_status,
 
-	.qc_prep 	= ata_qc_prep,
+	.qc_prep 	= ata_dumb_qc_prep,
 	.qc_issue	= sc1200_qc_issue_prot,
 
 	.data_xfer	= ata_data_xfer,
@@ -227,9 +226,8 @@ static struct ata_port_operations sc1200_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -245,7 +243,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &sc1200_sht,
-		.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = 0x07,
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 61502bc..ea2ef9f 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -43,7 +43,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME		"pata_scc"
-#define DRV_VERSION		"0.2"
+#define DRV_VERSION		"0.3"
 
 #define PCI_DEVICE_ID_TOSHIBA_SCC_ATA		0x01b4
 
@@ -258,6 +258,17 @@ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 		 JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
 }
 
+unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+	/* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
+	if (adev->class == ATA_DEV_ATAPI &&
+	    (mask & (0xE0 << ATA_SHIFT_UDMA))) {
+		printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
+		mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+	}
+	return ata_pci_default_filter(adev, mask);
+}
+
 /**
  *	scc_tf_load - send taskfile registers to host controller
  *	@ap: Port to which output is sent
@@ -352,6 +363,8 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = in_be32(ioaddr->lbal_addr);
 		tf->hob_lbam = in_be32(ioaddr->lbam_addr);
 		tf->hob_lbah = in_be32(ioaddr->lbah_addr);
+		out_be32(ioaddr->ctl_addr, tf->ctl);
+		ap->last_ctl = tf->ctl;
 	}
 }
 
@@ -557,17 +570,8 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
 	udelay(20);
 	out_be32(ioaddr->ctl_addr, ap->ctl);
 
-	/* spec mandates ">= 2ms" before checking status.
-	 * We wait 150ms, because that was the magic delay used for
-	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
-	 * between when the ATA command register is written, and then
-	 * status is checked.  Because waiting for "a while" before
-	 * checking status is fine, post SRST, we perform this magic
-	 * delay here as well.
-	 *
-	 * Old drivers/ide uses the 2mS rule and then waits for ready
-	 */
-	msleep(150);
+	/* wait a while before checking status */
+	ata_wait_after_reset(ap, deadline);
 
 	/* Before we perform post reset processing we want to see if
 	 * the bus shows 0xFF because the odd clown forgets the D7
@@ -590,16 +594,17 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
  *	Note: Original code is ata_std_softreset().
  */
 
-static int scc_std_softreset (struct ata_port *ap, unsigned int *classes,
-                              unsigned long deadline)
+static int scc_std_softreset(struct ata_link *link, unsigned int *classes,
+                             unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
 	unsigned int devmask = 0, err_mask;
 	u8 err;
 
 	DPRINTK("ENTER\n");
 
-	if (ata_port_offline(ap)) {
+	if (ata_link_offline(link)) {
 		classes[0] = ATA_DEV_NONE;
 		goto out;
 	}
@@ -623,9 +628,11 @@ static int scc_std_softreset (struct ata_port *ap, unsigned int *classes,
 	}
 
 	/* determine by signature whether we have ATA or ATAPI devices */
-	classes[0] = ata_dev_try_classify(ap, 0, &err);
+	classes[0] = ata_dev_try_classify(&ap->link.device[0],
+					  devmask & (1 << 0), &err);
 	if (slave_possible && err != 0x81)
-		classes[1] = ata_dev_try_classify(ap, 1, &err);
+		classes[1] = ata_dev_try_classify(&ap->link.device[1],
+						  devmask & (1 << 1), &err);
 
  out:
 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
@@ -688,7 +695,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
 			printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
 			out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
 			/* TBD: SW reset */
-			scc_std_softreset(ap, &classes, deadline);
+			scc_std_softreset(&ap->link, &classes, deadline);
 			continue;
 		}
 
@@ -724,22 +731,36 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
 
 static u8 scc_bmdma_status (struct ata_port *ap)
 {
-	u8 host_stat;
 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-	host_stat = in_be32(mmio + SCC_DMA_STATUS);
-
-	/* Workaround for PTERADD: emulate DMA_INTR when
-	 * - IDE_STATUS[ERR] = 1
-	 * - INT_STATUS[INTRQ] = 1
-	 * - DMA_STATUS[IORACTA] = 1
-	 */
-	if (!(host_stat & ATA_DMA_INTR)) {
-		u32 int_status = in_be32(mmio + SCC_DMA_INTST);
-		if (ata_altstatus(ap) & ATA_ERR &&
-		    int_status & INTSTS_INTRQ &&
-		    host_stat & ATA_DMA_ACTIVE)
-			host_stat |= ATA_DMA_INTR;
+	u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
+	u32 int_status = in_be32(mmio + SCC_DMA_INTST);
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	static int retry = 0;
+
+	/* return if IOS_SS is cleared */
+	if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
+		return host_stat;
+
+	/* errata A252,A308 workaround: Step4 */
+	if ((ata_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
+		return (host_stat | ATA_DMA_INTR);
+
+	/* errata A308 workaround Step5 */
+	if (int_status & INTSTS_IOIRQS) {
+		host_stat |= ATA_DMA_INTR;
+
+		/* We don't check ATAPI DMA because it is limited to UDMA4 */
+		if ((qc->tf.protocol == ATA_PROT_DMA &&
+		     qc->dev->xfer_mode > XFER_UDMA_4)) {
+			if (!(int_status & INTSTS_ACTEINT)) {
+				printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
+				       ap->print_id);
+				host_stat |= ATA_DMA_ERR;
+				if (retry++)
+					ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
+			} else
+				retry = 0;
+		}
 	}
 
 	return host_stat;
@@ -758,7 +779,7 @@ static u8 scc_bmdma_status (struct ata_port *ap)
 static void scc_data_xfer (struct ata_device *adev, unsigned char *buf,
 			   unsigned int buflen, int write_data)
 {
-	struct ata_port *ap = adev->ap;
+	struct ata_port *ap = adev->link->ap;
 	unsigned int words = buflen >> 1;
 	unsigned int i;
 	u16 *buf16 = (u16 *) buf;
@@ -812,38 +833,6 @@ static u8 scc_irq_on (struct ata_port *ap)
 }
 
 /**
- *	scc_irq_ack - Acknowledge a device interrupt.
- *	@ap: Port on which interrupts are enabled.
- *
- *	Note: Original code is ata_irq_ack().
- */
-
-static u8 scc_irq_ack (struct ata_port *ap, unsigned int chk_drq)
-{
-	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
-	u8 host_stat, post_stat, status;
-
-	status = ata_busy_wait(ap, bits, 1000);
-	if (status & bits)
-		if (ata_msg_err(ap))
-			printk(KERN_ERR "abnormal status 0x%X\n", status);
-
-	/* get controller status; clear intr, err bits */
-	host_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
-	out_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS,
-		 host_stat | ATA_DMA_INTR | ATA_DMA_ERR);
-
-	post_stat = in_be32(ap->ioaddr.bmdma_addr + SCC_DMA_STATUS);
-
-	if (ata_msg_intr(ap))
-		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
-		       __FUNCTION__,
-		       host_stat, post_stat, status);
-
-	return status;
-}
-
-/**
  *	scc_bmdma_freeze - Freeze BMDMA controller port
  *	@ap: port to freeze
  *
@@ -874,10 +863,10 @@ static void scc_bmdma_freeze (struct ata_port *ap)
  *	@deadline: deadline jiffies for the operation
  */
 
-static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline)
+static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
 {
-	ap->cbl = ATA_CBL_PATA80;
-	return ata_std_prereset(ap, deadline);
+	link->ap->cbl = ATA_CBL_PATA80;
+	return ata_std_prereset(link, deadline);
 }
 
 /**
@@ -888,13 +877,11 @@ static int scc_pata_prereset(struct ata_port *ap, unsigned long deadline)
  *	Note: Original code is ata_std_postreset().
  */
 
-static void scc_std_postreset (struct ata_port *ap, unsigned int *classes)
+static void scc_std_postreset(struct ata_link *link, unsigned int *classes)
 {
-	DPRINTK("ENTER\n");
+	struct ata_port *ap = link->ap;
 
-	/* re-enable interrupts */
-	if (!ap->ops->error_handler)
-		ap->ops->irq_on(ap);
+	DPRINTK("ENTER\n");
 
 	/* is double-select really necessary? */
 	if (classes[0] != ATA_DEV_NONE)
@@ -997,10 +984,9 @@ static struct scsi_host_template scc_sht = {
 };
 
 static const struct ata_port_operations scc_pata_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= scc_set_piomode,
 	.set_dmamode		= scc_set_dmamode,
-	.mode_filter		= ata_pci_default_filter,
+	.mode_filter		= scc_mode_filter,
 
 	.tf_load		= scc_tf_load,
 	.tf_read		= scc_tf_read,
@@ -1024,7 +1010,6 @@ static const struct ata_port_operations scc_pata_ops = {
 
 	.irq_clear		= scc_bmdma_irq_clear,
 	.irq_on			= scc_irq_on,
-	.irq_ack		= scc_irq_ack,
 
 	.port_start		= scc_port_start,
 	.port_stop		= scc_port_stop,
@@ -1170,6 +1155,9 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	host->iomap = pcim_iomap_table(pdev);
 
+	ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
+	ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
+
 	rc = scc_host_init(host);
 	if (rc)
 		return rc;
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 1e8f421..04d794d 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -41,7 +41,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_serverworks"
-#define DRV_VERSION "0.4.1"
+#define DRV_VERSION "0.4.2"
 
 #define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
 #define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
@@ -274,28 +274,27 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
 {
 	static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
 	int offset = 1 + 2 * ap->port_no - adev->devno;
-	int devbits = (2 * ap->port_no + adev->devno);
+	int devbits = 2 * ap->port_no + adev->devno;
 	u8 ultra;
 	u8 ultra_cfg;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	pci_read_config_byte(pdev, 0x54, &ultra_cfg);
+	pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
+	ultra &= ~(0x0F << (adev->devno * 4));
 
 	if (adev->dma_mode >= XFER_UDMA_0) {
 		pci_write_config_byte(pdev, 0x44 + offset,  0x20);
 
-		pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
-		ultra &= ~(0x0F << (ap->port_no * 4));
 		ultra |= (adev->dma_mode - XFER_UDMA_0)
-					<< (ap->port_no * 4);
-		pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
-
+					<< (adev->devno * 4);
 		ultra_cfg |=  (1 << devbits);
 	} else {
 		pci_write_config_byte(pdev, 0x44 + offset,
 			dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
 		ultra_cfg &= ~(1 << devbits);
 	}
+	pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
 	pci_write_config_byte(pdev, 0x54, ultra_cfg);
 }
 
@@ -318,7 +317,6 @@ static struct scsi_host_template serverworks_sht = {
 };
 
 static struct ata_port_operations serverworks_osb4_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= serverworks_set_piomode,
 	.set_dmamode	= serverworks_set_dmamode,
 	.mode_filter	= serverworks_osb4_filter,
@@ -348,13 +346,11 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations serverworks_csb_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= serverworks_set_piomode,
 	.set_dmamode	= serverworks_set_dmamode,
 	.mode_filter	= serverworks_csb_filter,
@@ -384,9 +380,8 @@ static struct ata_port_operations serverworks_csb_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int serverworks_fixup_osb4(struct pci_dev *pdev)
@@ -410,10 +405,8 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev)
 
 static int serverworks_fixup_csb(struct pci_dev *pdev)
 {
-	u8 rev;
 	u8 btr;
-
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+	u8 pdev_revision;
 
 	/* Third Channel Test */
 	if (!(PCI_FUNC(pdev->devfn) & 1)) {
@@ -452,11 +445,12 @@ static int serverworks_fixup_csb(struct pci_dev *pdev)
 	 * 	11 : udma2/udma4/udma5
 	 */
 	pci_read_config_byte(pdev, 0x5A, &btr);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
 	btr &= ~0x40;
 	if (!(PCI_FUNC(pdev->devfn) & 1))
 		btr |= 0x2;
 	else
-		btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
+		btr |= (pdev_revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
 	pci_write_config_byte(pdev, 0x5A, btr);
 
 	return btr;
@@ -478,31 +472,31 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
 	static const struct ata_port_info info[4] = {
 		{ /* OSB4 */
 			.sht = &serverworks_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = 0x07,
 			.port_ops = &serverworks_osb4_port_ops
 		}, { /* OSB4 no UDMA */
 			.sht = &serverworks_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
 			.udma_mask = 0x00,
 			.port_ops = &serverworks_osb4_port_ops
 		}, { /* CSB5 */
 			.sht = &serverworks_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x1f,
+			.udma_mask = ATA_UDMA4,
 			.port_ops = &serverworks_csb_port_ops
 		}, { /* CSB5 - later revisions*/
 			.sht = &serverworks_sht,
-			.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+			.flags = ATA_FLAG_SLAVE_POSS,
 			.pio_mask = 0x1f,
 			.mwdma_mask = 0x07,
-			.udma_mask = 0x3f,
+			.udma_mask = ATA_UDMA5,
 			.port_ops = &serverworks_csb_port_ops
 		}
 	};
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 440e2cb..503245a 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -16,7 +16,7 @@
  *
  *	If you have strange problems with nVidia chipset systems please
  *	see the SI support documentation and update your system BIOS
- *	if neccessary
+ *	if necessary
  *
  * TODO
  *	If we know all our devices are LBA28 (or LBA28 sized)  we could use
@@ -33,7 +33,9 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sil680"
-#define DRV_VERSION "0.4.6"
+#define DRV_VERSION "0.4.8"
+
+#define SIL680_MMIO_BAR		5
 
 /**
  *	sil680_selreg		-	return register base
@@ -92,33 +94,6 @@ static int sil680_cable_detect(struct ata_port *ap) {
 }
 
 /**
- *	sil680_bus_reset	-	reset the SIL680 bus
- *	@ap: ATA port to reset
- *	@deadline: deadline jiffies for the operation
- *
- *	Perform the SIL680 housekeeping when doing an ATA bus reset
- */
-
-static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes,
-			    unsigned long deadline)
-{
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	unsigned long addr = sil680_selreg(ap, 0);
-	u8 reset;
-
-	pci_read_config_byte(pdev, addr, &reset);
-	pci_write_config_byte(pdev, addr, reset | 0x03);
-	udelay(25);
-	pci_write_config_byte(pdev, addr, reset);
-	return ata_std_softreset(ap, classes, deadline);
-}
-
-static void sil680_error_handler(struct ata_port *ap)
-{
-	ata_bmdma_drive_eh(ap, ata_std_prereset, sil680_bus_reset, NULL, ata_std_postreset);
-}
-
-/**
  *	sil680_set_piomode	-	set initial PIO mode data
  *	@ap: ATA interface
  *	@adev: ATA device
@@ -235,7 +210,6 @@ static struct scsi_host_template sil680_sht = {
 };
 
 static struct ata_port_operations sil680_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= sil680_set_piomode,
 	.set_dmamode	= sil680_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -247,7 +221,7 @@ static struct ata_port_operations sil680_port_ops = {
 
 	.freeze		= ata_bmdma_freeze,
 	.thaw		= ata_bmdma_thaw,
-	.error_handler	= sil680_error_handler,
+	.error_handler	= ata_bmdma_error_handler,
 	.post_internal_cmd = ata_bmdma_post_internal_cmd,
 	.cable_detect	= sil680_cable_detect,
 
@@ -264,9 +238,8 @@ static struct ata_port_operations sil680_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -278,7 +251,7 @@ static struct ata_port_operations sil680_port_ops = {
  *	Returns the final clock settings.
  */
 
-static u8 sil680_init_chip(struct pci_dev *pdev)
+static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
 {
 	u32 class_rev	= 0;
 	u8 tmpbyte	= 0;
@@ -293,8 +266,10 @@ static u8 sil680_init_chip(struct pci_dev *pdev)
 
 	pci_read_config_byte(pdev, 0x8A, &tmpbyte);
 
-	printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
-			tmpbyte & 1, tmpbyte & 0x30);
+	dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
+		tmpbyte & 1, tmpbyte & 0x30);
+
+	*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
 
 	switch(tmpbyte & 0x30) {
 		case 0x00:
@@ -315,8 +290,8 @@ static u8 sil680_init_chip(struct pci_dev *pdev)
 	}
 
 	pci_read_config_byte(pdev,   0x8A, &tmpbyte);
-	printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
-			tmpbyte & 1, tmpbyte & 0x30);
+	dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
+		tmpbyte & 1, tmpbyte & 0x30);
 
 	pci_write_config_byte(pdev,  0xA1, 0x72);
 	pci_write_config_word(pdev,  0xA2, 0x328A);
@@ -339,45 +314,97 @@ static u8 sil680_init_chip(struct pci_dev *pdev)
 	return tmpbyte & 0x30;
 }
 
-static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit sil680_init_one(struct pci_dev *pdev,
+				     const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &sil680_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,
+		.udma_mask = ATA_UDMA6,
 		.port_ops = &sil680_port_ops
 	};
 	static const struct ata_port_info info_slow = {
 		.sht = &sil680_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &sil680_port_ops
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
 	static int printed_version;
+	struct ata_host *host;
+	void __iomem *mmio_base;
+	int rc, try_mmio;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
-	switch(sil680_init_chip(pdev))
-	{
+	switch (sil680_init_chip(pdev, &try_mmio)) {
 		case 0:
 			ppi[0] = &info_slow;
 			break;
 		case 0x30:
 			return -ENODEV;
 	}
+
+	if (!try_mmio)
+		goto use_ioports;
+
+	/* Try to acquire MMIO resources and fall back to PIO if
+	 * that fails
+	 */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+	rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
+	if (rc)
+		goto use_ioports;
+
+	/* Allocate host and set it up */
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+	if (!host)
+		return -ENOMEM;
+	host->iomap = pcim_iomap_table(pdev);
+
+	/* Setup DMA masks */
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		return rc;
+	pci_set_master(pdev);
+
+	/* Get MMIO base and initialize port addresses */
+	mmio_base = host->iomap[SIL680_MMIO_BAR];
+	host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
+	host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
+	host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
+	host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
+	ata_std_ports(&host->ports[0]->ioaddr);
+	host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
+	host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
+	host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
+	host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
+	ata_std_ports(&host->ports[1]->ioaddr);
+
+	/* Register & activate */
+	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+				 &sil680_sht);
+
+use_ioports:
 	return ata_pci_init_one(pdev, ppi);
 }
 
 #ifdef CONFIG_PM
 static int sil680_reinit_one(struct pci_dev *pdev)
 {
-	sil680_init_chip(pdev);
+	int try_mmio;
+
+	sil680_init_chip(pdev, &try_mmio);
 	return ata_pci_device_resume(pdev);
 }
 #endif
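
Note on the MMIO path added above: each SIL680 channel's taskfile and bus-master registers sit at fixed offsets inside BAR 5 (BMDMA at 0x00/0x08, command block at 0x80/0xc0, control/altstatus at 0x8a/0xca). A small standalone sketch of that layout (plain C, illustration only, not part of the patch):

#include <stdio.h>

struct sil680_chan_layout {
	unsigned bmdma;   /* bus-master DMA registers */
	unsigned cmd;     /* ATA command block */
	unsigned ctl;     /* device control / altstatus */
};

/* Per-channel offsets inside the SIL680 MMIO BAR, as wired up above. */
static struct sil680_chan_layout sil680_layout(int channel)
{
	struct sil680_chan_layout l;

	l.bmdma = 0x00 + 0x08 * channel;
	l.cmd   = 0x80 + 0x40 * channel;
	l.ctl   = 0x8a + 0x40 * channel;
	return l;
}

int main(void)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		struct sil680_chan_layout l = sil680_layout(ch);

		printf("channel %d: bmdma=0x%02x cmd=0x%02x ctl=0x%02x\n",
		       ch, l.bmdma, l.cmd, l.ctl);
	}
	return 0;
}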
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index cfe4ec6..aff3889 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -2,6 +2,7 @@
  *    pata_sis.c - SiS ATA driver
  *
  *	(C) 2005 Red Hat <alan@redhat.com>
+ *	(C) 2007 Bartlomiej Zolnierkiewicz
  *
  *    Based upon linux/drivers/ide/pci/sis5513.c
  * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
@@ -35,7 +36,7 @@
 #include "sis.h"
 
 #define DRV_NAME	"pata_sis"
-#define DRV_VERSION	"0.5.1"
+#define DRV_VERSION	"0.5.2"
 
 struct sis_chipset {
 	u16 device;				/* PCI host ID */
@@ -53,6 +54,8 @@ struct sis_laptop {
 static const struct sis_laptop sis_laptop[] = {
 	/* devid, subvendor, subdev */
 	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
+	{ 0x5513, 0x1734, 0x105F },	/* FSC Amilo A1630 */
+	{ 0x5513, 0x1071, 0x8640 },     /* EasyNote K5305 */
 	/* end marker */
 	{ 0, }
 };
@@ -82,7 +85,7 @@ static int sis_short_ata40(struct pci_dev *dev)
 
 static int sis_old_port_base(struct ata_device *adev)
 {
-	return  0x40 + (4 * adev->ap->port_no) +  (2 * adev->devno);
+	return  0x40 + (4 * adev->link->ap->port_no) +  (2 * adev->devno);
 }
 
 /**
@@ -131,25 +134,29 @@ static int sis_66_cable_detect(struct ata_port *ap)
 
 /**
  *	sis_pre_reset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Set up cable type and use generic probe init
  */
 
-static int sis_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits sis_enable_bits[] = {
 		{ 0x4aU, 1U, 0x02UL, 0x02UL },	/* port 0 */
 		{ 0x4aU, 1U, 0x04UL, 0x04UL },	/* port 1 */
 	};
 
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	/* Clear the FIFO settings. We can't enable the FIFO until
+	   we know we are poking at a disk */
+	pci_write_config_byte(pdev, 0x4B, 0);
+	return ata_std_prereset(link, deadline);
 }
 
 
@@ -234,7 +241,7 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- *	sis_100_set_pioode - Initialize host controller PATA PIO timings
+ *	sis_100_set_piomode - Initialize host controller PATA PIO timings
  *	@ap: Port whose timings we are configuring
  *	@adev: Device we are configuring for.
  *
@@ -259,7 +266,7 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
 }
 
 /**
- *	sis_133_set_pioode - Initialize host controller PATA PIO timings
+ *	sis_133_set_piomode - Initialize host controller PATA PIO timings
  *	@ap: Port whose timings we are configuring
  *	@adev: Device we are configuring for.
  *
@@ -331,7 +338,7 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	int drive_pci = sis_old_port_base(adev);
 	u16 timing;
 
-	const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
+	const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
 	const u16 udma_bits[]  = { 0xE000, 0xC000, 0xA000 };
 
 	pci_read_config_word(pdev, drive_pci, &timing);
@@ -339,15 +346,15 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	if (adev->dma_mode < XFER_UDMA_0) {
 		/* bits 3-0 hold recovery timing bits 8-10 active timing and
 		   the higher bits are dependent on the device */
-		timing &= ~ 0x870F;
+		timing &= ~0x870F;
 		timing |= mwdma_bits[speed];
-		pci_write_config_word(pdev, drive_pci, timing);
 	} else {
 		/* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
 		speed = adev->dma_mode - XFER_UDMA_0;
 		timing &= ~0x6000;
 		timing |= udma_bits[speed];
 	}
+	pci_write_config_word(pdev, drive_pci, timing);
 }
 
 /**
@@ -370,8 +377,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	int drive_pci = sis_old_port_base(adev);
 	u16 timing;
 
-	const u16 mwdma_bits[] = { 0x707, 0x202, 0x202 };
-	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
+	/* MWDMA 0-2 and UDMA 0-5 */
+	const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
 
 	pci_read_config_word(pdev, drive_pci, &timing);
 
@@ -429,8 +437,7 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
  *	@adev: Device to program
  *
  *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
- *	Handles early SiS 961 bridges. Supports MWDMA as well unlike
- *	the old ide/pci driver.
+ *	Handles early SiS 961 bridges.
  *
  *	LOCKING:
  *	None (inherited from caller).
@@ -464,8 +471,6 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a
  *	@adev: Device to program
  *
  *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
- *	Handles early SiS 961 bridges. Supports MWDMA as well unlike
- *	the old ide/pci driver.
  *
  *	LOCKING:
  *	None (inherited from caller).
@@ -527,7 +532,6 @@ static struct scsi_host_template sis_sht = {
 };
 
 static const struct ata_port_operations sis_133_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= sis_133_set_piomode,
 	.set_dmamode		= sis_133_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -555,13 +559,11 @@ static const struct ata_port_operations sis_133_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_operations sis_133_for_sata_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= sis_133_set_piomode,
 	.set_dmamode		= sis_133_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -589,13 +591,11 @@ static const struct ata_port_operations sis_133_for_sata_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_operations sis_133_early_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= sis_100_set_piomode,
 	.set_dmamode		= sis_133_early_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -623,13 +623,11 @@ static const struct ata_port_operations sis_133_early_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_operations sis_100_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= sis_100_set_piomode,
 	.set_dmamode		= sis_100_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -657,13 +655,11 @@ static const struct ata_port_operations sis_100_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_operations sis_66_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= sis_old_set_piomode,
 	.set_dmamode		= sis_66_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -691,13 +687,11 @@ static const struct ata_port_operations sis_66_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_operations sis_old_ops = {
-	.port_disable		= ata_port_disable,
 	.set_piomode		= sis_old_set_piomode,
 	.set_dmamode		= sis_old_set_dmamode,
 	.mode_filter		= ata_pci_default_filter,
@@ -725,14 +719,13 @@ static const struct ata_port_operations sis_old_ops = {
 	.irq_handler		= ata_interrupt,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
-	.port_start		= ata_port_start,
+	.port_start		= ata_sff_port_start,
 };
 
 static const struct ata_port_info sis_info = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.mwdma_mask	= 0x07,
 	.udma_mask	= 0,
@@ -740,7 +733,7 @@ static const struct ata_port_info sis_info = {
 };
 static const struct ata_port_info sis_info33 = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.mwdma_mask	= 0x07,
 	.udma_mask	= ATA_UDMA2,	/* UDMA 33 */
@@ -748,28 +741,28 @@ static const struct ata_port_info sis_info33 = {
 };
 static const struct ata_port_info sis_info66 = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA4,	/* UDMA 66 */
 	.port_ops	= &sis_66_ops,
 };
 static const struct ata_port_info sis_info100 = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA5,
 	.port_ops	= &sis_100_ops,
 };
 static const struct ata_port_info sis_info100_early = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.udma_mask	= ATA_UDMA5,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.port_ops	= &sis_66_ops,
 };
 static const struct ata_port_info sis_info133 = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &sis_133_ops,
@@ -783,7 +776,7 @@ const struct ata_port_info sis_info133_for_sata = {
 };
 static const struct ata_port_info sis_info133_early = {
 	.sht		= &sis_sht,
-	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+	.flags		= ATA_FLAG_SLAVE_POSS,
 	.pio_mask	= 0x1f,	/* pio0-4 */
 	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &sis_133_early_ops,
@@ -869,6 +862,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct pci_dev *host = NULL;
 	struct sis_chipset *chipset = NULL;
 	struct sis_chipset *sets;
+	u8 revision;
 
 	static struct sis_chipset sis_chipsets[] = {
 
@@ -926,11 +920,10 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	for (sets = &sis_chipsets[0]; sets->device; sets++) {
 		host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
 		if (host != NULL) {
+			pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
 			chipset = sets;			/* Match found */
 			if (sets->device == 0x630) {	/* SIS630 */
-				u8 host_rev;
-				pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
-				if (host_rev >= 0x30)	/* 630 ET */
+				if (revision >= 0x30)	/* 630 ET */
 					chipset = &sis100_early;
 			}
 			break;
@@ -974,7 +967,6 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		u16 trueid;
 		u8 prefctl;
 		u8 idecfg;
-		u8 sbrev;
 
 		/* Try the second unmasking technique */
 		pci_read_config_byte(pdev, 0x4a, &idecfg);
@@ -987,11 +979,11 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
 			if (lpc_bridge == NULL)
 				break;
-			pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev);
 			pci_read_config_byte(pdev, 0x49, &prefctl);
+			pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &revision);
 			pci_dev_put(lpc_bridge);
 
-			if (sbrev == 0x10 && (prefctl & 0x80)) {
+			if (revision == 0x10 && (prefctl & 0x80)) {
 				chipset = &sis133_early;
 				break;
 			}
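
Note on the pata_sis hunks above: the old per-drive timing layout is kept, with each drive's timing word at PCI config offset 0x40 plus 4 bytes per port and 2 per device, as computed by sis_old_port_base(). A standalone sketch of that offset arithmetic (plain C, illustration only, not part of the patch):

#include <stdio.h>

/* Config-space offset of the timing word for a given port/device,
 * mirroring sis_old_port_base() in the hunks above. */
static int sis_old_port_base(int port_no, int devno)
{
	return 0x40 + 4 * port_no + 2 * devno;
}

int main(void)
{
	int port, dev;

	for (port = 0; port < 2; port++)
		for (dev = 0; dev < 2; dev++)
			printf("port %d dev %d -> reg 0x%02x\n",
			       port, dev, sis_old_port_base(port, dev));
	return 0;
}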
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index e5aaec4..e2d458b 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -26,7 +26,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sl82c105"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.2"
 
 enum {
 	/*
@@ -43,23 +43,24 @@ enum {
 
 /**
  *	sl82c105_pre_reset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Set up cable type and use generic probe init
  */
 
-static int sl82c105_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits sl82c105_enable_bits[] = {
 		{ 0x40, 1, 0x01, 0x01 },
 		{ 0x40, 1, 0x10, 0x10 }
 	};
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
 		return -ENOENT;
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 
@@ -224,7 +225,6 @@ static struct scsi_host_template sl82c105_sht = {
 };
 
 static struct ata_port_operations sl82c105_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= sl82c105_set_piomode,
 	.mode_filter	= ata_pci_default_filter,
 
@@ -253,9 +253,8 @@ static struct ata_port_operations sl82c105_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -270,7 +269,7 @@ static struct ata_port_operations sl82c105_port_ops = {
 static int sl82c105_bridge_revision(struct pci_dev *pdev)
 {
 	struct pci_dev *bridge;
-	u8 rev;
+	u8 revision;
 
 	/*
 	 * The bridge should be part of the same device, but function 0.
@@ -292,10 +291,9 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev)
 	/*
 	 * We need to find function 0's revision, not function 1
 	 */
-	pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
-
 	pci_dev_put(bridge);
-	return rev;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
+	return revision;
 }
 
 
@@ -303,14 +301,14 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
 {
 	static const struct ata_port_info info_dma = {
 		.sht = &sl82c105_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &sl82c105_port_ops
 	};
 	static const struct ata_port_info info_early = {
 		.sht = &sl82c105_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.port_ops = &sl82c105_port_ops
 	};
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index b1d3076..403eafc 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -47,25 +47,26 @@
 
 /**
  *	triflex_prereset		-	probe begin
- *	@ap: ATA port
+ *	@link: ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	Set up cable type and use generic probe init
  */
 
-static int triflex_prereset(struct ata_port *ap, unsigned long deadline)
+static int triflex_prereset(struct ata_link *link, unsigned long deadline)
 {
 	static const struct pci_bits triflex_enable_bits[] = {
 		{ 0x80, 1, 0x01, 0x01 },
 		{ 0x80, 1, 0x02, 0x02 }
 	};
 
+	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
 	if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
 		return -ENOENT;
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 
@@ -197,7 +198,6 @@ static struct scsi_host_template triflex_sht = {
 };
 
 static struct ata_port_operations triflex_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= triflex_set_piomode,
 	.mode_filter	= ata_pci_default_filter,
 
@@ -226,16 +226,15 @@ static struct ata_port_operations triflex_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.sht = &triflex_sht,
-		.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.port_ops = &triflex_port_ops
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 63eca29..325460d 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -63,7 +63,7 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_via"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.3.3"
 
 /*
  *	The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -97,6 +97,7 @@ static const struct via_isa_bridge {
 	u8 rev_max;
 	u16 flags;
 } via_isa_bridges[] = {
+	{ "vx800",	PCI_DEVICE_ID_VIA_VX800,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt8237s",	PCI_DEVICE_ID_VIA_8237S,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt8251",	PCI_DEVICE_ID_VIA_8251,     0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "cx700",	PCI_DEVICE_ID_VIA_CX700,    0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@@ -144,6 +145,9 @@ static int via_cable_override(struct pci_dev *pdev)
 	/* Systems by DMI */
 	if (dmi_check_system(cable_dmi_table))
 		return 1;
+	/* Arima W730-K8/Targa Visionary 811/... */
+	if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032)
+		return 1;
 	return 0;
 }
 
@@ -172,7 +176,7 @@ static int via_cable_detect(struct ata_port *ap) {
 	if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
 		return ATA_CBL_PATA40;
 	/* UDMA 66 chips have only drive side logic */
-	else if((config->flags & VIA_UDMA) < VIA_UDMA_100)
+	else if ((config->flags & VIA_UDMA) < VIA_UDMA_100)
 		return ATA_CBL_PATA_UNK;
 	/* UDMA 100 or later */
 	pci_read_config_dword(pdev, 0x50, &ata66);
@@ -180,11 +184,15 @@ static int via_cable_detect(struct ata_port *ap) {
 	   two drives */
 	if (ata66 & (0x10100000 >> (16 * ap->port_no)))
 		return ATA_CBL_PATA80;
+	/* Check with ACPI so we can spot BIOS reported SATA bridges */
+	if (ata_acpi_cbl_80wire(ap))
+		return ATA_CBL_PATA80;
 	return ATA_CBL_PATA40;
 }
 
-static int via_pre_reset(struct ata_port *ap, unsigned long deadline)
+static int via_pre_reset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	const struct via_isa_bridge *config = ap->host->private_data;
 
 	if (!(config->flags & VIA_NO_ENABLES)) {
@@ -197,7 +205,7 @@ static int via_pre_reset(struct ata_port *ap, unsigned long deadline)
 			return -ENOENT;
 	}
 
-	return ata_std_prereset(ap, deadline);
+	return ata_std_prereset(link, deadline);
 }
 
 
@@ -240,7 +248,6 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
 	int ut;
 	int offset = 3 - (2*ap->port_no) - adev->devno;
 
-
 	/* Calculate the timing values we require */
 	ata_timing_compute(adev, mode, &t, T, UT);
 
@@ -287,9 +294,17 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
 			ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
 			break;
 	}
+
 	/* Set UDMA unless device is not UDMA capable */
-	if (udma_type)
-		pci_write_config_byte(pdev, 0x50 + offset, ut);
+	if (udma_type && t.udma) {
+		u8 cable80_status;
+
+		/* Get 80-wire cable detection bit */
+		pci_read_config_byte(pdev, 0x50 + offset, &cable80_status);
+		cable80_status &= 0x10;
+
+		pci_write_config_byte(pdev, 0x50 + offset, ut | cable80_status);
+	}
 }
 
 static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
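
Note on the via_do_set_mode() change above: bit 4 of the UDMA timing byte (VIA's 80-wire cable detection bit) is no longer clobbered; the old value is read, only bit 0x10 is kept, and it is merged into the new timing, which is now written only when the device actually runs UDMA. A standalone sketch of that read-modify-write (plain C, hypothetical register values, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t old_reg = 0x17;              /* hypothetical old 0x50+offset value */
	uint8_t ut      = 0xe3;              /* new UDMA timing to program */

	uint8_t cable80 = old_reg & 0x10;    /* preserve the cable-detect bit */
	uint8_t written = ut | cable80;      /* merge it into the new timing */

	printf("old=0x%02X new=0x%02X written=0x%02X\n", old_reg, ut, written);
	return 0;
}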
@@ -333,7 +348,6 @@ static struct scsi_host_template via_sht = {
 };
 
 static struct ata_port_operations via_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= via_set_piomode,
 	.set_dmamode	= via_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -363,13 +377,11 @@ static struct ata_port_operations via_port_ops = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 static struct ata_port_operations via_port_ops_noirq = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= via_set_piomode,
 	.set_dmamode	= via_set_dmamode,
 	.mode_filter	= ata_pci_default_filter,
@@ -399,9 +411,8 @@ static struct ata_port_operations via_port_ops_noirq = {
 	.irq_handler	= ata_interrupt,
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -409,7 +420,7 @@ static struct ata_port_operations via_port_ops_noirq = {
  *	@pdev: PCI device
  *	@flags: configuration flags
  *
- *	Set the FIFO properties for this device if neccessary. Used both on
+ *	Set the FIFO properties for this device if necessary. Used both on
  *	set up and on and the resume path
  */
 
@@ -471,7 +482,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7,
+		.udma_mask = ATA_UDMA2,
 		.port_ops = &via_port_ops
 	};
 	/* VIA UDMA 66 devices */
@@ -480,7 +491,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x1f,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &via_port_ops
 	};
 	/* VIA UDMA 100 devices */
@@ -489,7 +500,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x3f,
+		.udma_mask = ATA_UDMA5,
 		.port_ops = &via_port_ops
 	};
 	/* UDMA133 with bad AST (All current 133) */
@@ -498,7 +509,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
-		.udma_mask = 0x7f,	/* FIXME: should check north bridge */
+		.udma_mask = ATA_UDMA6,	/* FIXME: should check north bridge */
 		.port_ops = &via_port_ops
 	};
 	struct ata_port_info type;
@@ -506,9 +517,9 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct pci_dev *isa = NULL;
 	const struct via_isa_bridge *config;
 	static int printed_version;
-	u8 t;
 	u8 enable;
 	u32 timing;
+	u8 isa_revision;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -520,9 +531,9 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 			!!(config->flags & VIA_BAD_ID),
 			config->id, NULL))) {
 
-			pci_read_config_byte(isa, PCI_REVISION_ID, &t);
-			if (t >= config->rev_min &&
-			    t <= config->rev_max)
+			pci_read_config_byte(pdev, PCI_REVISION_ID, &isa_revision);
+			if (isa_revision >= config->rev_min &&
+			    isa_revision <= config->rev_max)
 				break;
 			pci_dev_put(isa);
 		}
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 83abfec..311cdb3 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -94,7 +94,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
 
 static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
 {
-	struct ata_port *ap = adev->ap;
+	struct ata_port *ap = adev->link->ap;
 	int slop = buflen & 3;
 
 	if (ata_id_has_dword_io(adev->id)) {
@@ -138,7 +138,6 @@ static struct scsi_host_template winbond_sht = {
 };
 
 static struct ata_port_operations winbond_port_ops = {
-	.port_disable	= ata_port_disable,
 	.set_piomode	= winbond_set_piomode,
 
 	.tf_load	= ata_tf_load,
@@ -160,9 +159,8 @@ static struct ata_port_operations winbond_port_ops = {
 
 	.irq_clear	= ata_bmdma_irq_clear,
 	.irq_on		= ata_irq_on,
-	.irq_ack	= ata_irq_ack,
 
-	.port_start	= ata_port_start,
+	.port_start	= ata_sff_port_start,
 };
 
 /**
@@ -199,6 +197,7 @@ static __init int winbond_init_one(unsigned long port)
 
 	for (i = 0; i < 2 ; i ++) {
 		unsigned long cmd_port = 0x1F0 - (0x80 * i);
+		unsigned long ctl_port = cmd_port + 0x206;
 		struct ata_host *host;
 		struct ata_port *ap;
 		void __iomem *cmd_addr, *ctl_addr;
@@ -214,14 +213,16 @@ static __init int winbond_init_one(unsigned long port)
 		host = ata_host_alloc(&pdev->dev, 1);
 		if (!host)
 			goto err_unregister;
+		ap = host->ports[0];
 
 		rc = -ENOMEM;
 		cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
-		ctl_addr = devm_ioport_map(&pdev->dev, cmd_port + 0x0206, 1);
+		ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
 		if (!cmd_addr || !ctl_addr)
 			goto err_unregister;
 
-		ap = host->ports[0];
+		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
+
 		ap->ops = &winbond_port_ops;
 		ap->pio_mask = 0x1F;
 		ap->flags |= ATA_FLAG_SLAVE_POSS;
@@ -278,7 +279,7 @@ static __init int winbond_init(void)
 
 			if (request_region(port, 2, "pata_winbond")) {
 				ret = winbond_init_one(port);
-				if(ret <= 0)
+				if (ret <= 0)
 					release_region(port, 2);
 				else ct+= ret;
 			}
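
Note on winbond_data_xfer() above: when the device supports dword I/O, the transfer is split into full 32-bit words plus a byte tail ("slop" = buflen & 3). A trivial standalone sketch of the shape of that split (plain C, hypothetical length, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int buflen = 517;           /* hypothetical transfer length */
	unsigned int dwords = buflen >> 2;   /* full 32-bit words */
	unsigned int slop   = buflen & 3;    /* trailing bytes handled separately */

	printf("%u bytes -> %u dwords + %u slop bytes\n", buflen, dwords, slop);
	return 0;
}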
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5152394..6b8032d 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -44,13 +44,13 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pdc_adma"
-#define DRV_VERSION	"0.06"
+#define DRV_VERSION	"1.0"
 
 /* macro to calculate base address for ATA regs */
-#define ADMA_ATA_REGS(base,port_no)	((base) + ((port_no) * 0x40))
+#define ADMA_ATA_REGS(base, port_no)	((base) + ((port_no) * 0x40))
 
 /* macro to calculate base address for ADMA regs */
-#define ADMA_REGS(base,port_no)		((base) + 0x80 + ((port_no) * 0x20))
+#define ADMA_REGS(base, port_no)	((base) + 0x80 + ((port_no) * 0x20))
 
 /* macro to obtain addresses from ata_port */
 #define ADMA_PORT_REGS(ap) \
@@ -92,6 +92,8 @@ enum {
 
 	/* CPB bits */
 	cDONE			= (1 << 0),
+	cATERR			= (1 << 3),
+
 	cVLD			= (1 << 0),
 	cDAT			= (1 << 2),
 	cIEN			= (1 << 3),
@@ -126,54 +128,54 @@ struct adma_port_priv {
 	adma_state_t		state;
 };
 
-static int adma_ata_init_one (struct pci_dev *pdev,
+static int adma_ata_init_one(struct pci_dev *pdev,
 				const struct pci_device_id *ent);
 static int adma_port_start(struct ata_port *ap);
 static void adma_host_stop(struct ata_host *host);
 static void adma_port_stop(struct ata_port *ap);
-static void adma_phy_reset(struct ata_port *ap);
 static void adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
 static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void adma_bmdma_stop(struct ata_queued_cmd *qc);
 static u8 adma_bmdma_status(struct ata_port *ap);
 static void adma_irq_clear(struct ata_port *ap);
-static void adma_eng_timeout(struct ata_port *ap);
+static void adma_freeze(struct ata_port *ap);
+static void adma_thaw(struct ata_port *ap);
+static void adma_error_handler(struct ata_port *ap);
 
 static struct scsi_host_template adma_ata_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+	.proc_name		= DRV_NAME,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= LIBATA_MAX_PRD,
+	.dma_boundary		= ADMA_DMA_BOUNDARY,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
-	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.proc_name		= DRV_NAME,
-	.dma_boundary		= ADMA_DMA_BOUNDARY,
-	.slave_configure	= ata_scsi_slave_config,
-	.slave_destroy		= ata_scsi_slave_destroy,
-	.bios_param		= ata_std_bios_param,
+	.emulated		= ATA_SHT_EMULATED,
 };
 
 static const struct ata_port_operations adma_ata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
-	.check_status		= ata_check_status,
-	.check_atapi_dma	= adma_check_atapi_dma,
 	.exec_command		= ata_exec_command,
+	.check_status		= ata_check_status,
 	.dev_select		= ata_std_dev_select,
-	.phy_reset		= adma_phy_reset,
+	.check_atapi_dma	= adma_check_atapi_dma,
+	.data_xfer		= ata_data_xfer,
 	.qc_prep		= adma_qc_prep,
 	.qc_issue		= adma_qc_issue,
-	.eng_timeout		= adma_eng_timeout,
-	.data_xfer		= ata_data_xfer,
+	.freeze			= adma_freeze,
+	.thaw			= adma_thaw,
+	.error_handler		= adma_error_handler,
 	.irq_clear		= adma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.port_start		= adma_port_start,
 	.port_stop		= adma_port_stop,
 	.host_stop		= adma_host_stop,
@@ -184,11 +186,11 @@ static const struct ata_port_operations adma_ata_ops = {
 static struct ata_port_info adma_port_info[] = {
 	/* board_1841_idx */
 	{
-		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
+		.flags		= ATA_FLAG_SLAVE_POSS |
 				  ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
 				  ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x10, /* pio4 */
-		.udma_mask	= 0x1f, /* udma0-4 */
+		.udma_mask	= ATA_UDMA4,
 		.port_ops	= &adma_ata_ops,
 	},
 };
@@ -273,24 +275,42 @@ static inline void adma_enter_reg_mode(struct ata_port *ap)
 	readb(chan + ADMA_STATUS);	/* flush */
 }
 
-static void adma_phy_reset(struct ata_port *ap)
+static void adma_freeze(struct ata_port *ap)
 {
-	struct adma_port_priv *pp = ap->private_data;
+	void __iomem *chan = ADMA_PORT_REGS(ap);
 
-	pp->state = adma_state_idle;
+	/* mask/clear ATA interrupts */
+	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
+	ata_check_status(ap);
+
+	/* reset ADMA to idle state */
+	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
+	udelay(2);
+	writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
+	udelay(2);
+}
+
+static void adma_thaw(struct ata_port *ap)
+{
 	adma_reinit_engine(ap);
-	ata_port_probe(ap);
-	ata_bus_reset(ap);
 }
 
-static void adma_eng_timeout(struct ata_port *ap)
+static int adma_prereset(struct ata_link *link, unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	struct adma_port_priv *pp = ap->private_data;
 
 	if (pp->state != adma_state_idle) /* healthy paranoia */
 		pp->state = adma_state_mmio;
 	adma_reinit_engine(ap);
-	ata_eng_timeout(ap);
+
+	return ata_std_prereset(link, deadline);
+}
+
+static void adma_error_handler(struct ata_port *ap)
+{
+	ata_do_eh(ap, adma_prereset, ata_std_softreset, NULL,
+		  ata_std_postreset);
 }
 
 static int adma_fill_sg(struct ata_queued_cmd *qc)
@@ -298,7 +318,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
 	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct adma_port_priv *pp = ap->private_data;
-	u8  *buf = pp->pkt;
+	u8  *buf = pp->pkt, *last_buf = NULL;
 	int i = (2 + buf[3]) * 8;
 	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
 
@@ -314,20 +334,23 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
 		*(__le32 *)(buf + i) = cpu_to_le32(len);
 		i += 4;
 
-		if (ata_sg_is_last(sg, qc))
-			pFLAGS |= pEND;
+		last_buf = &buf[i];
 		buf[i++] = pFLAGS;
 		buf[i++] = qc->dev->dma_mode & 0xf;
 		buf[i++] = 0;	/* pPKLW */
 		buf[i++] = 0;	/* reserved */
 
-		*(__le32 *)(buf + i)
-			= (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
+		*(__le32 *)(buf + i) =
+			(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
 		i += 4;
 
 		VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
 					(unsigned long)addr, len);
 	}
+
+	if (likely(last_buf))
+		*last_buf |= pEND;
+
 	return i;
 }
 
@@ -464,14 +487,33 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
 			continue;
-		qc = ata_qc_from_tag(ap, ap->active_tag);
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-			if ((status & (aPERR | aPSD | aUIRQ)))
+			if (status & aPERR)
+				qc->err_mask |= AC_ERR_HOST_BUS;
+			else if ((status & (aPSD | aUIRQ)))
 				qc->err_mask |= AC_ERR_OTHER;
+
+			if (pp->pkt[0] & cATERR)
+				qc->err_mask |= AC_ERR_DEV;
 			else if (pp->pkt[0] != cDONE)
 				qc->err_mask |= AC_ERR_OTHER;
 
-			ata_qc_complete(qc);
+			if (!qc->err_mask)
+				ata_qc_complete(qc);
+			else {
+				struct ata_eh_info *ehi = &ap->link.eh_info;
+				ata_ehi_clear_desc(ehi);
+				ata_ehi_push_desc(ehi,
+					"ADMA-status 0x%02X", status);
+				ata_ehi_push_desc(ehi,
+					"pkt[0] 0x%02X", pp->pkt[0]);
+
+				if (qc->err_mask == AC_ERR_DEV)
+					ata_port_abort(ap);
+				else
+					ata_port_freeze(ap);
+			}
 		}
 	}
 	return handled;
@@ -489,7 +531,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
 				continue;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 
 				/* check main status, clearing INTRQ */
@@ -502,7 +544,20 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
 				/* complete taskfile transaction */
 				pp->state = adma_state_idle;
 				qc->err_mask |= ac_err_mask(status);
-				ata_qc_complete(qc);
+				if (!qc->err_mask)
+					ata_qc_complete(qc);
+				else {
+					struct ata_eh_info *ehi =
+						&ap->link.eh_info;
+					ata_ehi_clear_desc(ehi);
+					ata_ehi_push_desc(ehi,
+						"status 0x%02X", status);
+
+					if (qc->err_mask == AC_ERR_DEV)
+						ata_port_abort(ap);
+					else
+						ata_port_freeze(ap);
+				}
 				handled = 1;
 			}
 		}
@@ -562,7 +617,7 @@ static int adma_port_start(struct ata_port *ap)
 		return -ENOMEM;
 	/* paranoia? */
 	if ((pp->pkt_dma & 7) != 0) {
-		printk("bad alignment for pp->pkt_dma: %08x\n",
+		printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
 						(u32)pp->pkt_dma);
 		return -ENOMEM;
 	}
@@ -652,9 +707,16 @@ static int adma_ata_init_one(struct pci_dev *pdev,
 	if (rc)
 		return rc;
 
-	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
-		adma_ata_setup_port(&host->ports[port_no]->ioaddr,
-				    ADMA_ATA_REGS(mmio_base, port_no));
+	for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
+		struct ata_port *ap = host->ports[port_no];
+		void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
+		unsigned int offset = port_base - mmio_base;
+
+		adma_ata_setup_port(&ap->ioaddr, port_base);
+
+		ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
+	}
 
 	/* initialize adapter */
 	adma_host_init(host, board_idx);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
new file mode 100644
index 0000000..91954fa
--- /dev/null
+++ b/drivers/ata/sata_fsl.c
@@ -0,0 +1,1395 @@
+/*
+ * drivers/ata/sata_fsl.c
+ *
+ * Freescale 3.0Gbps SATA device driver
+ *
+ * Author: Ashish Kalra <ashish.kalra@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Copyright (c) 2006-2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include <asm/io.h>
+#include <linux/of_platform.h>
+
+/* Controller information */
+enum {
+	SATA_FSL_QUEUE_DEPTH	= 16,
+	SATA_FSL_MAX_PRD	= 63,
+	SATA_FSL_MAX_PRD_USABLE	= SATA_FSL_MAX_PRD - 1,
+	SATA_FSL_MAX_PRD_DIRECT	= 16,	/* Direct PRDT entries */
+
+	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+				ATA_FLAG_NCQ),
+	SATA_FSL_HOST_LFLAGS	= ATA_LFLAG_SKIP_D2H_BSY,
+
+	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
+	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */
+	SATA_FSL_CMD_SLOT_SIZE  = (SATA_FSL_MAX_CMDS * SATA_FSL_CMD_HDR_SIZE),
+
+	/*
+	 * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and
+	 * chained indirect PRDEs up to a max count of 63.
+	 * We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will
+	 * be set up as an indirect descriptor, pointing to its next
+	 * (contiguous) PRDE. Though chained indirect PRDE arrays are
+	 * supported, it will be more efficient to use a direct PRDT and
+	 * a single chain/link to indirect PRDE array/PRDT.
+	 */
+
+	SATA_FSL_CMD_DESC_CFIS_SZ	= 32,
+	SATA_FSL_CMD_DESC_SFIS_SZ	= 32,
+	SATA_FSL_CMD_DESC_ACMD_SZ	= 16,
+	SATA_FSL_CMD_DESC_RSRVD		= 16,
+
+	SATA_FSL_CMD_DESC_SIZE	= (SATA_FSL_CMD_DESC_CFIS_SZ +
+				 SATA_FSL_CMD_DESC_SFIS_SZ +
+				 SATA_FSL_CMD_DESC_ACMD_SZ +
+				 SATA_FSL_CMD_DESC_RSRVD +
+				 SATA_FSL_MAX_PRD * 16),
+
+	SATA_FSL_CMD_DESC_OFFSET_TO_PRDT	=
+				(SATA_FSL_CMD_DESC_CFIS_SZ +
+				 SATA_FSL_CMD_DESC_SFIS_SZ +
+				 SATA_FSL_CMD_DESC_ACMD_SZ +
+				 SATA_FSL_CMD_DESC_RSRVD),
+
+	SATA_FSL_CMD_DESC_AR_SZ	= (SATA_FSL_CMD_DESC_SIZE * SATA_FSL_MAX_CMDS),
+	SATA_FSL_PORT_PRIV_DMA_SZ = (SATA_FSL_CMD_SLOT_SIZE +
+					SATA_FSL_CMD_DESC_AR_SZ),
+
+	/*
+	 * MPC8315 has two SATA controllers, SATA1 & SATA2
+	 * (one port per controller)
+	 * MPC837x has 2/4 controllers, one port per controller
+	 */
+
+	SATA_FSL_MAX_PORTS	= 1,
+
+	SATA_FSL_IRQ_FLAG	= IRQF_SHARED,
+};
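
Note on the sizes above: the per-port DMA layout is a 16-entry command slot table of 16-byte headers, plus 16 command descriptors, each holding 32+32+16+16 bytes of CFIS/SFIS/ACMD/reserved space followed by 63 PRD entries of 16 bytes apiece. A standalone sketch of the arithmetic (plain C, illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	const int cfis = 32, sfis = 32, acmd = 16, rsrvd = 16;	/* descriptor head */
	const int max_prd = 63, prde_size = 16;			/* PRD table */
	const int max_cmds = 16, cmd_hdr_size = 16;		/* command slots */

	int desc_size  = cfis + sfis + acmd + rsrvd + max_prd * prde_size;
	int desc_array = desc_size * max_cmds;
	int slot_table = max_cmds * cmd_hdr_size;

	printf("per-command descriptor : %d bytes\n", desc_size);	/* 1104 */
	printf("descriptor array       : %d bytes\n", desc_array);	/* 17664 */
	printf("command slot table     : %d bytes\n", slot_table);	/* 256 */
	printf("per-port DMA area      : %d bytes\n", slot_table + desc_array);
	return 0;
}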
+
+/*
+* Host Controller command register set - per port
+*/
+enum {
+	CQ = 0,
+	CA = 8,
+	CC = 0x10,
+	CE = 0x18,
+	DE = 0x20,
+	CHBA = 0x24,
+	HSTATUS = 0x28,
+	HCONTROL = 0x2C,
+	CQPMP = 0x30,
+	SIGNATURE = 0x34,
+	ICC = 0x38,
+
+	/*
+	 * Host Status Register (HStatus) bitdefs
+	 */
+	ONLINE = (1 << 31),
+	GOING_OFFLINE = (1 << 30),
+	BIST_ERR = (1 << 29),
+
+	FATAL_ERR_HC_MASTER_ERR = (1 << 18),
+	FATAL_ERR_PARITY_ERR_TX = (1 << 17),
+	FATAL_ERR_PARITY_ERR_RX = (1 << 16),
+	FATAL_ERR_DATA_UNDERRUN = (1 << 13),
+	FATAL_ERR_DATA_OVERRUN = (1 << 12),
+	FATAL_ERR_CRC_ERR_TX = (1 << 11),
+	FATAL_ERR_CRC_ERR_RX = (1 << 10),
+	FATAL_ERR_FIFO_OVRFL_TX = (1 << 9),
+	FATAL_ERR_FIFO_OVRFL_RX = (1 << 8),
+
+	FATAL_ERROR_DECODE = FATAL_ERR_HC_MASTER_ERR |
+	    FATAL_ERR_PARITY_ERR_TX |
+	    FATAL_ERR_PARITY_ERR_RX |
+	    FATAL_ERR_DATA_UNDERRUN |
+	    FATAL_ERR_DATA_OVERRUN |
+	    FATAL_ERR_CRC_ERR_TX |
+	    FATAL_ERR_CRC_ERR_RX |
+	    FATAL_ERR_FIFO_OVRFL_TX | FATAL_ERR_FIFO_OVRFL_RX,
+
+	INT_ON_FATAL_ERR = (1 << 5),
+	INT_ON_PHYRDY_CHG = (1 << 4),
+
+	INT_ON_SIGNATURE_UPDATE = (1 << 3),
+	INT_ON_SNOTIFY_UPDATE = (1 << 2),
+	INT_ON_SINGL_DEVICE_ERR = (1 << 1),
+	INT_ON_CMD_COMPLETE = 1,
+
+	INT_ON_ERROR = INT_ON_FATAL_ERR |
+	    INT_ON_PHYRDY_CHG | INT_ON_SINGL_DEVICE_ERR,
+
+	/*
+	 * Host Control Register (HControl) bitdefs
+	 */
+	HCONTROL_ONLINE_PHY_RST = (1 << 31),
+	HCONTROL_FORCE_OFFLINE = (1 << 30),
+	HCONTROL_PARITY_PROT_MOD = (1 << 14),
+	HCONTROL_DPATH_PARITY = (1 << 12),
+	HCONTROL_SNOOP_ENABLE = (1 << 10),
+	HCONTROL_PMP_ATTACHED = (1 << 9),
+	HCONTROL_COPYOUT_STATFIS = (1 << 8),
+	IE_ON_FATAL_ERR = (1 << 5),
+	IE_ON_PHYRDY_CHG = (1 << 4),
+	IE_ON_SIGNATURE_UPDATE = (1 << 3),
+	IE_ON_SNOTIFY_UPDATE = (1 << 2),
+	IE_ON_SINGL_DEVICE_ERR = (1 << 1),
+	IE_ON_CMD_COMPLETE = 1,
+
+	DEFAULT_PORT_IRQ_ENABLE_MASK = IE_ON_FATAL_ERR | IE_ON_PHYRDY_CHG |
+	    IE_ON_SIGNATURE_UPDATE |
+	    IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE,
+
+	EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31),
+	DATA_SNOOP_ENABLE = (1 << 22),
+};
+
+/*
+ * SATA Superset Registers
+ */
+enum {
+	SSTATUS = 0,
+	SERROR = 4,
+	SCONTROL = 8,
+	SNOTIFY = 0xC,
+};
+
+/*
+ * Control Status Register Set
+ */
+enum {
+	TRANSCFG = 0,
+	TRANSSTATUS = 4,
+	LINKCFG = 8,
+	LINKCFG1 = 0xC,
+	LINKCFG2 = 0x10,
+	LINKSTATUS = 0x14,
+	LINKSTATUS1 = 0x18,
+	PHYCTRLCFG = 0x1C,
+	COMMANDSTAT = 0x20,
+};
+
+/* PHY (link-layer) configuration control */
+enum {
+	PHY_BIST_ENABLE = 0x01,
+};
+
+/*
+ * Command Header Table entry, i.e, command slot
+ * 4 Dwords per command slot, command header size ==  64 Dwords.
+ */
+struct cmdhdr_tbl_entry {
+	u32 cda;
+	u32 prde_fis_len;
+	u32 ttl;
+	u32 desc_info;
+};
+
+/*
+ * Description information bitdefs
+ */
+enum {
+	VENDOR_SPECIFIC_BIST = (1 << 10),
+	CMD_DESC_SNOOP_ENABLE = (1 << 9),
+	FPDMA_QUEUED_CMD = (1 << 8),
+	SRST_CMD = (1 << 7),
+	BIST = (1 << 6),
+	ATAPI_CMD = (1 << 5),
+};
+
+/*
+ * Command Descriptor
+ */
+struct command_desc {
+	u8 cfis[8 * 4];
+	u8 sfis[8 * 4];
+	u8 acmd[4 * 4];
+	u8 fill[4 * 4];
+	u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4];
+	u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4];
+};
+
+/*
+ * Physical region table descriptor(PRD)
+ */
+
+struct prde {
+	u32 dba;
+	u8 fill[2 * 4];
+	u32 ddc_and_ext;
+};
+
+/*
+ * ata_port private data
+ * This is our per-port instance data.
+ */
+struct sata_fsl_port_priv {
+	struct cmdhdr_tbl_entry *cmdslot;
+	dma_addr_t cmdslot_paddr;
+	struct command_desc *cmdentry;
+	dma_addr_t cmdentry_paddr;
+
+	/*
+	 * SATA FSL controller has a Status FIS which should contain the
+	 * received D2H FIS & taskfile registers. This SFIS is present in
+	 * the command descriptor, and to have a ready reference to it,
+	 * we are caching it here, quite similar to what is done in H/W on
+	 * AHCI compliant devices by copying taskfile fields to a 32-bit
+	 * register.
+	 */
+
+	struct ata_taskfile tf;
+};
+
+/*
+ * ata_port->host_set private data
+ */
+struct sata_fsl_host_priv {
+	void __iomem *hcr_base;
+	void __iomem *ssr_base;
+	void __iomem *csr_base;
+	int irq;
+};
+
+static inline unsigned int sata_fsl_tag(unsigned int tag,
+					void __iomem *hcr_base)
+{
+	/* We let libATA core do actual (queue) tag allocation */
+
+	/* all non NCQ/queued commands should have tag#0 */
+	if (ata_tag_internal(tag)) {
+		DPRINTK("mapping internal cmds to tag#0\n");
+		return 0;
+	}
+
+	if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
+		DPRINTK("tag %d invalid : out of range\n", tag);
+		return 0;
+	}
+
+	if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) {
+		DPRINTK("tag %d invalid : in use!!\n", tag);
+		return 0;
+	}
+
+	return tag;
+}
+
+static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
+					 unsigned int tag, u32 desc_info,
+					 u32 data_xfer_len, u8 num_prde,
+					 u8 fis_len)
+{
+	dma_addr_t cmd_descriptor_address;
+
+	cmd_descriptor_address = pp->cmdentry_paddr +
+	    tag * SATA_FSL_CMD_DESC_SIZE;
+
+	/* NOTE: both data_xfer_len & fis_len are Dword counts */
+
+	pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address);
+	pp->cmdslot[tag].prde_fis_len =
+	    cpu_to_le32((num_prde << 16) | (fis_len << 2));
+	pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
+	pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
+
+	VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
+		pp->cmdslot[tag].cda,
+		pp->cmdslot[tag].prde_fis_len,
+		pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
+
+}
+
+static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+				     u32 *ttl, dma_addr_t cmd_desc_paddr)
+{
+	struct scatterlist *sg;
+	unsigned int num_prde = 0;
+	u32 ttl_dwords = 0;
+
+	/*
+	 * NOTE : direct & indirect PRDTs are contiguously allocated
+	 */
+	struct prde *prd = (struct prde *)&((struct command_desc *)
+					    cmd_desc)->prdt;
+
+	struct prde *prd_ptr_to_indirect_ext = NULL;
+	unsigned indirect_ext_segment_sz = 0;
+	dma_addr_t indirect_ext_segment_paddr;
+
+	VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd);
+
+	indirect_ext_segment_paddr = cmd_desc_paddr +
+	    SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
+
+	ata_for_each_sg(sg, qc) {
+		dma_addr_t sg_addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%x, sg_len = %d\n",
+			sg_addr, sg_len);
+
+		/* warn if each s/g element is not dword aligned */
+		if (sg_addr & 0x03)
+			ata_port_printk(qc->ap, KERN_ERR,
+					"s/g addr unaligned : 0x%x\n", sg_addr);
+		if (sg_len & 0x03)
+			ata_port_printk(qc->ap, KERN_ERR,
+					"s/g len unaligned : 0x%x\n", sg_len);
+
+		if ((num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1)) &&
+		    (qc->n_iter + 1 != qc->n_elem)) {
+			VPRINTK("setting indirect prde\n");
+			prd_ptr_to_indirect_ext = prd;
+			prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
+			indirect_ext_segment_sz = 0;
+			++prd;
+			++num_prde;
+		}
+
+		ttl_dwords += sg_len;
+		prd->dba = cpu_to_le32(sg_addr);
+		prd->ddc_and_ext =
+		    cpu_to_le32(DATA_SNOOP_ENABLE | (sg_len & ~0x03));
+
+		VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
+			ttl_dwords, prd->dba, prd->ddc_and_ext);
+
+		++num_prde;
+		++prd;
+		if (prd_ptr_to_indirect_ext)
+			indirect_ext_segment_sz += sg_len;
+	}
+
+	if (prd_ptr_to_indirect_ext) {
+		/* set indirect extension flag along with indirect ext. size */
+		prd_ptr_to_indirect_ext->ddc_and_ext =
+		    cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG |
+				 DATA_SNOOP_ENABLE |
+				 (indirect_ext_segment_sz & ~0x03)));
+	}
+
+	*ttl = ttl_dwords;
+	return num_prde;
+}
+
+static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+	struct command_desc *cd;
+	u32 desc_info = CMD_DESC_SNOOP_ENABLE;
+	u32 num_prde = 0;
+	u32 ttl_dwords = 0;
+	dma_addr_t cd_paddr;
+
+	cd = (struct command_desc *)pp->cmdentry + tag;
+	cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE;
+
+	ata_tf_to_fis(&qc->tf, 0, 1, (u8 *) &cd->cfis);
+
+	VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n",
+		cd->cfis[0], cd->cfis[1], cd->cfis[2]);
+
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		VPRINTK("FPDMA xfer,Sctor cnt[0:7],[8:15] = %d,%d\n",
+			cd->cfis[3], cd->cfis[11]);
+	}
+
+	/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
+	if (is_atapi_taskfile(&qc->tf)) {
+		desc_info |= ATAPI_CMD;
+		memset((void *)&cd->acmd, 0, 32);
+		memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
+	}
+
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		num_prde = sata_fsl_fill_sg(qc, (void *)cd,
+					    &ttl_dwords, cd_paddr);
+
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		desc_info |= FPDMA_QUEUED_CMD;
+
+	sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords,
+				     num_prde, 5);
+
+	VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+		desc_info, ttl_dwords, num_prde);
+}
+
+static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+
+	VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
+		ioread32(CQ + hcr_base),
+		ioread32(CA + hcr_base),
+		ioread32(CE + hcr_base), ioread32(CC + hcr_base));
+
+	/* Simply queue command to the controller/device */
+	iowrite32(1 << tag, CQ + hcr_base);
+
+	VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n",
+		tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base));
+
+	VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
+		ioread32(CE + hcr_base),
+		ioread32(DE + hcr_base),
+		ioread32(CC + hcr_base), ioread32(COMMANDSTAT + csr_base));
+
+	return 0;
+}
+
+static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
+			       u32 val)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *ssr_base = host_priv->ssr_base;
+	unsigned int sc_reg;
+
+	switch (sc_reg_in) {
+	case SCR_STATUS:
+	case SCR_ERROR:
+	case SCR_CONTROL:
+	case SCR_ACTIVE:
+		sc_reg = sc_reg_in;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg);
+
+	iowrite32(val, ssr_base + (sc_reg * 4));
+	return 0;
+}
+
+static int sata_fsl_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
+			u32 *val)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *ssr_base = host_priv->ssr_base;
+	unsigned int sc_reg;
+
+	switch (sc_reg_in) {
+	case SCR_STATUS:
+	case SCR_ERROR:
+	case SCR_CONTROL:
+	case SCR_ACTIVE:
+		sc_reg = sc_reg_in;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg);
+
+	*val = ioread32(ssr_base + (sc_reg * 4));
+	return 0;
+}
+
+static void sata_fsl_freeze(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
+		ioread32(CQ + hcr_base),
+		ioread32(CA + hcr_base),
+		ioread32(CE + hcr_base), ioread32(DE + hcr_base));
+	VPRINTK("CmdStat = 0x%x\n", ioread32(csr_base + COMMANDSTAT));
+
+	/* disable interrupts on the controller/port */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
+
+	VPRINTK("in xx_freeze : HControl = 0x%x, HStatus = 0x%x\n",
+		ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
+}
+
+static void sata_fsl_thaw(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/* ack. any pending IRQs for this controller/port */
+	temp = ioread32(hcr_base + HSTATUS);
+
+	VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F));
+
+	if (temp & 0x3F)
+		iowrite32((temp & 0x3F), hcr_base + HSTATUS);
+
+	/* enable interrupts on the controller/port */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
+
+	VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n",
+		ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
+}
+
+/*
+ * NOTE : 1st D2H FIS from device does not update sfis in command descriptor.
+ */
+static inline void sata_fsl_cache_taskfile_from_d2h_fis(struct ata_queued_cmd
+							*qc,
+							struct ata_port *ap)
+{
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+	struct command_desc *cd;
+
+	cd = pp->cmdentry + tag;
+
+	ata_tf_from_fis(cd->sfis, &pp->tf);
+}
+
+static u8 sata_fsl_check_status(struct ata_port *ap)
+{
+	struct sata_fsl_port_priv *pp = ap->private_data;
+
+	return pp->tf.command;
+}
+
+static void sata_fsl_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct sata_fsl_port_priv *pp = ap->private_data;
+
+	*tf = pp->tf;
+}
+
+static int sata_fsl_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct sata_fsl_port_priv *pp;
+	int retval;
+	void *mem;
+	dma_addr_t mem_dma;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	/*
+	 * allocate the per-command DMA alignment pad buffer, used internally
+	 * by libATA to pad transfers ending on unaligned boundaries so that
+	 * they align on Dword boundaries
+	 */
+	retval = ata_pad_alloc(ap, dev);
+	if (retval) {
+		kfree(pp);
+		return retval;
+	}
+
+	mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+				 GFP_KERNEL);
+	if (!mem) {
+		ata_pad_free(ap, dev);
+		kfree(pp);
+		return -ENOMEM;
+	}
+	memset(mem, 0, SATA_FSL_PORT_PRIV_DMA_SZ);
+
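+	/*
+	 * DMA region layout: the command slot (header) array comes first,
+	 * followed by the command descriptor array
+	 */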
+	pp->cmdslot = mem;
+	pp->cmdslot_paddr = mem_dma;
+
+	mem += SATA_FSL_CMD_SLOT_SIZE;
+	mem_dma += SATA_FSL_CMD_SLOT_SIZE;
+
+	pp->cmdentry = mem;
+	pp->cmdentry_paddr = mem_dma;
+
+	ap->private_data = pp;
+
+	VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n",
+		pp->cmdslot_paddr, pp->cmdentry_paddr);
+
+	/* Now, update the CHBA register in host controller cmd register set */
+	iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+
+	/*
+	 * Now we can bring the controller on-line and also initiate the
+	 * COMINIT sequence. We simply return here; boot probing and device
+	 * discovery are re-initiated by libATA using a (dummy) Softreset EH
+	 * session, and hence are handled in the sata_fsl_softreset() callback.
+	 */
+
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
+
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+	VPRINTK("CHBA  = 0x%x\n", ioread32(hcr_base + CHBA));
+
+#ifdef CONFIG_MPC8315_DS
+	/*
+	 * Workaround for 8315DS board 3gbps link-up issue,
+	 * currently limit SATA port to GEN1 speed
+	 */
+	sata_fsl_scr_read(ap, SCR_CONTROL, &temp);
+	temp &= ~(0xF << 4);
+	temp |= (0x1 << 4);
+	sata_fsl_scr_write(ap, SCR_CONTROL, temp);
+
+	sata_fsl_scr_read(ap, SCR_CONTROL, &temp);
+	dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
+			temp);
+#endif
+
+	return 0;
+}
+
+static void sata_fsl_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/*
+	 * Force host controller to go off-line, aborting current operations
+	 */
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_ONLINE_PHY_RST;
+	temp |= HCONTROL_FORCE_OFFLINE;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* Poll for controller to go offline - should happen immediately */
+	ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1);
+
+	ap->private_data = NULL;
+	dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
+			  pp->cmdslot, pp->cmdslot_paddr);
+
+	ata_pad_free(ap, dev);
+	kfree(pp);
+}
+
+static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
+{
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	struct ata_taskfile tf;
+	u32 temp;
+
+	temp = ioread32(hcr_base + SIGNATURE);
+
+	VPRINTK("raw sig = 0x%x\n", temp);
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
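+	/* assemble a pseudo taskfile from the signature for classification */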
+	tf.lbah = (temp >> 24) & 0xff;
+	tf.lbam = (temp >> 16) & 0xff;
+	tf.lbal = (temp >> 8) & 0xff;
+	tf.nsect = temp & 0xff;
+
+	return ata_dev_classify(&tf);
+}
+
+static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+			      unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct sata_fsl_port_priv *pp = ap->private_data;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+	struct ata_taskfile tf;
+	u8 *cfis;
+	u32 Serror;
+	int i = 0;
+	unsigned long start_jiffies;
+
+	DPRINTK("in xx_softreset\n");
+
+try_offline_again:
+	/*
+	 * Force host controller to go off-line, aborting current operations
+	 */
+	temp = ioread32(hcr_base + HCONTROL);
+	temp &= ~HCONTROL_ONLINE_PHY_RST;
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	/* Poll for controller to go offline */
+	temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 500);
+
+	if (temp & ONLINE) {
+		ata_port_printk(ap, KERN_ERR,
+				"Softreset failed, not off-lined %d\n", i);
+
+		/*
+		 * Try to offline the controller at least twice
+		 */
+		i++;
+		if (i == 2)
+			goto err;
+		else
+			goto try_offline_again;
+	}
+
+	DPRINTK("softreset, controller off-lined\n");
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	/*
+	 * PHY reset should remain asserted for at least 1ms
+	 */
+	msleep(1);
+
+	/*
+	 * Now bring the host controller online again. This can take time,
+	 * as PHY reset, communication establishment, the 1st D2H FIS and
+	 * the device signature update all happen here; to be safe, assume
+	 * 500ms. NOTE : Host online status may be indicated immediately!!
+	 */
+
+	temp = ioread32(hcr_base + HCONTROL);
+	temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE);
+	iowrite32(temp, hcr_base + HCONTROL);
+
+	temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500);
+
+	if (!(temp & ONLINE)) {
+		ata_port_printk(ap, KERN_ERR,
+				"Softreset failed, not on-lined\n");
+		goto err;
+	}
+
+	DPRINTK("softreset, controller off-lined & on-lined\n");
+	VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	/*
+	 * First, wait for the PHYRDY change to occur before waiting for
+	 * the signature, and also verify if SStatus indicates device
+	 * presence
+	 */
+
+	temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0, 1, 500);
+	if ((!(temp & 0x10)) || ata_link_offline(link)) {
+		ata_port_printk(ap, KERN_WARNING,
+				"No Device OR PHYRDY change,Hstatus = 0x%x\n",
+				ioread32(hcr_base + HSTATUS));
+		goto err;
+	}
+
+	/*
+	 * Wait for the first D2H FIS from the device, i.e. the signature
+	 * update notification
+	 */
+	start_jiffies = jiffies;
+	temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0x10,
+			500, jiffies_to_msecs(deadline - start_jiffies));
+
+	if ((temp & 0xFF) != 0x18) {
+		ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
+		goto err;
+	} else {
+		ata_port_printk(ap, KERN_INFO,
+				"Signature Update detected @ %d msecs\n",
+				jiffies_to_msecs(jiffies - start_jiffies));
+	}
+
+	/*
+	 * Send a device reset (SRST) explicitly on command slot #0.
+	 * Check : will the command queue (reg) be cleared during offlining ??
+	 * Also, we will be online only once PHY communication has been
+	 * established and device presence has been detected; therefore, if
+	 * we have reached here, we can send a command to the target device.
+	 */
+
+	DPRINTK("Sending SRST/device reset\n");
+
+	ata_tf_init(link->device, &tf);
+	cfis = (u8 *) &pp->cmdentry->cfis;
+
+	/* device reset/SRST is a control register update FIS, uses tag0 */
+	sata_fsl_setup_cmd_hdr_entry(pp, 0,
+				     SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
+
+	tf.ctl |= ATA_SRST;	/* setup SRST bit in taskfile control reg */
+	ata_tf_to_fis(&tf, 0, 0, cfis);
+
+	DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
+		cfis[0], cfis[1], cfis[2], cfis[3]);
+
+	/*
+	 * Queue SRST command to the controller/device, ensure that no
+	 * other commands are active on the controller/device
+	 */
+
+	DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
+		ioread32(CQ + hcr_base),
+		ioread32(CA + hcr_base), ioread32(CC + hcr_base));
+
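+	/* clear any stale completion status, then queue the SRST on slot 0 */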
+	iowrite32(0xFFFF, CC + hcr_base);
+	iowrite32(1, CQ + hcr_base);
+
+	temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
+	if (temp & 0x1) {
+		ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n");
+
+		DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
+			ioread32(CQ + hcr_base),
+			ioread32(CA + hcr_base), ioread32(CC + hcr_base));
+
+		sata_fsl_scr_read(ap, SCR_ERROR, &Serror);
+
+		DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+		DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+		DPRINTK("Serror = 0x%x\n", Serror);
+		goto err;
+	}
+
+	msleep(1);
+
+	/*
+	 * A SATA device enters the reset state after receiving a Control
+	 * register FIS with the SRST bit asserted, and awaits another H2D
+	 * Control register FIS with SRST cleared. The device then runs its
+	 * internal diagnostics & initialization, and reports its
+	 * initialization status to the host controller via an ATA signature
+	 * D2H register FIS.
+	 */
+
+	sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
+
+	tf.ctl &= ~ATA_SRST;	/* 2nd H2D Ctl. register FIS */
+	ata_tf_to_fis(&tf, 0, 0, cfis);
+
+	iowrite32(1, CQ + hcr_base);
+	msleep(150);		/* ?? */
+
+	/*
+	 * The above command would have signalled an interrupt on command
+	 * complete, which needs special handling, by clearing the Nth
+	 * command bit of the CCreg
+	 */
+	iowrite32(0x01, CC + hcr_base);	/* We know it will be cmd#0 always */
+
+	DPRINTK("SATA FSL : Now checking device signature\n");
+
+	*class = ATA_DEV_NONE;
+
+	/* Verify if SStatus indicates device presence */
+	if (ata_link_online(link)) {
+		/*
+		 * If we are here, device presence has been detected and the
+		 * 1st D2H FIS has been received; the sfis in the command
+		 * descriptor is not updated, but the signature register
+		 * will have been.
+		 */
+
+		*class = sata_fsl_dev_classify(ap);
+
+		DPRINTK("class = %d\n", *class);
+		VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC));
+		VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
+	}
+
+	return 0;
+
+err:
+	return -EIO;
+}
+
+static void sata_fsl_error_handler(struct ata_port *ap)
+{
+
+	DPRINTK("in xx_error_handler\n");
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, sata_fsl_softreset, sata_std_hardreset,
+		  ata_std_postreset);
+}
+
+static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		qc->err_mask |= AC_ERR_OTHER;
+
+	if (qc->err_mask) {
+		/* make DMA engine forget about the failed command */
+
+	}
+}
+
+static void sata_fsl_irq_clear(struct ata_port *ap)
+{
+	/* unused */
+}
+
+static void sata_fsl_error_intr(struct ata_port *ap)
+{
+	struct ata_link *link = &ap->link;
+	struct ata_eh_info *ehi = &link->eh_info;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 hstatus, dereg, cereg = 0, SError = 0;
+	unsigned int err_mask = 0, action = 0;
+	struct ata_queued_cmd *qc;
+	int freeze = 0;
+
+	hstatus = ioread32(hcr_base + HSTATUS);
+	cereg = ioread32(hcr_base + CE);
+
+	ata_ehi_clear_desc(ehi);
+
+	/*
+	 * Handle & Clear SError
+	 */
+
+	sata_fsl_scr_read(ap, SCR_ERROR, &SError);
+	if (unlikely(SError & 0xFFFF0000)) {
+		sata_fsl_scr_write(ap, SCR_ERROR, SError);
+		err_mask |= AC_ERR_ATA_BUS;
+	}
+
+	DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
+		hstatus, cereg, ioread32(hcr_base + DE), SError);
+
+	/* handle single device errors */
+	if (cereg) {
+		/*
+		 * clear the command error, also clears queue to the device
+		 * in error, and we can (re)issue commands to this device.
+		 * When a device is in error all commands queued into the
+		 * host controller and at the device are considered aborted
+		 * and the queue for that device is stopped. Now, after
+		 * clearing the device error, we can issue commands to the
+		 * device to interrogate it to find the source of the error.
+		 */
+		dereg = ioread32(hcr_base + DE);
+		iowrite32(dereg, hcr_base + DE);
+		iowrite32(cereg, hcr_base + CE);
+
+		DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
+			ioread32(hcr_base + CE), ioread32(hcr_base + DE));
+		/*
+		 * We should consider this a non-fatal error, and the TF must
+		 * be updated as done below.
+		 */
+
+		err_mask |= AC_ERR_DEV;
+	}
+
+	/* handle fatal errors */
+	if (hstatus & FATAL_ERROR_DECODE) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_SOFTRESET;
+		/* how will fatal error interrupts be completed ?? */
+		freeze = 1;
+	}
+
+	/* Handle PHYRDY change notification */
+	if (hstatus & INT_ON_PHYRDY_CHG) {
+		DPRINTK("SATA FSL: PHYRDY change indication\n");
+
+		/* Setup a soft-reset EH action */
+		ata_ehi_hotplugged(ehi);
+		freeze = 1;
+	}
+
+	/* record error info */
+	qc = ata_qc_from_tag(ap, link->active_tag);
+
+	if (qc) {
+		sata_fsl_cache_taskfile_from_d2h_fis(qc, qc->ap);
+		qc->err_mask |= err_mask;
+	} else
+		ehi->err_mask |= err_mask;
+
+	ehi->action |= action;
+	ehi->serror |= SError;
+
+	/* freeze or abort */
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void sata_fsl_qc_complete(struct ata_queued_cmd *qc)
+{
+	if (qc->flags & ATA_QCFLAG_RESULT_TF) {
+		DPRINTK("xx_qc_complete called\n");
+		sata_fsl_cache_taskfile_from_d2h_fis(qc, qc->ap);
+	}
+}
+
+static void sata_fsl_host_intr(struct ata_port *ap)
+{
+	struct ata_link *link = &ap->link;
+	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 hstatus, qc_active = 0;
+	struct ata_queued_cmd *qc;
+	u32 SError;
+
+	hstatus = ioread32(hcr_base + HSTATUS);
+
+	sata_fsl_scr_read(ap, SCR_ERROR, &SError);
+
+	if (unlikely(SError & 0xFFFF0000)) {
+		DPRINTK("serror @host_intr : 0x%x\n", SError);
+		sata_fsl_error_intr(ap);
+
+	}
+
+	if (unlikely(hstatus & INT_ON_ERROR)) {
+		DPRINTK("error interrupt!!\n");
+		sata_fsl_error_intr(ap);
+		return;
+	}
+
+	if (link->sactive) {	/* only true for NCQ commands */
+		int i;
+		/* Read command completed register */
+		qc_active = ioread32(hcr_base + CC);
+		/* clear CC bit, this will also complete the interrupt */
+		iowrite32(qc_active, hcr_base + CC);
+
+		DPRINTK("Status of all queues :\n");
+		DPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
+			qc_active, ioread32(hcr_base + CA),
+			ioread32(hcr_base + CE));
+
+		for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
+			if (qc_active & (1 << i)) {
+				qc = ata_qc_from_tag(ap, i);
+				if (qc) {
+					sata_fsl_qc_complete(qc);
+					ata_qc_complete(qc);
+				}
+				DPRINTK
+				    ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
+				     i, ioread32(hcr_base + CC),
+				     ioread32(hcr_base + CA));
+			}
+		}
+		return;
+
+	} else if (ap->qc_active) {
+		iowrite32(1, hcr_base + CC);
+		qc = ata_qc_from_tag(ap, link->active_tag);
+
+		DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n",
+			link->active_tag, ioread32(hcr_base + CC));
+
+		if (qc) {
+			sata_fsl_qc_complete(qc);
+			ata_qc_complete(qc);
+		}
+	} else {
+		/* Spurious Interrupt!! */
+		DPRINTK("spurious interrupt!!, CC = 0x%x\n",
+			ioread32(hcr_base + CC));
+		return;
+	}
+}
+
+static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
+{
+	struct ata_host *host = dev_instance;
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 interrupt_enables;
+	unsigned handled = 0;
+	struct ata_port *ap;
+
+	/* ack. any pending IRQs for this controller/port */
+	interrupt_enables = ioread32(hcr_base + HSTATUS);
+	interrupt_enables &= 0x3F;
+
+	DPRINTK("interrupt status 0x%x\n", interrupt_enables);
+
+	if (!interrupt_enables)
+		return IRQ_NONE;
+
+	spin_lock(&host->lock);
+
+	/* Assuming one port per host controller */
+
+	ap = host->ports[0];
+	if (ap) {
+		sata_fsl_host_intr(ap);
+	} else {
+		dev_printk(KERN_WARNING, host->dev,
+			   "interrupt on disabled port 0\n");
+	}
+
+	iowrite32(interrupt_enables, hcr_base + HSTATUS);
+	handled = 1;
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+/*
+ * Multiple ports are represented by multiple SATA controllers with
+ * one port per controller
+ */
+static int sata_fsl_init_controller(struct ata_host *host)
+{
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *hcr_base = host_priv->hcr_base;
+	u32 temp;
+
+	/*
+	 * NOTE : We cannot bring the controller online before setting
+	 * the CHBA, hence main controller initialization is done as
+	 * part of the port_start() callback
+	 */
+
+	/* ack. any pending IRQs for this controller/port */
+	temp = ioread32(hcr_base + HSTATUS);
+	if (temp & 0x3F)
+		iowrite32((temp & 0x3F), hcr_base + HSTATUS);
+
+	/* Keep interrupts disabled on the controller */
+	temp = ioread32(hcr_base + HCONTROL);
+	iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
+
+	/* Disable interrupt coalescing control(icc), for the moment */
+	DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC));
+	iowrite32(0x01000000, hcr_base + ICC);
+
+	/* clear error registers, SError is cleared by libATA  */
+	iowrite32(0x00000FFFF, hcr_base + CE);
+	iowrite32(0x00000FFFF, hcr_base + DE);
+
+	/* initially assuming no Port multiplier, set CQPMP to 0 */
+	iowrite32(0x0, hcr_base + CQPMP);
+
+	/*
+	 * The host controller will be brought on-line during the
+	 * xx_port_start() callback, which also initiates the OOB/COMINIT
+	 * sequence
+	 */
+
+	DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+	DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+	return 0;
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_fsl_sht = {
+	.module = THIS_MODULE,
+	.name = "sata_fsl",
+	.ioctl = ata_scsi_ioctl,
+	.queuecommand = ata_scsi_queuecmd,
+	.change_queue_depth = ata_scsi_change_queue_depth,
+	.can_queue = SATA_FSL_QUEUE_DEPTH,
+	.this_id = ATA_SHT_THIS_ID,
+	.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
+	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+	.emulated = ATA_SHT_EMULATED,
+	.use_clustering = ATA_SHT_USE_CLUSTERING,
+	.proc_name = "sata_fsl",
+	.dma_boundary = ATA_DMA_BOUNDARY,
+	.slave_configure = ata_scsi_slave_config,
+	.slave_destroy = ata_scsi_slave_destroy,
+	.bios_param = ata_std_bios_param,
+};
+
+static const struct ata_port_operations sata_fsl_ops = {
+	.check_status = sata_fsl_check_status,
+	.check_altstatus = sata_fsl_check_status,
+	.dev_select = ata_noop_dev_select,
+
+	.tf_read = sata_fsl_tf_read,
+
+	.qc_prep = sata_fsl_qc_prep,
+	.qc_issue = sata_fsl_qc_issue,
+	.irq_clear = sata_fsl_irq_clear,
+
+	.scr_read = sata_fsl_scr_read,
+	.scr_write = sata_fsl_scr_write,
+
+	.freeze = sata_fsl_freeze,
+	.thaw = sata_fsl_thaw,
+	.error_handler = sata_fsl_error_handler,
+	.post_internal_cmd = sata_fsl_post_internal_cmd,
+
+	.port_start = sata_fsl_port_start,
+	.port_stop = sata_fsl_port_stop,
+};
+
+static const struct ata_port_info sata_fsl_port_info[] = {
+	{
+	 .flags = SATA_FSL_HOST_FLAGS,
+	 .link_flags = SATA_FSL_HOST_LFLAGS,
+	 .pio_mask = 0x1f,	/* pio 0-4 */
+	 .udma_mask = 0x7f,	/* udma 0-6 */
+	 .port_ops = &sata_fsl_ops,
+	 },
+};
+
+static int sata_fsl_probe(struct of_device *ofdev,
+			const struct of_device_id *match)
+{
+	int retval = 0;
+	void __iomem *hcr_base = NULL;
+	void __iomem *ssr_base = NULL;
+	void __iomem *csr_base = NULL;
+	struct sata_fsl_host_priv *host_priv = NULL;
+	struct resource *r;
+	int irq;
+	struct ata_host *host;
+
+	struct ata_port_info pi = sata_fsl_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	dev_printk(KERN_INFO, &ofdev->dev,
+		   "Sata FSL Platform/CSB Driver init\n");
+
+	r = kmalloc(sizeof(struct resource), GFP_KERNEL);
+
+	hcr_base = of_iomap(ofdev->node, 0);
+	if (!hcr_base)
+		goto error_exit_with_cleanup;
+
+	ssr_base = hcr_base + 0x100;
+	csr_base = hcr_base + 0x140;
+
+	DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
+	DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
+	DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
+
+	host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
+	if (!host_priv)
+		goto error_exit_with_cleanup;
+
+	host_priv->hcr_base = hcr_base;
+	host_priv->ssr_base = ssr_base;
+	host_priv->csr_base = csr_base;
+
+	irq = irq_of_parse_and_map(ofdev->node, 0);
+	if (irq < 0) {
+		dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
+		goto error_exit_with_cleanup;
+	}
+	host_priv->irq = irq;
+
+	/* allocate host structure */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
+
+	/* host->iomap is not used currently */
+	host->private_data = host_priv;
+
+	/* setup port(s) */
+
+	host->ports[0]->ioaddr.cmd_addr = host_priv->hcr_base;
+	host->ports[0]->ioaddr.scr_addr = host_priv->ssr_base;
+
+	/* initialize host controller */
+	sata_fsl_init_controller(host);
+
+	/*
+	 * Now register with the libATA core; this also initiates the device
+	 * discovery process, invoking our port_start() handler and
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	ata_host_activate(host, irq, sata_fsl_interrupt, SATA_FSL_IRQ_FLAG,
+			  &sata_fsl_sht);
+
+	dev_set_drvdata(&ofdev->dev, host);
+
+	return 0;
+
+error_exit_with_cleanup:
+
+	if (hcr_base)
+		iounmap(hcr_base);
+	if (host_priv)
+		kfree(host_priv);
+
+	return retval;
+}
+
+static int sata_fsl_remove(struct of_device *ofdev)
+{
+	struct ata_host *host = dev_get_drvdata(&ofdev->dev);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+
+	ata_host_detach(host);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	irq_dispose_mapping(host_priv->irq);
+	iounmap(host_priv->hcr_base);
+	kfree(host_priv);
+
+	return 0;
+}
+
+static struct of_device_id fsl_sata_match[] = {
+	{
+		.compatible = "fsl,mpc8315-sata",
+	},
+	{
+		.compatible = "fsl,mpc8379-sata",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_sata_match);
+
+static struct of_platform_driver fsl_sata_driver = {
+	.name		= "fsl-sata",
+	.match_table	= fsl_sata_match,
+	.probe		= sata_fsl_probe,
+	.remove		= sata_fsl_remove,
+};
+
+static int __init sata_fsl_init(void)
+{
+	of_register_platform_driver(&fsl_sata_driver);
+	return 0;
+}
+
+static void __exit sata_fsl_exit(void)
+{
+	of_unregister_platform_driver(&fsl_sata_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
+MODULE_VERSION("1.10");
+
+module_init(sata_fsl_init);
+module_exit(sata_fsl_exit);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 02ef635..c0c9a05 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -28,7 +28,7 @@
 #include <scsi/scsi_device.h>
 
 #define DRV_NAME	"sata_inic162x"
-#define DRV_VERSION	"0.2"
+#define DRV_VERSION	"0.3"
 
 enum {
 	MMIO_BAR		= 5,
@@ -143,7 +143,7 @@ static const int scr_map[] = {
 	[SCR_CONTROL]	= 2,
 };
 
-static void __iomem * inic_port_base(struct ata_port *ap)
+static void __iomem *inic_port_base(struct ata_port *ap)
 {
 	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
 }
@@ -190,34 +190,34 @@ static void inic_reset_port(void __iomem *port_base)
 	writew(ctl, idma_ctl);
 }
 
-static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
+static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = ap->ioaddr.scr_addr;
 	void __iomem *addr;
-	u32 val;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
-		return 0xffffffffU;
+		return -EINVAL;
 
 	addr = scr_addr + scr_map[sc_reg] * 4;
-	val = readl(scr_addr + scr_map[sc_reg] * 4);
+	*val = readl(scr_addr + scr_map[sc_reg] * 4);
 
 	/* this controller has stuck DIAG.N, ignore it */
 	if (sc_reg == SCR_ERROR)
-		val &= ~SERR_PHYRDY_CHG;
-	return val;
+		*val &= ~SERR_PHYRDY_CHG;
+	return 0;
 }
 
-static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = ap->ioaddr.scr_addr;
 	void __iomem *addr;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
-		return;
+		return -EINVAL;
 
 	addr = scr_addr + scr_map[sc_reg] * 4;
 	writel(val, scr_addr + scr_map[sc_reg] * 4);
+	return 0;
 }
 
 /*
@@ -285,7 +285,7 @@ static void inic_irq_clear(struct ata_port *ap)
 static void inic_host_intr(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct ata_eh_info *ehi = &ap->eh_info;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	u8 irq_stat;
 
 	/* fetch and clear irq */
@@ -293,7 +293,8 @@ static void inic_host_intr(struct ata_port *ap)
 	writeb(irq_stat, port_base + PORT_IRQ_STAT);
 
 	if (likely(!(irq_stat & PIRQ_ERR))) {
-		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+		struct ata_queued_cmd *qc =
+			ata_qc_from_tag(ap, ap->link.active_tag);
 
 		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 			ata_chk_status(ap);	/* clear ATA interrupt */
@@ -416,12 +417,13 @@ static void inic_thaw(struct ata_port *ap)
  * SRST and SControl hardreset don't give valid signature on this
  * controller.  Only controller specific hardreset mechanism works.
  */
-static int inic_hardreset(struct ata_port *ap, unsigned int *class,
+static int inic_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	void __iomem *port_base = inic_port_base(ap);
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
-	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 	u16 val;
 	int rc;
 
@@ -434,24 +436,24 @@ static int inic_hardreset(struct ata_port *ap, unsigned int *class,
 	msleep(1);
 	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
 
-	rc = sata_phy_resume(ap, timing, deadline);
+	rc = sata_link_resume(link, timing, deadline);
 	if (rc) {
-		ata_port_printk(ap, KERN_WARNING, "failed to resume "
+		ata_link_printk(link, KERN_WARNING, "failed to resume "
 				"link after reset (errno=%d)\n", rc);
 		return rc;
 	}
 
 	*class = ATA_DEV_NONE;
-	if (ata_port_online(ap)) {
+	if (ata_link_online(link)) {
 		struct ata_taskfile tf;
 
 		/* wait a while before checking status */
-		msleep(150);
+		ata_wait_after_reset(ap, deadline);
 
 		rc = ata_wait_ready(ap, deadline);
 		/* link occupied, -ENODEV too is an error */
 		if (rc) {
-			ata_port_printk(ap, KERN_WARNING, "device not ready "
+			ata_link_printk(link, KERN_WARNING, "device not ready "
 					"after hardreset (errno=%d)\n", rc);
 			return rc;
 		}
@@ -550,7 +552,6 @@ static int inic_port_start(struct ata_port *ap)
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -567,7 +568,6 @@ static struct ata_port_operations inic_port_ops = {
 
 	.irq_clear		= inic_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.qc_prep	 	= ata_qc_prep,
 	.qc_issue		= inic_qc_issue,
@@ -594,7 +594,7 @@ static struct ata_port_info inic_port_info = {
 	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
 	.pio_mask		= 0x1f,	/* pio0-4 */
 	.mwdma_mask		= 0x07, /* mwdma0-2 */
-	.udma_mask		= 0x7f,	/* udma0-6 */
+	.udma_mask		= ATA_UDMA6,
 	.port_ops		= &inic_port_ops
 };
 
@@ -693,16 +693,24 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->iomap = iomap = pcim_iomap_table(pdev);
 
 	for (i = 0; i < NR_PORTS; i++) {
-		struct ata_ioports *port = &host->ports[i]->ioaddr;
-		void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *port = &ap->ioaddr;
+		unsigned int offset = i * PORT_SIZE;
 
 		port->cmd_addr = iomap[2 * i];
 		port->altstatus_addr =
 		port->ctl_addr = (void __iomem *)
 			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
-		port->scr_addr = port_base + PORT_SCR;
+		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
 
 		ata_std_ports(port);
+
+		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
+		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+		  (unsigned long long)pci_resource_start(pdev, 2 * i),
+		  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
+				      ATA_PCI_CTL_OFS);
 	}
 
 	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d9537ab..4f4ab43 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -29,19 +29,12 @@
   I distinctly remember a couple workarounds (one related to PCI-X)
   are still needed.
 
-  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
-  probing/error handling in general.  MUST HAVE.
-
-  3) Add hotplug support (easy, once new-EH support appears)
-
   4) Add NCQ support (easy to intermediate, once new-EH support appears)
 
   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 
   6) Add port multiplier support (intermediate)
 
-  7) Test and verify 3.0 Gbps support
-
   8) Develop a low-power-consumption strategy, and implement it.
 
   9) [Experiment, low priority] See if ATAPI can be supported using
@@ -76,10 +69,11 @@
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"0.81"
+#define DRV_VERSION	"1.01"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -108,8 +102,6 @@ enum {
 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
 
-	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
-
 	MV_MAX_Q_DEPTH		= 32,
 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
 
@@ -133,18 +125,22 @@ enum {
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
-	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
-				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
+	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+				  ATA_FLAG_PIO_POLLING,
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
+	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
+	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
 	CRQB_CMD_ADDR_SHIFT	= 8,
 	CRQB_CMD_CS		= (0x2 << 11),
 	CRQB_CMD_LAST		= (1 << 15),
 
 	CRPB_FLAG_STATUS_SHIFT	= 8,
+	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
+	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
 
 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
 
@@ -168,10 +164,14 @@ enum {
 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
 	MV_PCI_ERR_COMMAND	= 0x1d50,
 
-	PCI_IRQ_CAUSE_OFS		= 0x1d58,
-	PCI_IRQ_MASK_OFS		= 0x1d5c,
+	PCI_IRQ_CAUSE_OFS	= 0x1d58,
+	PCI_IRQ_MASK_OFS	= 0x1d5c,
 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
 
+	PCIE_IRQ_CAUSE_OFS	= 0x1900,
+	PCIE_IRQ_MASK_OFS	= 0x1910,
+	PCIE_UNMASK_ALL_IRQS	= 0x70a,	/* assorted bits */
+
 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
 	PORT0_ERR		= (1 << 0),	/* shift by port # */
@@ -230,31 +230,53 @@ enum {
 
 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
-	EDMA_ERR_D_PAR		= (1 << 0),
-	EDMA_ERR_PRD_PAR	= (1 << 1),
-	EDMA_ERR_DEV		= (1 << 2),
-	EDMA_ERR_DEV_DCON	= (1 << 3),
-	EDMA_ERR_DEV_CON	= (1 << 4),
-	EDMA_ERR_SERR		= (1 << 5),
-	EDMA_ERR_SELF_DIS	= (1 << 7),
-	EDMA_ERR_BIST_ASYNC	= (1 << 8),
-	EDMA_ERR_CRBQ_PAR	= (1 << 9),
-	EDMA_ERR_CRPB_PAR	= (1 << 10),
-	EDMA_ERR_INTRL_PAR	= (1 << 11),
-	EDMA_ERR_IORDY		= (1 << 12),
-	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
+	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
+	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
+	EDMA_ERR_DEV		= (1 << 2),	/* device error */
+	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
+	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
+	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
+	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
+	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
+	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
+	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
+	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
+	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
+	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
+	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
+	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
-	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
-	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
-	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
-	EDMA_ERR_TRANS_PROTO	= (1 << 31),
-	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
-				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
-				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
-				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
-				   EDMA_ERR_LNK_DATA_RX |
-				   EDMA_ERR_LNK_DATA_TX |
-				   EDMA_ERR_TRANS_PROTO),
+	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
+	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
+	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
+	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
+	EDMA_ERR_OVERRUN_5	= (1 << 5),
+	EDMA_ERR_UNDERRUN_5	= (1 << 6),
+	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
+				  EDMA_ERR_PRD_PAR |
+				  EDMA_ERR_DEV_DCON |
+				  EDMA_ERR_DEV_CON |
+				  EDMA_ERR_SERR |
+				  EDMA_ERR_SELF_DIS |
+				  EDMA_ERR_CRQB_PAR |
+				  EDMA_ERR_CRPB_PAR |
+				  EDMA_ERR_INTRL_PAR |
+				  EDMA_ERR_IORDY |
+				  EDMA_ERR_LNK_CTRL_RX_2 |
+				  EDMA_ERR_LNK_DATA_RX |
+				  EDMA_ERR_LNK_DATA_TX |
+				  EDMA_ERR_TRANS_PROTO,
+	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
+				  EDMA_ERR_PRD_PAR |
+				  EDMA_ERR_DEV_DCON |
+				  EDMA_ERR_DEV_CON |
+				  EDMA_ERR_OVERRUN_5 |
+				  EDMA_ERR_UNDERRUN_5 |
+				  EDMA_ERR_SELF_DIS_5 |
+				  EDMA_ERR_CRQB_PAR |
+				  EDMA_ERR_CRPB_PAR |
+				  EDMA_ERR_INTRL_PAR |
+				  EDMA_ERR_IORDY,
 
 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
@@ -267,10 +289,10 @@ enum {
 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
 	EDMA_RSP_Q_PTR_SHIFT	= 3,
 
-	EDMA_CMD_OFS		= 0x28,
-	EDMA_EN			= (1 << 0),
-	EDMA_DS			= (1 << 1),
-	ATA_RST			= (1 << 2),
+	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
+	EDMA_EN			= (1 << 0),	/* enable EDMA */
+	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
+	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
 
 	EDMA_IORDY_TMOUT	= 0x34,
 	EDMA_ARB_CFG		= 0x38,
@@ -282,25 +304,32 @@ enum {
 	MV_HP_ERRATA_60X1B2	= (1 << 3),
 	MV_HP_ERRATA_60X1C0	= (1 << 4),
 	MV_HP_ERRATA_XX42A0	= (1 << 5),
-	MV_HP_50XX		= (1 << 6),
-	MV_HP_GEN_IIE		= (1 << 7),
+	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
+	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
+	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
+	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
 
 	/* Port private flags (pp_flags) */
-	MV_PP_FLAG_EDMA_EN	= (1 << 0),
-	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
+	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
+	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
 };
 
-#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
-#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
-#define IS_GEN_I(hpriv) IS_50XX(hpriv)
-#define IS_GEN_II(hpriv) IS_60XX(hpriv)
+#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
+#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 
 enum {
-	MV_DMA_BOUNDARY		= 0xffffffffU,
+	/* DMA boundary 0xffff is required by the s/g splitting
+	 * we need on /length/ in mv_fill_sg().
+	 */
+	MV_DMA_BOUNDARY		= 0xffffU,
 
+	/* mask of register bits containing lower 32 bits
+	 * of EDMA request queue DMA address
+	 */
 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
 
+	/* ditto, for response queue */
 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
 };
 
@@ -352,6 +381,10 @@ struct mv_port_priv {
 	dma_addr_t		crpb_dma;
 	struct mv_sg		*sg_tbl;
 	dma_addr_t		sg_tbl_dma;
+
+	unsigned int		req_idx;
+	unsigned int		resp_idx;
+
 	u32			pp_flags;
 };
 
@@ -360,7 +393,15 @@ struct mv_port_signal {
 	u32			pre;
 };
 
-struct mv_host_priv;
+struct mv_host_priv {
+	u32			hp_flags;
+	struct mv_port_signal	signal[8];
+	const struct mv_hw_ops	*ops;
+	u32			irq_cause_ofs;
+	u32			irq_mask_ofs;
+	u32			unmask_all_irqs;
+};
+
 struct mv_hw_ops {
 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
 			   unsigned int port);
@@ -373,25 +414,20 @@ struct mv_hw_ops {
 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
 };
 
-struct mv_host_priv {
-	u32			hp_flags;
-	struct mv_port_signal	signal[8];
-	const struct mv_hw_ops	*ops;
-};
-
 static void mv_irq_clear(struct ata_port *ap);
-static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
-static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
-static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
-static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
-static void mv_phy_reset(struct ata_port *ap);
-static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
+static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
+static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
+static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
+static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_eng_timeout(struct ata_port *ap);
+static void mv_error_handler(struct ata_port *ap);
+static void mv_post_int_cmd(struct ata_queued_cmd *qc);
+static void mv_eh_freeze(struct ata_port *ap);
+static void mv_eh_thaw(struct ata_port *ap);
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -415,16 +451,15 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
-static void mv_stop_and_reset(struct ata_port *ap);
 
-static struct scsi_host_template mv_sht = {
+static struct scsi_host_template mv5_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
-	.can_queue		= MV_USE_Q_DEPTH,
+	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= MV_MAX_SG_CT,
+	.sg_tablesize		= MV_MAX_SG_CT / 2,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= 1,
@@ -435,27 +470,44 @@ static struct scsi_host_template mv_sht = {
 	.bios_param		= ata_std_bios_param,
 };
 
-static const struct ata_port_operations mv5_ops = {
-	.port_disable		= ata_port_disable,
+static struct scsi_host_template mv6_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= MV_MAX_SG_CT / 2,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= 1,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= MV_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
 
+static const struct ata_port_operations mv5_ops = {
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= mv_phy_reset,
 	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
 	.data_xfer		= ata_data_xfer,
 
-	.eng_timeout		= mv_eng_timeout,
-
 	.irq_clear		= mv_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
+
+	.error_handler		= mv_error_handler,
+	.post_internal_cmd	= mv_post_int_cmd,
+	.freeze			= mv_eh_freeze,
+	.thaw			= mv_eh_thaw,
 
 	.scr_read		= mv5_scr_read,
 	.scr_write		= mv5_scr_write,
@@ -465,26 +517,25 @@ static const struct ata_port_operations mv5_ops = {
 };
 
 static const struct ata_port_operations mv6_ops = {
-	.port_disable		= ata_port_disable,
-
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= mv_phy_reset,
 	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep,
 	.qc_issue		= mv_qc_issue,
 	.data_xfer		= ata_data_xfer,
 
-	.eng_timeout		= mv_eng_timeout,
-
 	.irq_clear		= mv_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
+
+	.error_handler		= mv_error_handler,
+	.post_internal_cmd	= mv_post_int_cmd,
+	.freeze			= mv_eh_freeze,
+	.thaw			= mv_eh_thaw,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -494,26 +545,25 @@ static const struct ata_port_operations mv6_ops = {
 };
 
 static const struct ata_port_operations mv_iie_ops = {
-	.port_disable		= ata_port_disable,
-
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
 
-	.phy_reset		= mv_phy_reset,
 	.cable_detect		= ata_cable_sata,
 
 	.qc_prep		= mv_qc_prep_iie,
 	.qc_issue		= mv_qc_issue,
 	.data_xfer		= ata_data_xfer,
 
-	.eng_timeout		= mv_eng_timeout,
-
 	.irq_clear		= mv_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
+
+	.error_handler		= mv_error_handler,
+	.post_internal_cmd	= mv_post_int_cmd,
+	.freeze			= mv_eh_freeze,
+	.thaw			= mv_eh_thaw,
 
 	.scr_read		= mv_scr_read,
 	.scr_write		= mv_scr_write,
@@ -526,44 +576,44 @@ static const struct ata_port_info mv_port_info[] = {
 	{  /* chip_504x */
 		.flags		= MV_COMMON_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_508x */
-		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_5080 */
-		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv5_ops,
 	},
 	{  /* chip_604x */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_608x */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				   MV_FLAG_DUAL_HC),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  MV_FLAG_DUAL_HC,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
-		.udma_mask	= 0x7f,	/* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 };
@@ -573,6 +623,9 @@ static const struct pci_device_id mv_pci_tbl[] = {
 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+	/* RocketRAID 1740/174x have different identifiers */
+	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
+	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
 
 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
@@ -582,11 +635,16 @@ static const struct pci_device_id mv_pci_tbl[] = {
 
 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
 
-	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+	/* Adaptec 1430SA */
+	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
 
-	/* add Marvell 7042 support */
+	/* Marvell 7042 support */
 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
 
+	/* Highpoint RocketRAID PCIe series */
+	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
 	{ }			/* terminate list */
 };
 
@@ -706,6 +764,46 @@ static void mv_irq_clear(struct ata_port *ap)
 {
 }
 
+static void mv_set_edma_ptrs(void __iomem *port_mmio,
+			     struct mv_host_priv *hpriv,
+			     struct mv_port_priv *pp)
+{
+	u32 index;
+
+	/*
+	 * initialize request queue
+	 */
+	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
+
+	WARN_ON(pp->crqb_dma & 0x3ff);
+	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
+		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl((pp->crqb_dma & 0xffffffff) | index,
+			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	else
+		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+
+	/*
+	 * initialize response queue
+	 */
+	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
+
+	WARN_ON(pp->crpb_dma & 0xff);
+	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+
+	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+		writelfl((pp->crpb_dma & 0xffffffff) | index,
+			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	else
+		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+
+	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
+		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+}
+
 /**
  *      mv_start_dma - Enable eDMA engine
  *      @base: port base address
@@ -717,9 +815,15 @@ static void mv_irq_clear(struct ata_port *ap)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
+static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
+			 struct mv_port_priv *pp)
 {
-	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+		/* clear EDMA event indicators, if any */
+		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
+
+		mv_set_edma_ptrs(base, hpriv, pp);
+
 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
@@ -727,7 +831,7 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
 }
 
 /**
- *      mv_stop_dma - Disable eDMA engine
+ *      __mv_stop_dma - Disable eDMA engine
  *      @ap: ATA channel to manipulate
  *
  *      Verify the local cache of the eDMA state is accurate with a
@@ -736,35 +840,49 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_stop_dma(struct ata_port *ap)
+static int __mv_stop_dma(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp	= ap->private_data;
 	u32 reg;
-	int i;
+	int i, err = 0;
 
-	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
+	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
 		/* Disable EDMA if active.   The disable bit auto clears.
 		 */
 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 	} else {
 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
-  	}
+	}
 
 	/* now properly wait for the eDMA to stop */
 	for (i = 1000; i > 0; i--) {
 		reg = readl(port_mmio + EDMA_CMD_OFS);
-		if (!(EDMA_EN & reg)) {
+		if (!(reg & EDMA_EN))
 			break;
-		}
+
 		udelay(100);
 	}
 
-	if (EDMA_EN & reg) {
+	if (reg & EDMA_EN) {
 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
-		/* FIXME: Consider doing a reset here to recover */
+		err = -EIO;
 	}
+
+	return err;
+}
+
+static int mv_stop_dma(struct ata_port *ap)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&ap->host->lock, flags);
+	rc = __mv_stop_dma(ap);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
+
+	return rc;
 }
 
 #ifdef ATA_DEBUG
@@ -774,7 +892,7 @@ static void mv_dump_mem(void __iomem *start, unsigned bytes)
 	for (b = 0; b < bytes; ) {
 		DPRINTK("%p: ", start + b);
 		for (w = 0; b < bytes && w < 4; w++) {
-			printk("%08x ",readl(start + b));
+			printk("%08x ", readl(start + b));
 			b += sizeof(u32);
 		}
 		printk("\n");
@@ -790,8 +908,8 @@ static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
 	for (b = 0; b < bytes; ) {
 		DPRINTK("%02x: ", b);
 		for (w = 0; b < bytes && w < 4; w++) {
-			(void) pci_read_config_dword(pdev,b,&dw);
-			printk("%08x ",dw);
+			(void) pci_read_config_dword(pdev, b, &dw);
+			printk("%08x ", dw);
 			b += sizeof(u32);
 		}
 		printk("\n");
@@ -835,9 +953,9 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port,
 	}
 	for (p = start_port; p < start_port + num_ports; p++) {
 		port_base = mv_port_base(mmio_base, p);
-		DPRINTK("EDMA regs (port %i):\n",p);
+		DPRINTK("EDMA regs (port %i):\n", p);
 		mv_dump_mem(port_base, 0x54);
-		DPRINTK("SATA regs (port %i):\n",p);
+		DPRINTK("SATA regs (port %i):\n", p);
 		mv_dump_mem(port_base+0x300, 0x60);
 	}
 #endif
@@ -863,30 +981,35 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
 	return ofs;
 }
 
-static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
+static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
 {
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
-	if (0xffffffffU != ofs)
-		return readl(mv_ap_base(ap) + ofs);
-	else
-		return (u32) ofs;
+	if (ofs != 0xffffffffU) {
+		*val = readl(mv_ap_base(ap) + ofs);
+		return 0;
+	} else
+		return -EINVAL;
 }
 
-static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
+static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
 	unsigned int ofs = mv_scr_offset(sc_reg_in);
 
-	if (0xffffffffU != ofs)
+	if (ofs != 0xffffffffU) {
 		writelfl(val, mv_ap_base(ap) + ofs);
+		return 0;
+	} else
+		return -EINVAL;
 }
 
-static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
+static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio)
 {
 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
 
 	/* set up non-NCQ EDMA configuration */
-	cfg &= ~(1 << 9);	/* disable equeue */
+	cfg &= ~(1 << 9);	/* disable eQue */
 
 	if (IS_GEN_I(hpriv)) {
 		cfg &= ~0x1f;		/* clear queue depth */
@@ -906,7 +1029,7 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
 		cfg |= (1 << 18);	/* enab early completion */
 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
-		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
+		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
 	}
 
 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
@@ -930,6 +1053,7 @@ static int mv_port_start(struct ata_port *ap)
 	void __iomem *port_mmio = mv_ap_base(ap);
 	void *mem;
 	dma_addr_t mem_dma;
+	unsigned long flags;
 	int rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -968,28 +1092,13 @@ static int mv_port_start(struct ata_port *ap)
 	pp->sg_tbl = mem;
 	pp->sg_tbl_dma = mem_dma;
 
-	mv_edma_cfg(hpriv, port_mmio);
-
-	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
-	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
-		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl(pp->crqb_dma & 0xffffffff,
-			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-	else
-		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	spin_lock_irqsave(&ap->host->lock, flags);
 
-	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+	mv_edma_cfg(ap, hpriv, port_mmio);
 
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl(pp->crpb_dma & 0xffffffff,
-			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-	else
-		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+	mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
-	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
-		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
 
 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
 	 * we'll be unable to send non-data, PIO, etc due to restricted access
@@ -1010,11 +1119,7 @@ static int mv_port_start(struct ata_port *ap)
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_stop_dma(ap);
-	spin_unlock_irqrestore(&ap->host->lock, flags);
 }
 
 /**
@@ -1026,38 +1131,41 @@ static void mv_port_stop(struct ata_port *ap)
  *      LOCKING:
  *      Inherited from caller.
  */
-static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
+static void mv_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct mv_port_priv *pp = qc->ap->private_data;
-	unsigned int n_sg = 0;
 	struct scatterlist *sg;
-	struct mv_sg *mv_sg;
+	struct mv_sg *mv_sg, *last_sg = NULL;
 
 	mv_sg = pp->sg_tbl;
 	ata_for_each_sg(sg, qc) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
 
-		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
-		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
+		while (sg_len) {
+			u32 offset = addr & 0xffff;
+			u32 len = sg_len;
 
-		if (ata_sg_is_last(sg, qc))
-			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+			if ((offset + sg_len > 0x10000))
+				len = 0x10000 - offset;
 
-		mv_sg++;
-		n_sg++;
-	}
+			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
+			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
 
-	return n_sg;
-}
+			sg_len -= len;
+			addr += len;
 
-static inline unsigned mv_inc_q_index(unsigned index)
-{
-	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
+			last_sg = mv_sg;
+			mv_sg++;
+		}
+	}
+
+	if (likely(last_sg))
+		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
 }
 
-static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
+static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
 {
 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
 		(last ? CRQB_CMD_LAST : 0);
@@ -1085,7 +1193,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	u16 flags = 0;
 	unsigned in_index;
 
- 	if (ATA_PROT_DMA != qc->tf.protocol)
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
 
 	/* Fill in command request block
@@ -1094,10 +1202,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		flags |= CRQB_FLAG_READ;
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
+	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */
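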
 
-	/* get current queue index from hardware */
-	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
-			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	/* get current queue index from software */
+	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
 	pp->crqb[in_index].sg_addr =
 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
@@ -1177,7 +1285,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	unsigned in_index;
 	u32 flags = 0;
 
- 	if (ATA_PROT_DMA != qc->tf.protocol)
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
 
 	/* Fill in Gen IIE command request block
@@ -1187,10 +1295,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 
 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
 	flags |= qc->tag << CRQB_TAG_SHIFT;
+	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
+						   what we use as our tag */
 
-	/* get current queue index from hardware */
-	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
-			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	/* get current queue index from software */
+	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
@@ -1238,83 +1347,41 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
  */
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 {
-	void __iomem *port_mmio = mv_ap_base(qc->ap);
-	struct mv_port_priv *pp = qc->ap->private_data;
-	unsigned in_index;
-	u32 in_ptr;
+	struct ata_port *ap = qc->ap;
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	u32 in_index;
 
-	if (ATA_PROT_DMA != qc->tf.protocol) {
+	if (qc->tf.protocol != ATA_PROT_DMA) {
 		/* We're about to send a non-EDMA capable command to the
 		 * port.  Turn off EDMA so there won't be problems accessing
 		 * shadow block, etc registers.
 		 */
-		mv_stop_dma(qc->ap);
+		__mv_stop_dma(ap);
 		return ata_qc_issue_prot(qc);
 	}
 
-	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	mv_start_dma(port_mmio, hpriv, pp);
+
+	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
 	/* until we do queuing, the queue should be empty at this point */
 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
 
-	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
+	pp->req_idx++;
 
-	mv_start_dma(port_mmio, pp);
+	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
 
 	/* and write the request in pointer to kick the EDMA to life */
-	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
-	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
-	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
+		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
 
 	return 0;
 }
 
 /**
- *      mv_get_crpb_status - get status from most recently completed cmd
- *      @ap: ATA channel to manipulate
- *
- *      This routine is for use when the port is in DMA mode, when it
- *      will be using the CRPB (command response block) method of
- *      returning command completion information.  We check indices
- *      are good, grab status, and bump the response consumer index to
- *      prove that we're up to date.
- *
- *      LOCKING:
- *      Inherited from caller.
- */
-static u8 mv_get_crpb_status(struct ata_port *ap)
-{
-	void __iomem *port_mmio = mv_ap_base(ap);
-	struct mv_port_priv *pp = ap->private_data;
-	unsigned out_index;
-	u32 out_ptr;
-	u8 ata_status;
-
-	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
-	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
-
-	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
-					>> CRPB_FLAG_STATUS_SHIFT;
-
-	/* increment our consumer index... */
-	out_index = mv_inc_q_index(out_index);
-
-	/* and, until we do NCQ, there should only be 1 CRPB waiting */
-	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
-		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
-
-	/* write out our inc'd consumer index so EDMA knows we're caught up */
-	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
-	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
-	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
-
-	/* Return ATA status register for completed CRPB */
-	return ata_status;
-}
-
-/**
  *      mv_err_intr - Handle error interrupts on the port
  *      @ap: ATA channel to manipulate
  *      @reset_allowed: bool: 0 == don't trigger from reset here
@@ -1328,30 +1395,188 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
  *      LOCKING:
  *      Inherited from caller.
  */
-static void mv_err_intr(struct ata_port *ap, int reset_allowed)
+static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
-	u32 edma_err_cause, serr = 0;
+	u32 edma_err_cause, eh_freeze_mask, serr = 0;
+	struct mv_port_priv *pp = ap->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+	unsigned int action = 0, err_mask = 0;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+
+	ata_ehi_clear_desc(ehi);
+
+	if (!edma_enabled) {
+		/* just a guess: do we need to do this? should we
+		 * expand this, and do it in all cases?
+		 */
+		sata_scr_read(&ap->link, SCR_ERROR, &serr);
+		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
+	}
 
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	if (EDMA_ERR_SERR & edma_err_cause) {
-		sata_scr_read(ap, SCR_ERROR, &serr);
-		sata_scr_write_flush(ap, SCR_ERROR, serr);
+	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
+
+	/*
+	 * all generations share these EDMA error cause bits
+	 */
+
+	if (edma_err_cause & EDMA_ERR_DEV)
+		err_mask |= AC_ERR_DEV;
+	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
+			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
+			EDMA_ERR_INTRL_PAR)) {
+		err_mask |= AC_ERR_ATA_BUS;
+		action |= ATA_EH_HARDRESET;
+		ata_ehi_push_desc(ehi, "parity error");
 	}
-	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
-		struct mv_port_priv *pp	= ap->private_data;
-		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
+			"dev disconnect" : "dev connect");
+	}
+
+	if (IS_GEN_I(hpriv)) {
+		eh_freeze_mask = EDMA_EH_FREEZE_5;
+
+		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
+			struct mv_port_priv *pp	= ap->private_data;
+			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+			ata_ehi_push_desc(ehi, "EDMA self-disable");
+		}
+	} else {
+		eh_freeze_mask = EDMA_EH_FREEZE;
+
+		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+			struct mv_port_priv *pp	= ap->private_data;
+			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+			ata_ehi_push_desc(ehi, "EDMA self-disable");
+		}
+
+		if (edma_err_cause & EDMA_ERR_SERR) {
+			sata_scr_read(&ap->link, SCR_ERROR, &serr);
+			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
+			err_mask = AC_ERR_ATA_BUS;
+			action |= ATA_EH_HARDRESET;
+		}
 	}
-	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
-		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
 
 	/* Clear EDMA now that SERR cleanup done */
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	/* check for fatal here and recover if needed */
-	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
-		mv_stop_and_reset(ap);
+	if (!err_mask) {
+		err_mask = AC_ERR_OTHER;
+		action |= ATA_EH_HARDRESET;
+	}
+
+	ehi->serror |= serr;
+	ehi->action |= action;
+
+	if (qc)
+		qc->err_mask |= err_mask;
+	else
+		ehi->err_mask |= err_mask;
+
+	if (edma_err_cause & eh_freeze_mask)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
+}
+
+static void mv_intr_pio(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 ata_status;
+
+	/* ignore spurious intr if drive still BUSY */
+	ata_status = readb(ap->ioaddr.status_addr);
+	if (unlikely(ata_status & ATA_BUSY))
+		return;
+
+	/* get active ATA command */
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (unlikely(!qc))			/* no active tag */
+		return;
+	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
+		return;
+
+	/* and finally, complete the ATA command */
+	qc->err_mask |= ac_err_mask(ata_status);
+	ata_qc_complete(qc);
+}
+
+static void mv_intr_edma(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	struct mv_port_priv *pp = ap->private_data;
+	struct ata_queued_cmd *qc;
+	u32 out_index, in_index;
+	int work_done = 0;
+
+	/* get h/w response queue pointer */
+	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+	while (1) {
+		u16 status;
+		unsigned int tag;
+
+		/* get s/w response queue last-read pointer, and compare */
+		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
+		if (in_index == out_index)
+			break;
+
+		/* 50xx: get active ATA command */
+		if (IS_GEN_I(hpriv))
+			tag = ap->link.active_tag;
+
+		/* Gen II/IIE: get active ATA command via tag, to enable
+		 * support for queueing.  this works transparently for
+		 * queued and non-queued modes.
+		 */
+		else if (IS_GEN_II(hpriv))
+			tag = (le16_to_cpu(pp->crpb[out_index].id)
+				>> CRPB_IOID_SHIFT_6) & 0x3f;
+
+		else /* IS_GEN_IIE */
+			tag = (le16_to_cpu(pp->crpb[out_index].id)
+				>> CRPB_IOID_SHIFT_7) & 0x3f;
+
+		qc = ata_qc_from_tag(ap, tag);
+
+		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
+		 * bits (WARNING: might not necessarily be associated
+		 * with this command), which -should- be clear
+		 * if all is well
+		 */
+		status = le16_to_cpu(pp->crpb[out_index].flags);
+		if (unlikely(status & 0xff)) {
+			mv_err_intr(ap, qc);
+			return;
+		}
+
+		/* and finally, complete the ATA command */
+		if (qc) {
+			qc->err_mask |=
+				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
+			ata_qc_complete(qc);
+		}
+
+		/* advance software response queue pointer, to
+		 * indicate (after the loop completes) to hardware
+		 * that we have consumed a response queue entry.
+		 */
+		work_done = 1;
+		pp->resp_idx++;
+	}
+
+	if (work_done)
+		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
+			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
+			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 }
 
 /**
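
The completion path rewritten above no longer reads the queue pointers back from the controller for every command: mv_qc_issue() and mv_intr_edma() keep free-running software counters (pp->req_idx, pp->resp_idx) and mask them with MV_MAX_Q_DEPTH_MASK whenever a CRQB/CRPB slot or a hardware pointer field is needed, so wrap-around falls out of the masking.  A standalone sketch of that idiom (the depth and shift values below are illustrative, not the driver's hardware constants):

	#include <stdio.h>

	#define Q_DEPTH		32		/* illustrative power-of-two queue depth */
	#define Q_DEPTH_MASK	(Q_DEPTH - 1)
	#define PTR_SHIFT	5		/* illustrative position of the slot field */

	int main(void)
	{
		unsigned int idx;

		/* A free-running counter masked on use wraps for free. */
		for (idx = 30; idx < 35; idx++) {
			unsigned int slot = idx & Q_DEPTH_MASK;	/* CRQB/CRPB slot */
			printf("idx %2u -> slot %2u, pointer field 0x%03x\n",
			       idx, slot, slot << PTR_SHIFT);
		}
		return 0;
	}
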
@@ -1374,10 +1599,8 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 {
 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
-	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
-	int shift, port, port0, hard_port, handled;
-	unsigned int err_mask;
+	int port, port0;
 
 	if (hc == 0)
 		port0 = 0;
@@ -1386,79 +1609,96 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 
 	/* we'll need the HC success int register in most cases */
 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	if (hc_irq_cause)
-		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+	if (!hc_irq_cause)
+		return;
+
+	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
-		hc,relevant,hc_irq_cause);
+		hc, relevant, hc_irq_cause);
 
 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
-		u8 ata_status = 0;
 		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp = ap->private_data;
+		int have_err_bits, hard_port, shift;
 
-		hard_port = mv_hardport_from_port(port); /* range 0..3 */
-		handled = 0;	/* ensure ata_status is set if handled++ */
-
-		/* Note that DEV_IRQ might happen spuriously during EDMA,
-		 * and should be ignored in such cases.
-		 * The cause of this is still under investigation.
-		 */
-		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
-			/* EDMA: check for response queue interrupt */
-			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
-				ata_status = mv_get_crpb_status(ap);
-				handled = 1;
-			}
-		} else {
-			/* PIO: check for device (drive) interrupt */
-			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
-				ata_status = readb(ap->ioaddr.status_addr);
-				handled = 1;
-				/* ignore spurious intr if drive still BUSY */
-				if (ata_status & ATA_BUSY) {
-					ata_status = 0;
-					handled = 0;
-				}
-			}
-		}
-
-		if (ap && (ap->flags & ATA_FLAG_DISABLED))
+		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
 			continue;
 
-		err_mask = ac_err_mask(ata_status);
-
 		shift = port << 1;		/* (port * 2) */
 		if (port >= MV_PORTS_PER_HC) {
 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
 		}
-		if ((PORT0_ERR << shift) & relevant) {
-			mv_err_intr(ap, 1);
-			err_mask |= AC_ERR_OTHER;
-			handled = 1;
+		have_err_bits = ((PORT0_ERR << shift) & relevant);
+
+		if (unlikely(have_err_bits)) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
+				continue;
+
+			mv_err_intr(ap, qc);
+			continue;
 		}
 
-		if (handled) {
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
-				VPRINTK("port %u IRQ found for qc, "
-					"ata_status 0x%x\n", port,ata_status);
-				/* mark qc status appropriately */
-				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
-					qc->err_mask |= err_mask;
-					ata_qc_complete(qc);
-				}
-			}
+		hard_port = mv_hardport_from_port(port); /* range 0..3 */
+
+		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
+				mv_intr_edma(ap);
+		} else {
+			if ((DEV_IRQ << hard_port) & hc_irq_cause)
+				mv_intr_pio(ap);
 		}
 	}
 	VPRINTK("EXIT\n");
 }
 
+static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	struct ata_port *ap;
+	struct ata_queued_cmd *qc;
+	struct ata_eh_info *ehi;
+	unsigned int i, err_mask, printed = 0;
+	u32 err_cause;
+
+	err_cause = readl(mmio + hpriv->irq_cause_ofs);
+
+	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
+		   err_cause);
+
+	DPRINTK("All regs @ PCI error\n");
+	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
+
+	writelfl(0, mmio + hpriv->irq_cause_ofs);
+
+	for (i = 0; i < host->n_ports; i++) {
+		ap = host->ports[i];
+		if (!ata_link_offline(&ap->link)) {
+			ehi = &ap->link.eh_info;
+			ata_ehi_clear_desc(ehi);
+			if (!printed++)
+				ata_ehi_push_desc(ehi,
+					"PCI err cause 0x%08x", err_cause);
+			err_mask = AC_ERR_HOST_BUS;
+			ehi->action = ATA_EH_HARDRESET;
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc)
+				qc->err_mask |= err_mask;
+			else
+				ehi->err_mask |= err_mask;
+
+			ata_port_freeze(ap);
+		}
+	}
+}
+
 /**
- *      mv_interrupt -
+ *      mv_interrupt - Main interrupt event handler
  *      @irq: unused
  *      @dev_instance: private data; in this case the host structure
- *      @regs: unused
  *
  *      Read the read only register to determine if any host
  *      controllers have pending interrupts.  If so, call lower level
@@ -1474,7 +1714,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct pt_regs *pt_
 	struct ata_host *host = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
-	struct mv_host_priv *hpriv;
 	u32 irq_stat;
 
 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@@ -1488,34 +1727,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct pt_regs *pt_
 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
 	spin_lock(&host->lock);
 
+	if (unlikely(irq_stat & PCI_ERR)) {
+		mv_pci_error(host, mmio);
+		handled = 1;
+		goto out_unlock;	/* skip all other HC irq handling */
+	}
+
 	for (hc = 0; hc < n_hcs; hc++) {
 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
 		if (relevant) {
 			mv_host_intr(host, relevant, hc);
-			handled++;
-		}
-	}
-
-	hpriv = host->private_data;
-	if (IS_60XX(hpriv)) {
-		/* deal with the interrupt coalescing bits */
-		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
-			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
-			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
-			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
+			handled = 1;
 		}
 	}
 
-	if (PCI_ERR & irq_stat) {
-		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
-		       readl(mmio + PCI_IRQ_CAUSE_OFS));
-
-		DPRINTK("All regs @ PCI error\n");
-		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
-
-		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
-		handled++;
-	}
+out_unlock:
 	spin_unlock(&host->lock);
 
 	return IRQ_RETVAL(handled);
@@ -1546,36 +1772,39 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
 	return ofs;
 }
 
-static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
+static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
 {
 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
-	if (ofs != 0xffffffffU)
-		return readl(addr + ofs);
-	else
-		return (u32) ofs;
+	if (ofs != 0xffffffffU) {
+		*val = readl(addr + ofs);
+		return 0;
+	} else
+		return -EINVAL;
 }
 
-static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
+static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
-	if (ofs != 0xffffffffU)
+	if (ofs != 0xffffffffU) {
 		writelfl(val, addr + ofs);
+		return 0;
+	} else
+		return -EINVAL;
 }
 
 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
 {
-	u8 rev_id;
 	int early_5080;
+	u8 revision;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
-
-	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
+	early_5080 = (pdev->device == 0x5080) && (revision == 0);
 
 	if (!early_5080) {
 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
@@ -1709,6 +1938,8 @@ static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 #define ZERO(reg) writel(0, mmio + (reg))
 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
 {
+	struct ata_host     *host = dev_get_drvdata(&pdev->dev);
+	struct mv_host_priv *hpriv = host->private_data;
 	u32 tmp;
 
 	tmp = readl(mmio + MV_PCI_MODE);
@@ -1720,8 +1951,8 @@ static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
 	ZERO(HC_MAIN_IRQ_MASK_OFS);
 	ZERO(MV_PCI_SERR_MASK);
-	ZERO(PCI_IRQ_CAUSE_OFS);
-	ZERO(PCI_IRQ_MASK_OFS);
+	ZERO(hpriv->irq_cause_ofs);
+	ZERO(hpriv->irq_mask_ofs);
 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
 	ZERO(MV_PCI_ERR_ATTRIBUTE);
@@ -1766,9 +1997,8 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
 	for (i = 0; i < 1000; i++) {
 		udelay(1);
 		t = readl(reg);
-		if (PCI_MASTER_EMPTY & t) {
+		if (PCI_MASTER_EMPTY & t)
 			break;
-		}
 	}
 	if (!(PCI_MASTER_EMPTY & t)) {
 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
@@ -1904,7 +2134,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 
 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
 
-	if (IS_60XX(hpriv)) {
+	if (IS_GEN_II(hpriv)) {
 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
 		ifctl |= (1 << 7);		/* enable gen2i speed */
 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
@@ -1920,32 +2150,12 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 
 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
 
-	if (IS_50XX(hpriv))
+	if (IS_GEN_I(hpriv))
 		mdelay(1);
 }
 
-static void mv_stop_and_reset(struct ata_port *ap)
-{
-	struct mv_host_priv *hpriv = ap->host->private_data;
-	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
-
-	mv_stop_dma(ap);
-
-	mv_channel_reset(hpriv, mmio, ap->port_no);
-
-	__mv_phy_reset(ap, 0);
-}
-
-static inline void __msleep(unsigned int msec, int can_sleep)
-{
-	if (can_sleep)
-		msleep(msec);
-	else
-		mdelay(msec);
-}
-
 /**
- *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
+ *      mv_phy_reset - Perform eDMA reset followed by COMRESET
  *      @ap: ATA channel to manipulate
  *
  *      Part of this is taken from __sata_phy_reset and modified to
@@ -1955,57 +2165,65 @@ static inline void __msleep(unsigned int msec, int can_sleep)
  *      Inherited from caller.  This is coded to safe to call at
  *      interrupt level, i.e. it does not sleep.
  */
-static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
+static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
+			 unsigned long deadline)
 {
 	struct mv_port_priv *pp	= ap->private_data;
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	void __iomem *port_mmio = mv_ap_base(ap);
-	struct ata_taskfile tf;
-	struct ata_device *dev = &ap->device[0];
-	unsigned long timeout;
 	int retry = 5;
 	u32 sstatus;
 
 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
 
-	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
-		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
-		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
+#ifdef DEBUG
+	{
+		u32 sstatus, serror, scontrol;
+
+		mv_scr_read(ap, SCR_STATUS, &sstatus);
+		mv_scr_read(ap, SCR_ERROR, &serror);
+		mv_scr_read(ap, SCR_CONTROL, &scontrol);
+		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
+			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
+	}
+#endif
 
 	/* Issue COMRESET via SControl */
 comreset_retry:
-	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
-	__msleep(1, can_sleep);
+	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
+	msleep(1);
 
-	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
-	__msleep(20, can_sleep);
+	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
+	msleep(20);
 
-	timeout = jiffies + msecs_to_jiffies(200);
 	do {
-		sata_scr_read(ap, SCR_STATUS, &sstatus);
+		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
 			break;
 
-		__msleep(1, can_sleep);
-	} while (time_before(jiffies, timeout));
+		msleep(1);
+	} while (time_before(jiffies, deadline));
 
 	/* work around errata */
-	if (IS_60XX(hpriv) &&
+	if (IS_GEN_II(hpriv) &&
 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
 	    (retry-- > 0))
 		goto comreset_retry;
 
-	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
-		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
-		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
+#ifdef DEBUG
+	{
+		u32 sstatus, serror, scontrol;
 
-	if (ata_port_online(ap)) {
-		ata_port_probe(ap);
-	} else {
-		sata_scr_read(ap, SCR_STATUS, &sstatus);
-		ata_port_printk(ap, KERN_INFO,
-				"no device found (phy stat %08x)\n", sstatus);
-		ata_port_disable(ap);
+		mv_scr_read(ap, SCR_STATUS, &sstatus);
+		mv_scr_read(ap, SCR_ERROR, &serror);
+		mv_scr_read(ap, SCR_CONTROL, &scontrol);
+		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
+			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
+	}
+#endif
+
+	if (ata_link_offline(&ap->link)) {
+		*class = ATA_DEV_NONE;
 		return;
 	}
 
@@ -2019,68 +2237,155 @@ comreset_retry:
 		u8 drv_stat = ata_check_status(ap);
 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
 			break;
-		__msleep(500, can_sleep);
+		msleep(500);
 		if (retry-- <= 0)
 			break;
+		if (time_after(jiffies, deadline))
+			break;
 	}
 
-	tf.lbah = readb(ap->ioaddr.lbah_addr);
-	tf.lbam = readb(ap->ioaddr.lbam_addr);
-	tf.lbal = readb(ap->ioaddr.lbal_addr);
-	tf.nsect = readb(ap->ioaddr.nsect_addr);
+	/* FIXME: if we passed the deadline, the following
+	 * code probably produces an invalid result
+	 */
 
-	dev->class = ata_dev_classify(&tf);
-	if (!ata_dev_enabled(dev)) {
-		VPRINTK("Port disabled post-sig: No device present.\n");
-		ata_port_disable(ap);
-	}
+	/* finally, read device signature from TF registers */
+	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
 
 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
 
 	VPRINTK("EXIT\n");
 }
 
-static void mv_phy_reset(struct ata_port *ap)
+static int mv_prereset(struct ata_link *link, unsigned long deadline)
 {
-	__mv_phy_reset(ap, 1);
+	struct ata_port *ap = link->ap;
+	struct mv_port_priv *pp	= ap->private_data;
+	struct ata_eh_context *ehc = &link->eh_context;
+	int rc;
+
+	rc = mv_stop_dma(ap);
+	if (rc)
+		ehc->i.action |= ATA_EH_HARDRESET;
+
+	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
+		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
+		ehc->i.action |= ATA_EH_HARDRESET;
+	}
+
+	/* if we're about to do hardreset, nothing more to do */
+	if (ehc->i.action & ATA_EH_HARDRESET)
+		return 0;
+
+	if (ata_link_online(link))
+		rc = ata_wait_ready(ap, deadline);
+	else
+		rc = -ENODEV;
+
+	return rc;
 }
 
-/**
- *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
- *      @ap: ATA channel to manipulate
- *
- *      Intent is to clear all pending error conditions, reset the
- *      chip/bus, fail the command, and move on.
- *
- *      LOCKING:
- *      This routine holds the host lock while failing the command.
- */
-static void mv_eng_timeout(struct ata_port *ap)
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+			unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
+	struct mv_host_priv *hpriv = ap->host->private_data;
 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
 
-	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
-	DPRINTK("All regs @ start of eng_timeout\n");
-	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
+	mv_stop_dma(ap);
 
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-        printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
-	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
+	mv_channel_reset(hpriv, mmio, ap->port_no);
 
-	spin_lock_irqsave(&ap->host->lock, flags);
-	mv_err_intr(ap, 0);
-	mv_stop_and_reset(ap);
-	spin_unlock_irqrestore(&ap->host->lock, flags);
+	mv_phy_reset(ap, class, deadline);
 
-	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
-	if (qc->flags & ATA_QCFLAG_ACTIVE) {
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		ata_eh_qc_complete(qc);
+	return 0;
+}
+
+static void mv_postreset(struct ata_link *link, unsigned int *classes)
+{
+	struct ata_port *ap = link->ap;
+	u32 serr;
+
+	/* print link status */
+	sata_print_link_status(link);
+
+	/* clear SError */
+	sata_scr_read(link, SCR_ERROR, &serr);
+	sata_scr_write_flush(link, SCR_ERROR, serr);
+
+	/* bail out if no device is present */
+	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+		DPRINTK("EXIT, no device\n");
+		return;
+	}
+
+	/* set up device control */
+	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+}
+
+static void mv_error_handler(struct ata_port *ap)
+{
+	ata_do_eh(ap, mv_prereset, ata_std_softreset,
+		  mv_hardreset, mv_postreset);
+}
+
+static void mv_post_int_cmd(struct ata_queued_cmd *qc)
+{
+	mv_stop_dma(qc->ap);
+}
+
+static void mv_eh_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
+	u32 tmp, mask;
+	unsigned int shift;
+
+	/* FIXME: handle coalescing completion events properly */
+
+	shift = ap->port_no * 2;
+	if (hc > 0)
+		shift++;
+
+	mask = 0x3 << shift;
+
+	/* disable assertion of portN err, done events */
+	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+}
+
+static void mv_eh_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
+	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 tmp, mask, hc_irq_cause;
+	unsigned int shift, hc_port_no = ap->port_no;
+
+	/* FIXME: handle coalescing completion events properly */
+
+	shift = ap->port_no * 2;
+	if (hc > 0) {
+		shift++;
+		hc_port_no -= 4;
 	}
+
+	mask = 0x3 << shift;
+
+	/* clear EDMA errors on this port */
+	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+	/* clear pending irq events */
+	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
+	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
+	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+
+	/* enable assertion of portN err, done events */
+	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
 }
 
 /**
@@ -2136,17 +2441,17 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	struct mv_host_priv *hpriv = host->private_data;
-	u8 rev_id;
 	u32 hp_flags = hpriv->hp_flags;
+	u8 pdev_revision;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &pdev_revision);
 
-	switch(board_idx) {
+	switch (board_idx) {
 	case chip_5080:
 		hpriv->ops = &mv5xxx_ops;
-		hp_flags |= MV_HP_50XX;
+		hp_flags |= MV_HP_GEN_I;
 
-		switch (rev_id) {
+		switch (pdev_revision) {
 		case 0x1:
 			hp_flags |= MV_HP_ERRATA_50XXB0;
 			break;
@@ -2164,9 +2469,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 	case chip_504x:
 	case chip_508x:
 		hpriv->ops = &mv5xxx_ops;
-		hp_flags |= MV_HP_50XX;
+		hp_flags |= MV_HP_GEN_I;
 
-		switch (rev_id) {
+		switch (pdev_revision) {
 		case 0x0:
 			hp_flags |= MV_HP_ERRATA_50XXB0;
 			break;
@@ -2184,8 +2489,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 	case chip_604x:
 	case chip_608x:
 		hpriv->ops = &mv6xxx_ops;
+		hp_flags |= MV_HP_GEN_II;
 
-		switch (rev_id) {
+		switch (pdev_revision) {
 		case 0x7:
 			hp_flags |= MV_HP_ERRATA_60X1B2;
 			break;
@@ -2201,12 +2507,41 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		break;
 
 	case chip_7042:
+		hp_flags |= MV_HP_PCIE;
+		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
+		    (pdev->device == 0x2300 || pdev->device == 0x2310))
+		{
+			/*
+			 * Highpoint RocketRAID PCIe 23xx series cards:
+			 *
+			 * Unconfigured drives are treated as "Legacy"
+			 * by the BIOS, and it overwrites sector 8 with
+			 * a "Lgcy" metadata block prior to Linux boot.
+			 *
+			 * Configured drives (RAID or JBOD) leave sector 8
+			 * alone, but instead overwrite a high numbered
+			 * sector for the RAID metadata.  This sector can
+			 * be determined exactly, by truncating the physical
+			 * drive capacity to a nice even GB value.
+			 *
+			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
+			 *
+			 * Warn the user, lest they think we're just buggy.
+			 */
+			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
+				" BIOS CORRUPTS DATA on all attached drives,"
+				" regardless of if/how they are configured."
+				" BEWARE!\n");
+			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
+				" use sectors 8-9 on \"Legacy\" drives,"
+				" and avoid the final two gigabytes on"
+				" all RocketRAID BIOS initialized drives.\n");
+		}
 	case chip_6042:
 		hpriv->ops = &mv6xxx_ops;
-
 		hp_flags |= MV_HP_GEN_IIE;
 
-		switch (rev_id) {
+		switch (pdev_revision) {
 		case 0x0:
 			hp_flags |= MV_HP_ERRATA_XX42A0;
 			break;
@@ -2222,11 +2557,21 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		break;
 
 	default:
-		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "BUG: invalid board index %u\n", board_idx);
 		return 1;
 	}
 
 	hpriv->hp_flags = hp_flags;
+	if (hp_flags & MV_HP_PCIE) {
+		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
+		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
+		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
+	} else {
+		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
+		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
+		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
+	}
 
 	return 0;
 }
@@ -2270,7 +2615,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	hpriv->ops->enable_leds(hpriv, mmio);
 
 	for (port = 0; port < host->n_ports; port++) {
-		if (IS_60XX(hpriv)) {
+		if (IS_GEN_II(hpriv)) {
 			void __iomem *port_mmio = mv_port_base(mmio, port);
 
 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
@@ -2283,8 +2628,14 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	}
 
 	for (port = 0; port < host->n_ports; port++) {
+		struct ata_port *ap = host->ports[port];
 		void __iomem *port_mmio = mv_port_base(mmio, port);
-		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
+		unsigned int offset = port_mmio - mmio;
+
+		mv_port_init(&ap->ioaddr, port_mmio);
+
+		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
 	}
 
 	for (hc = 0; hc < n_hc; hc++) {
@@ -2300,12 +2651,12 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	}
 
 	/* Clear any currently outstanding host interrupt conditions */
-	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
+	writelfl(0, mmio + hpriv->irq_cause_ofs);
 
 	/* and unmask interrupt generation for host regs */
-	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
+	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
 
-	if (IS_50XX(hpriv))
+	if (IS_GEN_I(hpriv))
 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
 	else
 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
@@ -2314,8 +2665,8 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		"PCI int cause/mask=0x%08x/0x%08x\n",
 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
-		readl(mmio + PCI_IRQ_CAUSE_OFS),
-		readl(mmio + PCI_IRQ_MASK_OFS));
+		readl(mmio + hpriv->irq_cause_ofs),
+		readl(mmio + hpriv->irq_mask_ofs));
 
 done:
 	return rc;
@@ -2334,25 +2685,32 @@ static void mv_print_info(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
 	struct mv_host_priv *hpriv = host->private_data;
-	u8 rev_id, scc;
-	const char *scc_s;
+	u8 scc;
+	const char *scc_s, *gen;
 
 	/* Use this to determine the HW stepping of the chip so we know
 	 * what errata to workaround
 	 */
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
-
 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
 	if (scc == 0)
 		scc_s = "SCSI";
 	else if (scc == 0x01)
 		scc_s = "RAID";
 	else
-		scc_s = "unknown";
+		scc_s = "?";
+
+	if (IS_GEN_I(hpriv))
+		gen = "I";
+	else if (IS_GEN_II(hpriv))
+		gen = "II";
+	else if (IS_GEN_IIE(hpriv))
+		gen = "IIE";
+	else
+		gen = "?";
 
 	dev_printk(KERN_INFO, &pdev->dev,
-	       "%u slots %u ports %s mode IRQ via %s\n",
-	       (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
+	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
+	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 }
 
@@ -2366,7 +2724,7 @@ static void mv_print_info(struct ata_host *host)
  */
 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	static int printed_version = 0;
+	static int printed_version;
 	unsigned int board_idx = (unsigned int)ent->driver_data;
 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
 	struct ata_host *host;
@@ -2414,8 +2772,9 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mv_print_info(host);
 
 	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
-				 &mv_sht);
+				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
 
 static int __init mv_init(void)
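
As a worked example of the sector arithmetic behind the Highpoint RocketRAID warning added to mv_chip_id() above (the drive size here is hypothetical): the BIOS-written metadata lands at sector 8 on "Legacy" drives, and at the capacity truncated to a 0x100000-sector boundary on configured drives.

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical ~200 GB drive, in 512-byte sectors */
		unsigned long long n_sectors = 390721968ULL;

		/* configured (RAID/JBOD) drives: metadata at the capacity
		 * rounded down to a 0x100000-sector boundary, per the
		 * chip_7042 comment above; "Legacy" drives lose sector 8 */
		unsigned long long raid_meta = n_sectors & ~0xfffffULL;

		printf("legacy metadata sector : 8\n");
		printf("RAID metadata sector   : %llu of %llu\n", raid_meta, n_sectors);
		return 0;
	}
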
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0d218c7..df96f9a 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -49,7 +49,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME			"sata_nv"
-#define DRV_VERSION			"3.4"
+#define DRV_VERSION			"3.5"
 
 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
 
@@ -163,12 +163,41 @@ enum {
 	NV_ADMA_STAT_STOPPED		= (1 << 10),
 	NV_ADMA_STAT_DONE		= (1 << 12),
 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
-	 				  NV_ADMA_STAT_TIMEOUT,
+					  NV_ADMA_STAT_TIMEOUT,
 
 	/* port flags */
 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
 
+	/* MCP55 reg offset */
+	NV_CTL_MCP55			= 0x400,
+	NV_INT_STATUS_MCP55		= 0x440,
+	NV_INT_ENABLE_MCP55		= 0x444,
+	NV_NCQ_REG_MCP55		= 0x448,
+
+	/* MCP55 */
+	NV_INT_ALL_MCP55		= 0xffff,
+	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
+	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
+
+	/* SWNCQ ENABLE BITS */
+	NV_CTL_PRI_SWNCQ		= 0x02,
+	NV_CTL_SEC_SWNCQ		= 0x04,
+
+	/* SW NCQ status bits */
+	NV_SWNCQ_IRQ_DEV		= (1 << 0),
+	NV_SWNCQ_IRQ_PM			= (1 << 1),
+	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
+	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
+
+	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
+	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
+	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
+	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
+
+	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
+					  NV_SWNCQ_IRQ_REMOVED,
+
 };
 
 /* ADMA Physical Region Descriptor - one SG segment */
@@ -199,7 +228,7 @@ struct nv_adma_cpb {
 	u8			reserved1;     /* 1 */
 	u8			ctl_flags;     /* 2 */
 	/* len is length of taskfile in 64 bit words */
- 	u8			len;           /* 3  */
+	u8			len;		/* 3  */
 	u8			tag;           /* 4 */
 	u8			next_cpb_idx;  /* 5 */
 	__le16			reserved2;     /* 6-7 */
@@ -215,9 +244,9 @@ struct nv_adma_port_priv {
 	dma_addr_t		cpb_dma;
 	struct nv_adma_prd	*aprd;
 	dma_addr_t		aprd_dma;
-	void __iomem *		ctl_block;
-	void __iomem *		gen_block;
-	void __iomem *		notifier_clear_block;
+	void __iomem		*ctl_block;
+	void __iomem		*gen_block;
+	void __iomem		*notifier_clear_block;
 	u8			flags;
 	int			last_issue_ncq;
 };
@@ -226,9 +255,45 @@ struct nv_host_priv {
 	unsigned long		type;
 };
 
-#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+struct defer_queue {
+	u32		defer_bits;
+	unsigned int	head;
+	unsigned int	tail;
+	unsigned int	tag[ATA_MAX_QUEUE];
+};
+
+enum ncq_saw_flag_list {
+	ncq_saw_d2h	= (1U << 0),
+	ncq_saw_dmas	= (1U << 1),
+	ncq_saw_sdb	= (1U << 2),
+	ncq_saw_backout	= (1U << 3),
+};
+
+struct nv_swncq_port_priv {
+	struct ata_prd	*prd;	 /* our SG list */
+	dma_addr_t	prd_dma; /* and its DMA mapping */
+	void __iomem	*sactive_block;
+	void __iomem	*irq_block;
+	void __iomem	*tag_block;
+	u32		qc_active;
+
+	unsigned int	last_issue_tag;
+
+	/* fifo circular queue to store deferral command */
+	struct defer_queue defer_queue;
 
-static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+	/* for NCQ interrupt analysis */
+	u32		dhfis_bits;
+	u32		dmafis_bits;
+	u32		sdbfis_bits;
+
+	unsigned int	ncq_flags;
+};
+
+
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 #ifdef CONFIG_PM
 static int nv_pci_device_resume(struct pci_dev *pdev);
 #endif
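
Commands that the SWNCQ path cannot issue immediately are parked in the defer_queue introduced above -- a small circular FIFO of tags replayed in issue order once the device can accept more.  A stripped-down sketch of such a ring (defer_bits omitted; the helpers here are illustrative, not the driver's own routines):

	#include <stdio.h>

	#define ATA_MAX_QUEUE	32	/* illustrative; libata defines the real value */

	struct defer_queue {
		unsigned int head;
		unsigned int tail;
		unsigned int tag[ATA_MAX_QUEUE];
	};

	static void dq_push(struct defer_queue *dq, unsigned int tag)
	{
		dq->tag[dq->tail++ % ATA_MAX_QUEUE] = tag;
	}

	static unsigned int dq_pop(struct defer_queue *dq)
	{
		return dq->tag[dq->head++ % ATA_MAX_QUEUE];
	}

	int main(void)
	{
		struct defer_queue dq = { 0, 0, { 0 } };
		unsigned int first, second;

		dq_push(&dq, 3);		/* deferred in issue order... */
		dq_push(&dq, 7);
		first  = dq_pop(&dq);		/* ...and replayed FIFO */
		second = dq_pop(&dq);
		printf("replay %u then %u\n", first, second);
		return 0;
	}
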
@@ -236,8 +301,8 @@ static void nv_ck804_host_stop(struct ata_host *host);
 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
-static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 
 static void nv_nf2_freeze(struct ata_port *ap);
 static void nv_nf2_thaw(struct ata_port *ap);
@@ -263,13 +328,29 @@ static void nv_adma_host_stop(struct ata_host *host);
 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 
+static void nv_mcp55_thaw(struct ata_port *ap);
+static void nv_mcp55_freeze(struct ata_port *ap);
+static void nv_swncq_error_handler(struct ata_port *ap);
+static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_port_start(struct ata_port *ap);
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_swncq_port_resume(struct ata_port *ap);
+#endif
+
 enum nv_host_type
 {
 	GENERIC,
 	NFORCE2,
 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
 	CK804,
-	ADMA
+	ADMA,
+	SWNCQ,
 };
 
 static const struct pci_device_id nv_pci_tbl[] = {
@@ -280,10 +361,10 @@ static const struct pci_device_id nv_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
@@ -325,6 +406,7 @@ static struct scsi_host_template nv_adma_sht = {
 	.name			= DRV_NAME,
 	.ioctl			= ata_scsi_ioctl,
 	.queuecommand		= ata_scsi_queuecmd,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
 	.can_queue		= NV_ADMA_MAX_CPBS,
 	.this_id		= ATA_SHT_THIS_ID,
 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
@@ -338,8 +420,26 @@ static struct scsi_host_template nv_adma_sht = {
 	.bios_param		= ata_std_bios_param,
 };
 
+static struct scsi_host_template nv_swncq_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.change_queue_depth	= ata_scsi_change_queue_depth,
+	.can_queue		= ATA_MAX_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= nv_swncq_slave_config,
+	.slave_destroy		= ata_scsi_slave_destroy,
+	.bios_param		= ata_std_bios_param,
+};
+
 static const struct ata_port_operations nv_generic_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.exec_command		= ata_exec_command,
@@ -358,14 +458,12 @@ static const struct ata_port_operations nv_generic_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_operations nv_nf2_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.exec_command		= ata_exec_command,
@@ -384,14 +482,12 @@ static const struct ata_port_operations nv_nf2_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_operations nv_ck804_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.exec_command		= ata_exec_command,
@@ -410,7 +506,6 @@ static const struct ata_port_operations nv_ck804_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= ata_port_start,
@@ -418,7 +513,6 @@ static const struct ata_port_operations nv_ck804_ops = {
 };
 
 static const struct ata_port_operations nv_adma_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= nv_adma_tf_read,
 	.check_atapi_dma	= nv_adma_check_atapi_dma,
@@ -429,6 +523,7 @@ static const struct ata_port_operations nv_adma_ops = {
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
+	.qc_defer		= ata_std_qc_defer,
 	.qc_prep		= nv_adma_qc_prep,
 	.qc_issue		= nv_adma_qc_issue,
 	.freeze			= nv_adma_freeze,
@@ -438,7 +533,6 @@ static const struct ata_port_operations nv_adma_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= nv_adma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= nv_scr_read,
 	.scr_write		= nv_scr_write,
 	.port_start		= nv_adma_port_start,
@@ -450,12 +544,41 @@ static const struct ata_port_operations nv_adma_ops = {
 	.host_stop		= nv_adma_host_stop,
 };
 
+static const struct ata_port_operations nv_swncq_ops = {
+	.tf_load		= ata_tf_load,
+	.tf_read		= ata_tf_read,
+	.exec_command		= ata_exec_command,
+	.check_status		= ata_check_status,
+	.dev_select		= ata_std_dev_select,
+	.bmdma_setup		= ata_bmdma_setup,
+	.bmdma_start		= ata_bmdma_start,
+	.bmdma_stop		= ata_bmdma_stop,
+	.bmdma_status		= ata_bmdma_status,
+	.qc_defer		= ata_std_qc_defer,
+	.qc_prep		= nv_swncq_qc_prep,
+	.qc_issue		= nv_swncq_qc_issue,
+	.freeze			= nv_mcp55_freeze,
+	.thaw			= nv_mcp55_thaw,
+	.error_handler		= nv_swncq_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+	.data_xfer		= ata_data_xfer,
+	.irq_clear		= ata_bmdma_irq_clear,
+	.irq_on			= ata_irq_on,
+	.scr_read		= nv_scr_read,
+	.scr_write		= nv_scr_write,
+#ifdef CONFIG_PM
+	.port_suspend		= nv_swncq_port_suspend,
+	.port_resume		= nv_swncq_port_resume,
+#endif
+	.port_start		= nv_swncq_port_start,
+};
+
 static const struct ata_port_info nv_port_info[] = {
 	/* generic */
 	{
 		.sht		= &nv_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_HRST_TO_RESUME,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
@@ -465,8 +588,8 @@ static const struct ata_port_info nv_port_info[] = {
 	/* nforce2/3 */
 	{
 		.sht		= &nv_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_HRST_TO_RESUME,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
@@ -476,8 +599,8 @@ static const struct ata_port_info nv_port_info[] = {
 	/* ck804 */
 	{
 		.sht		= &nv_sht,
-		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_HRST_TO_RESUME,
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
@@ -488,14 +611,26 @@ static const struct ata_port_info nv_port_info[] = {
 	{
 		.sht		= &nv_adma_sht,
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_HRST_TO_RESUME |
 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
 		.pio_mask	= NV_PIO_MASK,
 		.mwdma_mask	= NV_MWDMA_MASK,
 		.udma_mask	= NV_UDMA_MASK,
 		.port_ops	= &nv_adma_ops,
 		.irq_handler	= nv_adma_interrupt,
 	},
+	/* SWNCQ */
+	{
+		.sht		= &nv_swncq_sht,
+		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_NCQ,
+		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
+		.pio_mask	= NV_PIO_MASK,
+		.mwdma_mask	= NV_MWDMA_MASK,
+		.udma_mask	= NV_UDMA_MASK,
+		.port_ops	= &nv_swncq_ops,
+		.irq_handler	= nv_swncq_interrupt,
+	},
 };
 
 MODULE_AUTHOR("NVIDIA");
@@ -505,6 +640,7 @@ MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
 static int adma_enabled = 1;
+static int swncq_enabled;
 
 static void nv_adma_register_mode(struct ata_port *ap)
 {
@@ -517,12 +653,12 @@ static void nv_adma_register_mode(struct ata_port *ap)
 		return;
 
 	status = readw(mmio + NV_ADMA_STAT);
-	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
+	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
 		ndelay(50);
 		status = readw(mmio + NV_ADMA_STAT);
 		count++;
 	}
-	if(count == 20)
+	if (count == 20)
 		ata_port_printk(ap, KERN_WARNING,
 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
 			status);
@@ -532,12 +668,12 @@ static void nv_adma_register_mode(struct ata_port *ap)
 
 	count = 0;
 	status = readw(mmio + NV_ADMA_STAT);
-	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
+	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
 		ndelay(50);
 		status = readw(mmio + NV_ADMA_STAT);
 		count++;
 	}
-	if(count == 20)
+	if (count == 20)
 		ata_port_printk(ap, KERN_WARNING,
 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
 			 status);
@@ -561,13 +697,13 @@ static void nv_adma_mode(struct ata_port *ap)
 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
 
 	status = readw(mmio + NV_ADMA_STAT);
-	while(((status & NV_ADMA_STAT_LEGACY) ||
+	while (((status & NV_ADMA_STAT_LEGACY) ||
 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
 		ndelay(50);
 		status = readw(mmio + NV_ADMA_STAT);
 		count++;
 	}
-	if(count == 20)
+	if (count == 20)
 		ata_port_printk(ap, KERN_WARNING,
 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
 			status);
@@ -593,7 +729,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		/* Not a proper libata device, ignore */
 		return rc;
 
-	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
+	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 		/*
 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
 		 * Therefore ATAPI commands are sent through the legacy interface.
@@ -611,8 +747,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		   on the port. */
 		adma_enable = 0;
 		nv_adma_register_mode(ap);
-	}
-	else {
+	} else {
 		bounce_limit = *ap->dev->dma_mask;
 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
@@ -621,23 +756,22 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
 
-	if(ap->port_no == 1)
+	if (ap->port_no == 1)
 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
 	else
 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
 
-	if(adma_enable) {
+	if (adma_enable) {
 		new_reg = current_reg | config_mask;
 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
-	}
-	else {
+	} else {
 		new_reg = current_reg & ~config_mask;
 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
 	}
 
-	if(current_reg != new_reg)
+	if (current_reg != new_reg)
 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 
 	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
@@ -657,11 +791,13 @@ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
 
 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 {
-	/* Since commands where a result TF is requested are not
-	   executed in ADMA mode, the only time this function will be called
-	   in ADMA mode will be if a command fails. In this case we
-	   don't care about going into register mode with ADMA commands
-	   pending, as the commands will all shortly be aborted anyway. */
+	/* Other than when internal or pass-through commands are executed,
+	   the only time this function will be called in ADMA mode will be
+	   if a command fails. In the failure case we don't care about going
+	   into register mode with ADMA commands pending, as the commands will
+	   all shortly be aborted anyway. We assume that NCQ commands are not
+	   issued via passthrough, which is the only way that switching into
+	   ADMA mode could abort outstanding commands. */
 	nv_adma_register_mode(ap);
 
 	ata_tf_read(ap, tf);
@@ -671,7 +807,7 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 {
 	unsigned int idx = 0;
 
-	if(tf->flags & ATA_TFLAG_ISADDR) {
+	if (tf->flags & ATA_TFLAG_ISADDR) {
 		if (tf->flags & ATA_TFLAG_LBA48) {
 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
@@ -688,12 +824,12 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
 	}
 
-	if(tf->flags & ATA_TFLAG_DEVICE)
+	if (tf->flags & ATA_TFLAG_DEVICE)
 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
 
 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
 
-	while(idx < 12)
+	while (idx < 12)
 		cpb[idx++] = cpu_to_le16(IGN);
 
 	return idx;
@@ -710,23 +846,24 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 		     flags & (NV_CPB_RESP_ATA_ERR |
 			      NV_CPB_RESP_CMD_ERR |
 			      NV_CPB_RESP_CPB_ERR)))) {
-		struct ata_eh_info *ehi = &ap->eh_info;
+		struct ata_eh_info *ehi = &ap->link.eh_info;
 		int freeze = 0;
 
 		ata_ehi_clear_desc(ehi);
-		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
+		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
 		if (flags & NV_CPB_RESP_ATA_ERR) {
-			ata_ehi_push_desc(ehi, ": ATA error");
+			ata_ehi_push_desc(ehi, "ATA error");
 			ehi->err_mask |= AC_ERR_DEV;
 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
-			ata_ehi_push_desc(ehi, ": CMD error");
+			ata_ehi_push_desc(ehi, "CMD error");
 			ehi->err_mask |= AC_ERR_DEV;
 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
-			ata_ehi_push_desc(ehi, ": CPB error");
+			ata_ehi_push_desc(ehi, "CPB error");
 			ehi->err_mask |= AC_ERR_SYSTEM;
 			freeze = 1;
 		} else {
 			/* notifier error, but no error in CPB flags? */
+			ata_ehi_push_desc(ehi, "unknown");
 			ehi->err_mask |= AC_ERR_OTHER;
 			freeze = 1;
 		}
@@ -742,15 +879,16 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
 		if (likely(qc)) {
-			DPRINTK("Completing qc from tag %d\n",cpb_num);
+			DPRINTK("Completing qc from tag %d\n", cpb_num);
 			ata_qc_complete(qc);
 		} else {
-			struct ata_eh_info *ehi = &ap->eh_info;
+			struct ata_eh_info *ehi = &ap->link.eh_info;
 			/* Notifier bits set without a command may indicate the drive
 			   is misbehaving. Raise host state machine violation on this
 			   condition. */
-			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
-				cpb_num);
+			ata_port_printk(ap, KERN_ERR,
+					"notifier for tag %d with no cmd?\n",
+					cpb_num);
 			ehi->err_mask |= AC_ERR_HSM;
 			ehi->action |= ATA_EH_SOFTRESET;
 			ata_port_freeze(ap);
@@ -762,7 +900,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
 
 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
 {
-	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 
 	/* freeze if hotplugged */
 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
@@ -790,6 +928,8 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs
 	int i, handled = 0;
 	u32 notifier_clears[2];
 
+	DPRINTK("ENTER\n");
+
 	spin_lock(&host->lock);
 
 	for (i = 0; i < host->n_ports; i++) {
@@ -815,7 +955,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs
 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
 					>> (NV_INT_PORT_SHIFT * i);
-				if(ata_tag_valid(ap->active_tag))
+				if (ata_tag_valid(ap->link.active_tag))
 					/** NV_INT_DEV indication seems unreliable at times
 					    at least in ADMA mode. Force it on always when a
 					    command is active, to prevent losing interrupts. */
@@ -829,7 +969,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs
 
 			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 
-			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
 			    !notifier_error)
 				/* Nothing to do */
 				continue;
@@ -850,23 +990,24 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs
 					       NV_ADMA_STAT_HOTUNPLUG |
 					       NV_ADMA_STAT_TIMEOUT |
 					       NV_ADMA_STAT_SERROR))) {
-				struct ata_eh_info *ehi = &ap->eh_info;
+				struct ata_eh_info *ehi = &ap->link.eh_info;
 
 				ata_ehi_clear_desc(ehi);
-				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
+				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
 				if (status & NV_ADMA_STAT_TIMEOUT) {
 					ehi->err_mask |= AC_ERR_SYSTEM;
-					ata_ehi_push_desc(ehi, ": timeout");
+					ata_ehi_push_desc(ehi, "timeout");
 				} else if (status & NV_ADMA_STAT_HOTPLUG) {
 					ata_ehi_hotplugged(ehi);
-					ata_ehi_push_desc(ehi, ": hotplug");
+					ata_ehi_push_desc(ehi, "hotplug");
 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
 					ata_ehi_hotplugged(ehi);
-					ata_ehi_push_desc(ehi, ": hot unplug");
+					ata_ehi_push_desc(ehi, "hot unplug");
 				} else if (status & NV_ADMA_STAT_SERROR) {
 					/* let libata analyze SError and figure out the cause */
-					ata_ehi_push_desc(ehi, ": SError");
-				}
+					ata_ehi_push_desc(ehi, "SError");
+				} else
+					ata_ehi_push_desc(ehi, "unknown");
 				ata_port_freeze(ap);
 				continue;
 			}
@@ -876,23 +1017,23 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance, struct pt_regs
 				u32 check_commands;
 				int pos, error = 0;
 
-				if(ata_tag_valid(ap->active_tag))
-					check_commands = 1 << ap->active_tag;
+				if (ata_tag_valid(ap->link.active_tag))
+					check_commands = 1 << ap->link.active_tag;
 				else
-					check_commands = ap->sactive;
+					check_commands = ap->link.sactive;
 
 				/** Check CPBs for completed commands */
 				while ((pos = ffs(check_commands)) && !error) {
 					pos--;
 					error = nv_adma_check_cpb(ap, pos,
-						notifier_error & (1 << pos) );
-					check_commands &= ~(1 << pos );
+						notifier_error & (1 << pos));
+					check_commands &= ~(1 << pos);
 				}
 			}
 		}
 	}
 
-	if(notifier_clears[0] || notifier_clears[1]) {
+	if (notifier_clears[0] || notifier_clears[1]) {
 		/* Note: Both notifier clear registers must be written
 		   if either is set, even if one is zero, according to NVIDIA. */
 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
@@ -918,14 +1059,14 @@ static void nv_adma_freeze(struct ata_port *ap)
 		return;
 
 	/* clear any outstanding CK804 notifications */
-	writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
 
 	/* Disable interrupt */
 	tmp = readw(mmio + NV_ADMA_CTL);
-	writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
 		mmio + NV_ADMA_CTL);
-	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 }
 
 static void nv_adma_thaw(struct ata_port *ap)
@@ -941,9 +1082,9 @@ static void nv_adma_thaw(struct ata_port *ap)
 
 	/* Enable interrupt */
 	tmp = readw(mmio + NV_ADMA_CTL);
-	writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
 		mmio + NV_ADMA_CTL);
-	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 }
 
 static void nv_adma_irq_clear(struct ata_port *ap)
@@ -958,7 +1099,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
 	}
 
 	/* clear any outstanding CK804 notifications */
-	writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
 
 	/* clear ADMA status */
@@ -983,7 +1124,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
 {
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 
-	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
 		ata_bmdma_post_internal_cmd(qc);
 }
 
@@ -1029,7 +1170,7 @@ static int nv_adma_port_start(struct ata_port *ap)
 	pp->cpb_dma = mem_dma;
 
 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
-	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
 
 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
@@ -1053,15 +1194,15 @@ static int nv_adma_port_start(struct ata_port *ap)
 
 	/* clear GO for register mode, enable interrupt */
 	tmp = readw(mmio + NV_ADMA_CTL);
-	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
-		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
 
 	tmp = readw(mmio + NV_ADMA_CTL);
 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
-	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 	udelay(1);
 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
-	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 
 	return 0;
 }
@@ -1101,7 +1242,7 @@ static int nv_adma_port_resume(struct ata_port *ap)
 
 	/* set CPB block location */
 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
-	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
+	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
 
 	/* clear any outstanding interrupt conditions */
 	writew(0xffff, mmio + NV_ADMA_STAT);
@@ -1114,15 +1255,15 @@ static int nv_adma_port_resume(struct ata_port *ap)
 
 	/* clear GO for register mode, enable interrupt */
 	tmp = readw(mmio + NV_ADMA_CTL);
-	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
-		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
 
 	tmp = readw(mmio + NV_ADMA_CTL);
 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
-	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 	udelay(1);
 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
-	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 
 	return 0;
 }
@@ -1206,7 +1347,8 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
 	idx = 0;
 
 	ata_for_each_sg(sg, qc) {
-		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+		aprd = (idx < 5) ? &cpb->aprd[idx] :
+			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
 		nv_adma_fill_aprd(qc, sg, idx, aprd);
 		idx++;
 	}
@@ -1221,14 +1363,12 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 
 	/* ADMA engine can only be used for non-ATAPI DMA commands,
-	   or interrupt-driven no-data commands, where a result taskfile
-	   is not required. */
-	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
-	   (qc->tf.flags & ATA_TFLAG_POLLING) ||
-	   (qc->flags & ATA_QCFLAG_RESULT_TF))
+	   or interrupt-driven no-data commands. */
+	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+	   (qc->tf.flags & ATA_TFLAG_POLLING))
 		return 1;
 
-	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
+	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
 	   (qc->tf.protocol == ATA_PROT_NODATA))
 		return 0;
 
@@ -1243,6 +1383,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 		       NV_CPB_CTL_IEN;
 
 	if (nv_adma_use_reg_mode(qc)) {
+		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		ata_qc_prep(qc);
 		return;
@@ -1265,14 +1407,14 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 
 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
 
-	if(qc->flags & ATA_QCFLAG_DMAMAP) {
+	if (qc->flags & ATA_QCFLAG_DMAMAP) {
 		nv_adma_fill_sg(qc, cpb);
 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
 	} else
 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
 
-	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
-	   finished filling in all of the contents */
+	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
+	   until we are finished filling in all of the contents */
 	wmb();
 	cpb->ctl_flags = ctl_flags;
 	wmb();
@@ -1287,9 +1429,21 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
 
 	VPRINTK("ENTER\n");
 
+	/* We can't handle result taskfile with NCQ commands, since
+	   retrieving the taskfile switches us out of ADMA mode and would abort
+	   existing commands. */
+	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
+		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
+		ata_dev_printk(qc->dev, KERN_ERR,
+			"NCQ w/ RESULT_TF not allowed\n");
+		return AC_ERR_SYSTEM;
+	}
+
 	if (nv_adma_use_reg_mode(qc)) {
 		/* use ATA register mode */
 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
+		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		return ata_qc_issue_prot(qc);
 	} else
@@ -1299,16 +1453,16 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
 	   and (number of cpbs to append -1) in top 8 bits */
 	wmb();
 
-	if(curr_ncq != pp->last_issue_ncq) {
-	   	/* Seems to need some delay before switching between NCQ and non-NCQ
-		   commands, else we get command timeouts and such. */
+	if (curr_ncq != pp->last_issue_ncq) {
+		/* Seems to need some delay before switching between NCQ and
+		   non-NCQ commands, else we get command timeouts and such. */
 		udelay(20);
 		pp->last_issue_ncq = curr_ncq;
 	}
 
 	writew(qc->tag, mmio + NV_ADMA_APPEND);
 
-	DPRINTK("Issued tag %u\n",qc->tag);
+	DPRINTK("Issued tag %u\n", qc->tag);
 
 	return 0;
 }
@@ -1320,6 +1474,8 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, struct pt_r
 	unsigned int handled = 0;
 	unsigned long flags;
 
+	DPRINTK("ENTER\n");
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	for (i = 0; i < host->n_ports; i++) {
@@ -1330,7 +1486,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, struct pt_r
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
-			qc = ata_qc_from_tag(ap, ap->active_tag);
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += ata_host_intr(ap, qc);
 			else
@@ -1350,6 +1506,8 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
 {
 	int i, handled = 0;
 
+	DPRINTK("ENTER\n");
+
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 
@@ -1368,6 +1526,8 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, struct pt_regs
 	u8 irq_stat;
 	irqreturn_t ret;
 
+	DPRINTK("ENTER\n");
+
 	spin_lock(&host->lock);
 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
 	ret = nv_do_interrupt(host, irq_stat);
@@ -1382,6 +1542,8 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, struct pt_reg
 	u8 irq_stat;
 	irqreturn_t ret;
 
+	DPRINTK("ENTER\n");
+
 	spin_lock(&host->lock);
 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
 	ret = nv_do_interrupt(host, irq_stat);
@@ -1390,20 +1552,22 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, struct pt_reg
 	return ret;
 }
 
-static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
+		return -EINVAL;
 
-	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
-static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return;
+		return -EINVAL;
 
 	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
 static void nv_nf2_freeze(struct ata_port *ap)
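
The SCR accessors above move from returning the raw register value (with 0xffffffff signalling failure) to returning an error code and handing the value back through a pointer, in line with the libata-wide change elsewhere in this update. A minimal sketch of a caller under the new signature, assuming the usual libata headers (the function name is hypothetical):

static int example_report_sstatus(struct ata_port *ap)
{
	u32 sstatus;
	int rc;

	/* new-style accessor: 0 on success, negative errno on failure
	 * (-EINVAL for registers beyond SCR_CONTROL) */
	rc = ap->ops->scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	ata_port_printk(ap, KERN_INFO, "SStatus 0x%x\n", sstatus);
	return 0;
}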
@@ -1454,16 +1618,44 @@ static void nv_ck804_thaw(struct ata_port *ap)
 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
 }
 
-static int nv_hardreset(struct ata_port *ap, unsigned int *class,
+static void nv_mcp55_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+	u32 mask;
+
+	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+	mask &= ~(NV_INT_ALL_MCP55 << shift);
+	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+	ata_bmdma_freeze(ap);
+}
+
+static void nv_mcp55_thaw(struct ata_port *ap)
+{
+	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+	u32 mask;
+
+	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+	mask |= (NV_INT_MASK_MCP55 << shift);
+	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+	ata_bmdma_thaw(ap);
+}
+
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
 			unsigned long deadline)
 {
 	unsigned int dummy;
 
 	/* SATA hardreset fails to retrieve proper device signature on
 	 * some controllers.  Don't classify on hardreset.  For more
-	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
+	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
 	 */
-	return sata_std_hardreset(ap, &dummy, deadline);
+	return sata_std_hardreset(link, &dummy, deadline);
 }
 
 static void nv_error_handler(struct ata_port *ap)
@@ -1475,12 +1667,12 @@ static void nv_error_handler(struct ata_port *ap)
 static void nv_adma_error_handler(struct ata_port *ap)
 {
 	struct nv_adma_port_priv *pp = ap->private_data;
-	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
 		void __iomem *mmio = pp->ctl_block;
 		int i;
 		u16 tmp;
 
-		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
+		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
@@ -1488,16 +1680,17 @@ static void nv_adma_error_handler(struct ata_port *ap)
 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
 
-			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+			ata_port_printk(ap, KERN_ERR,
+				"EH in ADMA mode, notifier 0x%X "
 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
 				"next cpb count 0x%X next cpb idx 0x%x\n",
 				notifier, notifier_error, gen_ctl, status,
 				cpb_count, next_cpb_idx);
 
-			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
 				struct nv_adma_cpb *cpb = &pp->cpb[i];
-				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
-				    ap->sactive & (1 << i) )
+				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
+				    ap->link.sactive & (1 << i))
 					ata_port_printk(ap, KERN_ERR,
 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
 						i, cpb->ctl_flags, cpb->resp_flags);
@@ -1507,8 +1700,9 @@ static void nv_adma_error_handler(struct ata_port *ap)
 		/* Push us back into port register mode for error handling. */
 		nv_adma_register_mode(ap);
 
-		/* Mark all of the CPBs as invalid to prevent them from being executed */
-		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
+		/* Mark all of the CPBs as invalid to prevent them from
+		   being executed */
+		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
 
 		/* clear CPB fetch count */
@@ -1517,19 +1711,678 @@ static void nv_adma_error_handler(struct ata_port *ap)
 		/* Reset channel */
 		tmp = readw(mmio + NV_ADMA_CTL);
 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
-		readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 		udelay(1);
 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
-		readw( mmio + NV_ADMA_CTL );	/* flush posted write */
+		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 	}
 
 	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
 			   nv_hardreset, ata_std_postreset);
 }
 
-static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
-	static int printed_version = 0;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct defer_queue *dq = &pp->defer_queue;
+
+	/* queue is full */
+	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
+	dq->defer_bits |= (1 << qc->tag);
+	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
+}
+
+static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct defer_queue *dq = &pp->defer_queue;
+	unsigned int tag;
+
+	if (dq->head == dq->tail)	/* null queue */
+		return NULL;
+
+	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
+	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
+	WARN_ON(!(dq->defer_bits & (1 << tag)));
+	dq->defer_bits &= ~(1 << tag);
+
+	return ata_qc_from_tag(ap, tag);
+}
+
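
The two helpers above implement the SWNCQ defer queue: a ring of tags indexed by free-running head/tail counters, shadowed by a defer_bits bitmap so the interrupt path can test for pending work cheaply. A standalone sketch of the same bookkeeping (struct and function names hypothetical, ring size fixed at the driver's ATA_MAX_QUEUE of 32):

#define QDEPTH	32	/* ATA_MAX_QUEUE in the driver; must be a power of two */

struct tag_fifo {
	unsigned int	head;		/* free-running consumer counter */
	unsigned int	tail;		/* free-running producer counter */
	unsigned int	tag[QDEPTH];
	unsigned int	defer_bits;	/* bitmap of tags currently queued */
};

static void fifo_push(struct tag_fifo *f, unsigned int tag)
{
	/* caller guarantees the ring is not full (tail - head < QDEPTH) */
	f->defer_bits |= 1u << tag;
	f->tag[f->tail++ & (QDEPTH - 1)] = tag;
}

static int fifo_pop(struct tag_fifo *f)
{
	unsigned int tag;

	if (f->head == f->tail)
		return -1;			/* empty */

	tag = f->tag[f->head++ & (QDEPTH - 1)];
	f->defer_bits &= ~(1u << tag);
	return tag;
}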
+static void nv_swncq_fis_reinit(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	pp->dhfis_bits = 0;
+	pp->dmafis_bits = 0;
+	pp->sdbfis_bits = 0;
+	pp->ncq_flags = 0;
+}
+
+static void nv_swncq_pp_reinit(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct defer_queue *dq = &pp->defer_queue;
+
+	dq->head = 0;
+	dq->tail = 0;
+	dq->defer_bits = 0;
+	pp->qc_active = 0;
+	pp->last_issue_tag = ATA_TAG_POISON;
+	nv_swncq_fis_reinit(ap);
+}
+
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	writew(fis, pp->irq_block);
+}
+
+static void __ata_bmdma_stop(struct ata_port *ap)
+{
+	struct ata_queued_cmd qc;
+
+	qc.ap = ap;
+	ata_bmdma_stop(&qc);
+}
+
+static void nv_swncq_ncq_stop(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	unsigned int i;
+	u32 sactive;
+	u32 done_mask;
+
+	ata_port_printk(ap, KERN_ERR,
+			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
+			ap->qc_active, ap->link.sactive);
+	ata_port_printk(ap, KERN_ERR,
+		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
+		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
+		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
+		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
+
+	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
+			ap->ops->check_status(ap),
+			ioread8(ap->ioaddr.error_addr));
+
+	sactive = readl(pp->sactive_block);
+	done_mask = pp->qc_active ^ sactive;
+
+	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
+	for (i = 0; i < ATA_MAX_QUEUE; i++) {
+		u8 err = 0;
+		if (pp->qc_active & (1 << i))
+			err = 0;
+		else if (done_mask & (1 << i))
+			err = 1;
+		else
+			continue;
+
+		ata_port_printk(ap, KERN_ERR,
+				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
+				(pp->dhfis_bits >> i) & 0x1,
+				(pp->dmafis_bits >> i) & 0x1,
+				(pp->sdbfis_bits >> i) & 0x1,
+				(sactive >> i) & 0x1,
+				(err ? "error! tag doesn't exist" : " "));
+	}
+
+	nv_swncq_pp_reinit(ap);
+	ap->ops->irq_clear(ap);
+	__ata_bmdma_stop(ap);
+	nv_swncq_irq_clear(ap, 0xffff);
+}
+
+static void nv_swncq_error_handler(struct ata_port *ap)
+{
+	struct ata_eh_context *ehc = &ap->link.eh_context;
+
+	if (ap->link.sactive) {
+		nv_swncq_ncq_stop(ap);
+		ehc->i.action |= ATA_EH_HARDRESET;
+	}
+
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
+			   nv_hardreset, ata_std_postreset);
+}
+
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	u32 tmp;
+
+	/* clear irq */
+	writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+	/* disable irq */
+	writel(0, mmio + NV_INT_ENABLE_MCP55);
+
+	/* disable swncq */
+	tmp = readl(mmio + NV_CTL_MCP55);
+	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
+	writel(tmp, mmio + NV_CTL_MCP55);
+
+	return 0;
+}
+
+static int nv_swncq_port_resume(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	u32 tmp;
+
+	/* clear irq */
+	writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+	/* enable irq */
+	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+	/* enable swncq */
+	tmp = readl(mmio + NV_CTL_MCP55);
+	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+	return 0;
+}
+#endif
+
+static void nv_swncq_host_init(struct ata_host *host)
+{
+	u32 tmp;
+	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u8 regval;
+
+	/* disable  ECO 398 */
+	pci_read_config_byte(pdev, 0x7f, &regval);
+	regval &= ~(1 << 7);
+	pci_write_config_byte(pdev, 0x7f, regval);
+
+	/* enable swncq */
+	tmp = readl(mmio + NV_CTL_MCP55);
+	VPRINTK("HOST_CTL:0x%X\n", tmp);
+	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+	/* enable irq intr */
+	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
+	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
+	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+	/*  clear port irq */
+	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
+}
+
+static int nv_swncq_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct ata_device *dev;
+	int rc;
+	u8 rev;
+	u8 check_maxtor = 0;
+	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+	rc = ata_scsi_slave_config(sdev);
+	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+		/* Not a proper libata device, ignore */
+		return rc;
+
+	dev = &ap->link.device[sdev->id];
+	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
+		return rc;
+
+	/* if MCP51 and Maxtor, then disable ncq */
+	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
+		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
+		check_maxtor = 1;
+
+	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
+	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
+		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
+		pci_read_config_byte(pdev, 0x8, &rev);
+		if (rev <= 0xa2)
+			check_maxtor = 1;
+	}
+
+	if (!check_maxtor)
+		return rc;
+
+	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+	if (strncmp(model_num, "Maxtor", 6) == 0) {
+		ata_scsi_change_queue_depth(sdev, 1);
+		ata_dev_printk(dev, KERN_NOTICE,
+			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
+	}
+
+	return rc;
+}
+
+static int nv_swncq_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+	struct nv_swncq_port_priv *pp;
+	int rc;
+
+	rc = ata_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
+				      &pp->prd_dma, GFP_KERNEL);
+	if (!pp->prd)
+		return -ENOMEM;
+	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
+
+	ap->private_data = pp;
+	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
+	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
+	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
+
+	return 0;
+}
+
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (qc->tf.protocol != ATA_PROT_NCQ) {
+		ata_qc_prep(qc);
+		return;
+	}
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	nv_swncq_fill_sg(qc);
+}
+
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct ata_prd *prd;
+
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+
+	prd = pp->prd + ATA_MAX_PRD * qc->tag;
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		addr = (u32)sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			prd[idx].addr = cpu_to_le32(addr);
+			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx)
+		prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
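
nv_swncq_fill_sg() above breaks each scatterlist segment so that no PRD entry crosses a 64 KiB boundary, since the PRD length field is only 16 bits wide (with 0 conventionally meaning 64 KiB). A standalone sketch of the same splitting loop (names hypothetical; emit stands in for writing one PRD entry):

static unsigned int split_at_64k(u32 addr, u32 len,
				 void (*emit)(u32 addr, u32 len))
{
	unsigned int nr_entries = 0;

	while (len) {
		/* room left before the next 64 KiB boundary */
		u32 chunk = 0x10000 - (addr & 0xffff);

		if (chunk > len)
			chunk = len;

		emit(addr, chunk);
		addr += chunk;
		len  -= chunk;
		nr_entries++;
	}

	return nr_entries;
}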
+static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
+					  struct ata_queued_cmd *qc)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	if (qc == NULL)
+		return 0;
+
+	DPRINTK("Enter\n");
+
+	writel((1 << qc->tag), pp->sactive_block);
+	pp->last_issue_tag = qc->tag;
+	pp->dhfis_bits &= ~(1 << qc->tag);
+	pp->dmafis_bits &= ~(1 << qc->tag);
+	pp->qc_active |= (0x1 << qc->tag);
+
+	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
+	ap->ops->exec_command(ap, &qc->tf);
+
+	DPRINTK("Issued tag %u\n", qc->tag);
+
+	return 0;
+}
+
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	if (qc->tf.protocol != ATA_PROT_NCQ)
+		return ata_qc_issue_prot(qc);
+
+	DPRINTK("Enter\n");
+
+	if (!pp->qc_active)
+		nv_swncq_issue_atacmd(ap, qc);
+	else
+		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
+
+	return 0;
+}
+
+static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
+{
+	u32 serror;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+
+	ata_ehi_clear_desc(ehi);
+
+	/* AHCI needs SError cleared; otherwise, it might lock up */
+	sata_scr_read(&ap->link, SCR_ERROR, &serror);
+	sata_scr_write(&ap->link, SCR_ERROR, serror);
+
+	/* analyze @irq_stat */
+	if (fis & NV_SWNCQ_IRQ_ADDED)
+		ata_ehi_push_desc(ehi, "hot plug");
+	else if (fis & NV_SWNCQ_IRQ_REMOVED)
+		ata_ehi_push_desc(ehi, "hot unplug");
+
+	ata_ehi_hotplugged(ehi);
+
+	/* okay, let's hand over to EH */
+	ehi->serror |= serror;
+
+	ata_port_freeze(ap);
+}
+
+static int nv_swncq_sdbfis(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u32 sactive;
+	int nr_done = 0;
+	u32 done_mask;
+	int i;
+	u8 host_stat;
+	u8 lack_dhfis = 0;
+
+	host_stat = ap->ops->bmdma_status(ap);
+	if (unlikely(host_stat & ATA_DMA_ERR)) {
+		/* error when transferring data to/from memory */
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+		ehi->err_mask |= AC_ERR_HOST_BUS;
+		ehi->action |= ATA_EH_SOFTRESET;
+		return -EINVAL;
+	}
+
+	ap->ops->irq_clear(ap);
+	__ata_bmdma_stop(ap);
+
+	sactive = readl(pp->sactive_block);
+	done_mask = pp->qc_active ^ sactive;
+
+	if (unlikely(done_mask & sactive)) {
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
+				  "(%08x->%08x)", pp->qc_active, sactive);
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_HARDRESET;
+		return -EINVAL;
+	}
+	for (i = 0; i < ATA_MAX_QUEUE; i++) {
+		if (!(done_mask & (1 << i)))
+			continue;
+
+		qc = ata_qc_from_tag(ap, i);
+		if (qc) {
+			ata_qc_complete(qc);
+			pp->qc_active &= ~(1 << i);
+			pp->dhfis_bits &= ~(1 << i);
+			pp->dmafis_bits &= ~(1 << i);
+			pp->sdbfis_bits |= (1 << i);
+			nr_done++;
+		}
+	}
+
+	if (!ap->qc_active) {
+		DPRINTK("over\n");
+		nv_swncq_pp_reinit(ap);
+		return nr_done;
+	}
+
+	if (pp->qc_active & pp->dhfis_bits)
+		return nr_done;
+
+	if ((pp->ncq_flags & ncq_saw_backout) ||
+	    (pp->qc_active ^ pp->dhfis_bits))
+		/* if the controller can't get a Device-to-Host Register FIS,
+		 * the driver needs to reissue the command.
+		 */
+		lack_dhfis = 1;
+
+	DPRINTK("id 0x%x QC: qc_active 0x%x,"
+		"SWNCQ:qc_active 0x%X defer_bits %X "
+		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
+		ap->print_id, ap->qc_active, pp->qc_active,
+		pp->defer_queue.defer_bits, pp->dhfis_bits,
+		pp->dmafis_bits, pp->last_issue_tag);
+
+	nv_swncq_fis_reinit(ap);
+
+	if (lack_dhfis) {
+		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
+		nv_swncq_issue_atacmd(ap, qc);
+		return nr_done;
+	}
+
+	if (pp->defer_queue.defer_bits) {
+		/* send deferral queue command */
+		qc = nv_swncq_qc_from_dq(ap);
+		WARN_ON(qc == NULL);
+		nv_swncq_issue_atacmd(ap, qc);
+	}
+
+	return nr_done;
+}
+
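
The completion test in nv_swncq_sdbfis() above relies on the difference between the driver's view of issued tags (pp->qc_active) and the controller's SActive register: bits that have dropped out of SActive are finished commands, while a bit present in SActive that the driver never issued indicates an illegal transition. A standalone sketch of that check (names hypothetical):

/* qc_active: tags the driver has issued; sactive: tags the device still
 * reports outstanding.  XOR gives everything that changed; the subset the
 * driver issued is complete, anything else is a tag that appeared
 * without ever being issued. */
static u32 completed_tags(u32 qc_active, u32 sactive, int *illegal)
{
	u32 done_mask = qc_active ^ sactive;

	*illegal = (done_mask & sactive) != 0;
	return done_mask & qc_active;
}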
+static inline u32 nv_swncq_tag(struct ata_port *ap)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	u32 tag;
+
+	tag = readb(pp->tag_block) >> 2;
+	return (tag & 0x1f);
+}
+
+static int nv_swncq_dmafis(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	unsigned int rw;
+	u8 dmactl;
+	u32 tag;
+	struct nv_swncq_port_priv *pp = ap->private_data;
+
+	__ata_bmdma_stop(ap);
+	tag = nv_swncq_tag(ap);
+
+	DPRINTK("dma setup tag 0x%x\n", tag);
+	qc = ata_qc_from_tag(ap, tag);
+
+	if (unlikely(!qc))
+		return 0;
+
+	rw = qc->tf.flags & ATA_TFLAG_WRITE;
+
+	/* load PRD table addr. */
+	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
+		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~ATA_DMA_WR;
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+
+	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	return 1;
+}
+
+static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
+{
+	struct nv_swncq_port_priv *pp = ap->private_data;
+	struct ata_queued_cmd *qc;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	u32 serror;
+	u8 ata_stat;
+	int rc = 0;
+
+	ata_stat = ap->ops->check_status(ap);
+	nv_swncq_irq_clear(ap, fis);
+	if (!fis)
+		return;
+
+	if (ap->pflags & ATA_PFLAG_FROZEN)
+		return;
+
+	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
+		nv_swncq_hotplug(ap, fis);
+		return;
+	}
+
+	if (!pp->qc_active)
+		return;
+
+	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
+		return;
+	ap->ops->scr_write(ap, SCR_ERROR, serror);
+
+	if (ata_stat & ATA_ERR) {
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
+		ehi->err_mask |= AC_ERR_DEV;
+		ehi->serror |= serror;
+		ehi->action |= ATA_EH_SOFTRESET;
+		ata_port_freeze(ap);
+		return;
+	}
+
+	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
+		/* On a backout IRQ the driver must issue
+		 * the command again some time later.
+		 */
+		pp->ncq_flags |= ncq_saw_backout;
+	}
+
+	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
+		pp->ncq_flags |= ncq_saw_sdb;
+		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
+			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
+			ap->print_id, pp->qc_active, pp->dhfis_bits,
+			pp->dmafis_bits, readl(pp->sactive_block));
+		rc = nv_swncq_sdbfis(ap);
+		if (rc < 0)
+			goto irq_error;
+	}
+
+	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
+		/* The interrupt indicates the new command
+		 * was transmitted correctly to the drive.
+		 */
+		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
+		pp->ncq_flags |= ncq_saw_d2h;
+		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
+			ata_ehi_push_desc(ehi, "illegal fis transaction");
+			ehi->err_mask |= AC_ERR_HSM;
+			ehi->action |= ATA_EH_HARDRESET;
+			goto irq_error;
+		}
+
+		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
+		    !(pp->ncq_flags & ncq_saw_dmas)) {
+			ata_stat = ap->ops->check_status(ap);
+			if (ata_stat & ATA_BUSY)
+				goto irq_exit;
+
+			if (pp->defer_queue.defer_bits) {
+				DPRINTK("send next command\n");
+				qc = nv_swncq_qc_from_dq(ap);
+				nv_swncq_issue_atacmd(ap, qc);
+			}
+		}
+	}
+
+	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
+		/* program the DMA controller with the appropriate PRD buffers
+		 * and start the DMA transfer for the requested command.
+		 */
+		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
+		pp->ncq_flags |= ncq_saw_dmas;
+		rc = nv_swncq_dmafis(ap);
+	}
+
+irq_exit:
+	return;
+irq_error:
+	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
+	ata_port_freeze(ap);
+	return;
+}
+
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
+{
+	struct ata_host *host = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+	u32 irq_stat;
+
+	DPRINTK("ENTER\n");
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+			if (ap->link.sactive) {
+				nv_swncq_host_interrupt(ap, (u16)irq_stat);
+				handled = 1;
+			} else {
+				if (irq_stat)	/* reserve Hotplug */
+					nv_swncq_irq_clear(ap, 0xfff0);
+
+				handled += nv_host_intr(ap, (u8)irq_stat);
+			}
+		}
+		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 	struct ata_host *host;
 	struct nv_host_priv *hpriv;
@@ -1541,7 +2394,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
         // Make sure this is a SATA controller by counting the number of bars
         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
         // it's an IDE controller and we ignore it.
-	for (bar=0; bar<6; bar++)
+	for (bar = 0; bar < 6; bar++)
 		if (pci_resource_start(pdev, bar) == 0)
 			return -ENODEV;
 
@@ -1553,13 +2406,21 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 
 	/* determine type and allocate host */
-	if (type >= CK804 && adma_enabled) {
+	if (type == CK804 && adma_enabled) {
 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
 		type = ADMA;
 	}
 
+	if (type == SWNCQ) {
+		if (swncq_enabled)
+			dev_printk(KERN_NOTICE, &pdev->dev,
+				   "Using SWNCQ mode\n");
+		else
+			type = GENERIC;
+	}
+
 	ppi[0] = &nv_port_info[type];
-	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
@@ -1599,7 +2460,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		rc = nv_adma_host_init(host);
 		if (rc)
 			return rc;
-	}
+	} else if (type == SWNCQ)
+		nv_swncq_host_init(host);
 
 	pci_set_master(pdev);
 	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
@@ -1614,37 +2476,37 @@ static int nv_pci_device_resume(struct pci_dev *pdev)
 	int rc;
 
 	rc = ata_pci_device_do_resume(pdev);
-	if(rc)
+	if (rc)
 		return rc;
 
 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		if(hpriv->type >= CK804) {
+		if (hpriv->type >= CK804) {
 			u8 regval;
 
 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
 		}
-		if(hpriv->type == ADMA) {
+		if (hpriv->type == ADMA) {
 			u32 tmp32;
 			struct nv_adma_port_priv *pp;
 			/* enable/disable ADMA on the ports appropriately */
 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
 
 			pp = host->ports[0]->private_data;
-			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
-				 	   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
 			else
 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
-				 	   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
 			pp = host->ports[1]->private_data;
-			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
-				 	   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
 			else
 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
-				 	   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
 
 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
 		}
@@ -1698,3 +2560,6 @@ module_init(nv_init);
 module_exit(nv_exit);
 module_param_named(adma, adma_enabled, bool, 0444);
 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
+module_param_named(swncq, swncq_enabled, bool, 0444);
+MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
+
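
With the hunks above, SWNCQ stays disabled unless requested at module load time (for example, modprobe sata_nv swncq=1), while ADMA keeps its existing default of enabled; on controllers that would otherwise use SWNCQ, leaving the parameter unset simply falls back to the GENERIC port type.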
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index d9227d0..31592cf 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,6 +2,7 @@
  *  sata_promise.c - Promise SATA
  *
  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *		    Mikael Pettersson <mikpe@it.uu.se>
  *  		    Please ALWAYS copy linux-ide@vger.kernel.org
  *		    on emails.
  *
@@ -45,12 +46,12 @@
 #include "sata_promise.h"
 
 #define DRV_NAME	"sata_promise"
-#define DRV_VERSION	"2.07"
-
+#define DRV_VERSION	"2.11"
 
 enum {
 	PDC_MAX_PORTS		= 4,
 	PDC_MMIO_BAR		= 3,
+	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
 
 	/* register offsets */
 	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
@@ -84,17 +85,19 @@ enum {
 	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
 	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
 	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
-	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR,
-	PDC_ERR_MASK		= (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC_OVERRUN_ERR
-				   | PDC_UNDERRUN_ERR | PDC_DRIVE_ERR | PDC_PCI_SYS_ERR
-				   | PDC1_ERR_MASK | PDC2_ERR_MASK),
+	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
+				  PDC2_ATA_DMA_CNT_ERR,
+	PDC_ERR_MASK		= PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
+				  PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
+				  PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
+				  PDC1_ERR_MASK | PDC2_ERR_MASK,
 
 	board_2037x		= 0,	/* FastTrak S150 TX2plus */
 	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
 	board_20319		= 2,	/* FastTrak S150 TX4 */
 	board_20619		= 3,	/* FastTrak TX4000 */
 	board_2057x		= 4,	/* SATAII150 Tx2plus */
-	board_2057x_pata	= 5,	/* SATAII150 Tx2plus */
+	board_2057x_pata	= 5,	/* SATAII150 Tx2plus PATA port */
 	board_40518		= 6,	/* SATAII150 Tx4 */
 
 	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */
@@ -124,14 +127,13 @@ enum {
 	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
 };
 
-
 struct pdc_port_priv {
 	u8			*pkt;
 	dma_addr_t		pkt_dma;
 };
 
-static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static int pdc_common_port_start(struct ata_port *ap);
 static int pdc_sata_port_start(struct ata_port *ap);
@@ -157,7 +159,7 @@ static struct scsi_host_template pdc_ata_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= PDC_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -169,7 +171,6 @@ static struct scsi_host_template pdc_ata_sht = {
 };
 
 static const struct ata_port_operations pdc_sata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= pdc_tf_load_mmio,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -187,7 +188,6 @@ static const struct ata_port_operations pdc_sata_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= pdc_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= pdc_sata_scr_read,
 	.scr_write		= pdc_sata_scr_write,
@@ -196,7 +196,6 @@ static const struct ata_port_operations pdc_sata_ops = {
 
 /* First-generation chips need a more restrictive ->check_atapi_dma op */
 static const struct ata_port_operations pdc_old_sata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= pdc_tf_load_mmio,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -214,7 +213,6 @@ static const struct ata_port_operations pdc_old_sata_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= pdc_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= pdc_sata_scr_read,
 	.scr_write		= pdc_sata_scr_write,
@@ -222,7 +220,6 @@ static const struct ata_port_operations pdc_old_sata_ops = {
 };
 
 static const struct ata_port_operations pdc_pata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= pdc_tf_load_mmio,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -240,78 +237,77 @@ static const struct ata_port_operations pdc_pata_ops = {
 	.data_xfer		= ata_data_xfer,
 	.irq_clear		= pdc_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= pdc_common_port_start,
 };
 
 static const struct ata_port_info pdc_port_info[] = {
-	/* board_2037x */
+	[board_2037x] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
 				  PDC_FLAG_SATA_PATA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_old_sata_ops,
 	},
 
-	/* board_2037x_pata */
+	[board_2037x_pata] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_pata_ops,
 	},
 
-	/* board_20319 */
+	[board_20319] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
 				  PDC_FLAG_4_PORTS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_old_sata_ops,
 	},
 
-	/* board_20619 */
+	[board_20619] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
 				  PDC_FLAG_4_PORTS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_pata_ops,
 	},
 
-	/* board_2057x */
+	[board_2057x] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
 				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_sata_ops,
 	},
 
-	/* board_2057x_pata */
+	[board_2057x_pata] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
 				  PDC_FLAG_GEN_II,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_pata_ops,
 	},
 
-	/* board_40518 */
+	[board_40518] =
 	{
 		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
 				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_sata_ops,
 	},
 };
@@ -330,8 +326,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 
 	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
 	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
-	{ PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
-	{ PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
+	{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
+	{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
 	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
 	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
 
@@ -340,7 +336,6 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 	{ }	/* terminate list */
 };
 
-
 static struct pci_driver pdc_ata_pci_driver = {
 	.name			= DRV_NAME,
 	.id_table		= pdc_ata_pci_tbl,
@@ -348,7 +343,6 @@ static struct pci_driver pdc_ata_pci_driver = {
 	.remove			= ata_pci_remove_one,
 };
 
-
 static int pdc_common_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->host->dev;
@@ -382,7 +376,7 @@ static int pdc_sata_port_start(struct ata_port *ap)
 
 	/* fix up PHYMODE4 align timing */
 	if (ap->flags & PDC_FLAG_GEN_II) {
-		void __iomem *mmio = (void __iomem *) ap->ioaddr.scr_addr;
+		void __iomem *mmio = ap->ioaddr.scr_addr;
 		unsigned int tmp;
 
 		tmp = readl(mmio + 0x014);
@@ -418,7 +412,7 @@ static void pdc_reset_port(struct ata_port *ap)
 static int pdc_pata_cable_detect(struct ata_port *ap)
 {
 	u8 tmp;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
+	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
 
 	tmp = readb(mmio);
 	if (tmp & 0x01)
@@ -431,20 +425,20 @@ static int pdc_sata_cable_detect(struct ata_port *ap)
 	return ATA_CBL_SATA;
 }
 
-static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
-	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+		return -EINVAL;
+	*val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
-
-static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
-			       u32 val)
+static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return;
+		return -EINVAL;
 	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
 static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
@@ -479,7 +473,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
 	buf32[2] = 0;				/* no next-packet */
 
 	/* select drive */
-	if (sata_scr_valid(ap)) {
+	if (sata_scr_valid(&ap->link)) {
 		dev_sel = PDC_DEVICE_SATA;
 	} else {
 		dev_sel = ATA_DEVICE_OBS;
@@ -531,6 +525,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
 	memcpy(buf+31, cdb, cdb_len);
 }
 
+/**
+ *	pdc_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *	Make sure hardware does not choke on it.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ */
+static void pdc_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
+	const u32 SG_COUNT_ASIC_BUG = 41*4;
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	WARN_ON(qc->__sg == NULL);
+	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx) {
+		u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+
+		if (len > SG_COUNT_ASIC_BUG) {
+			u32 addr;
+
+			VPRINTK("Splitting last PRD.\n");
+
+			addr = le32_to_cpu(ap->prd[idx - 1].addr);
+			ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+
+			addr = addr + len - SG_COUNT_ASIC_BUG;
+			len = SG_COUNT_ASIC_BUG;
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[idx].flags_len = cpu_to_le32(len);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+		}
+
+		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	}
+}
+
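
The tail of pdc_fill_sg() is the ASIC workaround that the PDC_MAX_PRD comment refers to: SG_COUNT_ASIC_BUG is 41*4 = 164 bytes, and whenever the final PRD entry is longer than that it is split so the table always ends with a short entry. For instance, a last segment of 8192 bytes becomes one entry of 8028 bytes followed by a 164-byte entry carrying the ATA_PRD_EOT flag; this is also why sg_tablesize is advertised as LIBATA_MAX_PRD - 1, reserving a slot for the extra entry.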
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pdc_port_priv *pp = qc->ap->private_data;
@@ -540,7 +612,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		/* fall through */
 
 	case ATA_PROT_NODATA:
@@ -556,11 +628,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 		break;
 
 	case ATA_PROT_ATAPI:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
-		ata_qc_prep(qc);
+		pdc_fill_sg(qc);
 		/*FALLTHROUGH*/
 	case ATA_PROT_ATAPI_NODATA:
 		pdc_atapi_pkt(qc);
@@ -573,7 +645,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 
 static void pdc_freeze(struct ata_port *ap)
 {
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	void __iomem *mmio = ap->ioaddr.cmd_addr;
 	u32 tmp;
 
 	tmp = readl(mmio + PDC_CTLSTAT);
@@ -585,7 +657,7 @@ static void pdc_freeze(struct ata_port *ap)
 
 static void pdc_thaw(struct ata_port *ap)
 {
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+	void __iomem *mmio = ap->ioaddr.cmd_addr;
 	u32 tmp;
 
 	/* clear IRQ */
@@ -630,7 +702,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
 static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
 			   u32 port_status, u32 err_mask)
 {
-	struct ata_eh_info *ehi = &ap->eh_info;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	unsigned int ac_err_mask = 0;
 
 	ata_ehi_clear_desc(ehi);
@@ -647,8 +719,12 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
 			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
 		ac_err_mask |= AC_ERR_HOST_BUS;
 
-	if (sata_scr_valid(ap))
-		ehi->serror |= pdc_sata_scr_read(ap, SCR_ERROR);
+	if (sata_scr_valid(&ap->link)) {
+		u32 serror;
+
+		pdc_sata_scr_read(ap, SCR_ERROR, &serror);
+		ehi->serror |= serror;
+	}
 
 	qc->err_mask |= ac_err_mask;
 
@@ -657,8 +733,8 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
 	ata_port_abort(ap);
 }
 
-static inline unsigned int pdc_host_intr( struct ata_port *ap,
-                                          struct ata_queued_cmd *qc)
+static inline unsigned int pdc_host_intr(struct ata_port *ap,
+					 struct ata_queued_cmd *qc)
 {
 	unsigned int handled = 0;
 	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
@@ -685,10 +761,10 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
 		handled = 1;
 		break;
 
-        default:
+	default:
 		ap->stats.idle_irq++;
 		break;
-        }
+	}
 
 	return handled;
 }
@@ -701,7 +777,20 @@ static void pdc_irq_clear(struct ata_port *ap)
 	readl(mmio + PDC_INT_SEQMASK);
 }
 
-static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
+static int pdc_is_sataii_tx4(unsigned long flags)
+{
+	const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
+	return (flags & mask) == mask;
+}
+
+static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
+					  int is_sataii_tx4)
+{
+	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
+	return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
+}
+
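
pdc_is_sataii_tx4() and pdc_port_no_to_ata_no() centralise the SATAII150 Tx4 port-numbering workaround that the probe code previously open-coded: on gen-II four-port boards, host port i drives ATA channel sataii_tx4_port_remap[i]. The interrupt handler below uses the remapped number to pick that channel's hotplug bits, and the rewritten probe loop uses it to compute register offsets, for example host port 0 maps to channel 3, giving a taskfile block at 0x200 + 3 * 0x80 = 0x380 and an SCR block at 0x400 + 3 * 0x100 = 0x700.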
+static irqreturn_t pdc_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
 	struct ata_host *host = dev_instance;
 	struct ata_port *ap;
@@ -709,6 +798,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *p
 	unsigned int i, tmp;
 	unsigned int handled = 0;
 	void __iomem *mmio_base;
+	unsigned int hotplug_offset, ata_no;
+	u32 hotplug_status;
+	int is_sataii_tx4;
 
 	VPRINTK("ENTER\n");
 
@@ -719,10 +811,20 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *p
 
 	mmio_base = host->iomap[PDC_MMIO_BAR];
 
+	/* read and clear hotplug flags for all ports */
+	if (host->ports[0]->flags & PDC_FLAG_GEN_II)
+		hotplug_offset = PDC2_SATA_PLUG_CSR;
+	else
+		hotplug_offset = PDC_SATA_PLUG_CSR;
+	hotplug_status = readl(mmio_base + hotplug_offset);
+	if (hotplug_status & 0xff)
+		writel(hotplug_status | 0xff, mmio_base + hotplug_offset);
+	hotplug_status &= 0xff;	/* clear uninteresting bits */
+
 	/* reading should also clear interrupts */
 	mask = readl(mmio_base + PDC_INT_SEQMASK);
 
-	if (mask == 0xffffffff) {
+	if (mask == 0xffffffff && hotplug_status == 0) {
 		VPRINTK("QUICK EXIT 2\n");
 		return IRQ_NONE;
 	}
@@ -730,22 +832,40 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *p
 	spin_lock(&host->lock);
 
 	mask &= 0xffff;		/* only 16 tags possible */
-	if (!mask) {
+	if (mask == 0 && hotplug_status == 0) {
 		VPRINTK("QUICK EXIT 3\n");
 		goto done_irq;
 	}
 
 	writel(mask, mmio_base + PDC_INT_SEQMASK);
 
+	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
+
 	for (i = 0; i < host->n_ports; i++) {
 		VPRINTK("port %u\n", i);
 		ap = host->ports[i];
+
+		/* check for a plug or unplug event */
+		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+		tmp = hotplug_status & (0x11 << ata_no);
+		if (tmp && ap &&
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct ata_eh_info *ehi = &ap->link.eh_info;
+			ata_ehi_clear_desc(ehi);
+			ata_ehi_hotplugged(ehi);
+			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
+			ata_port_freeze(ap);
+			++handled;
+			continue;
+		}
+
+		/* check for a packet interrupt */
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
-			qc = ata_qc_from_tag(ap, ap->active_tag);
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc_host_intr(ap, qc);
 		}
@@ -802,16 +922,16 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
 
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
-	WARN_ON (tf->protocol == ATA_PROT_DMA ||
-		 tf->protocol == ATA_PROT_ATAPI_DMA);
+	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+		tf->protocol == ATA_PROT_ATAPI_DMA);
 	ata_tf_load(ap, tf);
 }
 
-
-static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+static void pdc_exec_command_mmio(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
 {
-	WARN_ON (tf->protocol == ATA_PROT_DMA ||
-		 tf->protocol == ATA_PROT_ATAPI_DMA);
+	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+		tf->protocol == ATA_PROT_ATAPI_DMA);
 	ata_exec_command(ap, tf);
 }
 
@@ -834,8 +954,11 @@ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
 	}
 	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
 	if (scsicmd[0] == WRITE_10) {
-		unsigned int lba;
-		lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5];
+		unsigned int lba =
+			(scsicmd[2] << 24) |
+			(scsicmd[3] << 16) |
+			(scsicmd[4] << 8) |
+			scsicmd[5];
 		if (lba >= 0xFFFF4FA2)
 			pio = 1;
 	}
@@ -867,7 +990,6 @@ static void pdc_ata_setup_port(struct ata_port *ap,
 	ap->ioaddr.scr_addr		= scr_addr;
 }
 
-
 static void pdc_host_init(struct ata_host *host)
 {
 	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
@@ -897,9 +1019,9 @@ static void pdc_host_init(struct ata_host *host)
 	tmp = readl(mmio + hotplug_offset);
 	writel(tmp | 0xff, mmio + hotplug_offset);
 
-	/* mask plug/unplug ints */
+	/* unmask plug/unplug ints */
 	tmp = readl(mmio + hotplug_offset);
-	writel(tmp | 0xff0000, mmio + hotplug_offset);
+	writel(tmp & ~0xff0000, mmio + hotplug_offset);
 
 	/* don't initialise TBG or SLEW on 2nd generation chips */
 	if (is_gen2)
@@ -921,7 +1043,8 @@ static void pdc_host_init(struct ata_host *host)
 	writel(tmp, mmio + PDC_SLEW_CTL);
 }
 
-static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pdc_ata_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
 {
 	static int printed_version;
 	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
@@ -955,10 +1078,8 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 
 	if (pi->flags & PDC_FLAG_SATA_PATA) {
 		u8 tmp = readb(base + PDC_FLASH_CTL+1);
-		if (!(tmp & 0x80)) {
+		if (!(tmp & 0x80))
 			ppi[n_ports++] = pi + 1;
-			dev_printk(KERN_INFO, &pdev->dev, "PATA port found\n");
-		}
 	}
 
 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
@@ -968,22 +1089,17 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	}
 	host->iomap = pcim_iomap_table(pdev);
 
-	is_sataii_tx4 = 0;
-	if ((pi->flags & (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) == (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) {
-		is_sataii_tx4 = 1;
-		dev_printk(KERN_INFO, &pdev->dev, "applying SATAII TX4 port numbering workaround\n");
-	}
+	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
 	for (i = 0; i < host->n_ports; i++) {
-		static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
-		int ata_nr;
+		struct ata_port *ap = host->ports[i];
+		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+		unsigned int port_offset = 0x200 + ata_no * 0x80;
+		unsigned int scr_offset = 0x400 + ata_no * 0x100;
 
-		ata_nr = i;
-		if (is_sataii_tx4)
-			ata_nr = sataii_tx4_port_remap[i];
+		pdc_ata_setup_port(ap, base + port_offset, base + scr_offset);
 
-		pdc_ata_setup_port(host->ports[i],
-				   base + 0x200 + ata_nr * 0x80,
-				   base + 0x400 + ata_nr * 0x100);
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, port_offset, "port");
 	}
 
 	/* initialize adapter */
@@ -1002,19 +1118,16 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 				 &pdc_ata_sht);
 }
 
-
 static int __init pdc_ata_init(void)
 {
 	return pci_register_driver(&pdc_ata_pci_driver);
 }
 
-
 static void __exit pdc_ata_exit(void)
 {
 	pci_unregister_driver(&pdc_ata_pci_driver);
 }
 
-
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 258aa1e..ba6eb62 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -39,7 +39,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_qstor"
-#define DRV_VERSION	"0.08"
+#define DRV_VERSION	"0.09"
 
 enum {
 	QS_MMIO_BAR		= 4,
@@ -103,7 +103,7 @@ enum {
 	QS_DMA_BOUNDARY		= ~0UL
 };
 
-typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t;
+typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
 
 struct qs_port_priv {
 	u8			*pkt;
@@ -111,19 +111,20 @@ struct qs_port_priv {
 	qs_state_t		state;
 };
 
-static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int qs_port_start(struct ata_port *ap);
 static void qs_host_stop(struct ata_host *host);
-static void qs_phy_reset(struct ata_port *ap);
 static void qs_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
 static void qs_bmdma_stop(struct ata_queued_cmd *qc);
 static u8 qs_bmdma_status(struct ata_port *ap);
 static void qs_irq_clear(struct ata_port *ap);
-static void qs_eng_timeout(struct ata_port *ap);
+static void qs_freeze(struct ata_port *ap);
+static void qs_thaw(struct ata_port *ap);
+static void qs_error_handler(struct ata_port *ap);
 
 static struct scsi_host_template qs_ata_sht = {
 	.module			= THIS_MODULE,
@@ -135,7 +136,6 @@ static struct scsi_host_template qs_ata_sht = {
 	.sg_tablesize		= QS_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
-	//FIXME .use_clustering		= ATA_SHT_USE_CLUSTERING,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.proc_name		= DRV_NAME,
 	.dma_boundary		= QS_DMA_BOUNDARY,
@@ -145,21 +145,20 @@ static struct scsi_host_template qs_ata_sht = {
 };
 
 static const struct ata_port_operations qs_ata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
 	.check_atapi_dma	= qs_check_atapi_dma,
 	.exec_command		= ata_exec_command,
 	.dev_select		= ata_std_dev_select,
-	.phy_reset		= qs_phy_reset,
 	.qc_prep		= qs_qc_prep,
 	.qc_issue		= qs_qc_issue,
 	.data_xfer		= ata_data_xfer,
-	.eng_timeout		= qs_eng_timeout,
+	.freeze			= qs_freeze,
+	.thaw			= qs_thaw,
+	.error_handler		= qs_error_handler,
 	.irq_clear		= qs_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= qs_scr_read,
 	.scr_write		= qs_scr_write,
 	.port_start		= qs_port_start,
@@ -172,11 +171,9 @@ static const struct ata_port_info qs_port_info[] = {
 	/* board_2068_idx */
 	{
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_SATA_RESET |
-				  //FIXME ATA_FLAG_SRST |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x10, /* pio4 */
-		.udma_mask	= 0x7f, /* udma0-6 */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &qs_ata_ops,
 	},
 };
@@ -222,7 +219,9 @@ static void qs_irq_clear(struct ata_port *ap)
 static inline void qs_enter_reg_mode(struct ata_port *ap)
 {
 	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+	struct qs_port_priv *pp = ap->private_data;
 
+	pp->state = qs_state_mmio;
 	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
 	readb(chan + QS_CCT_CTR0);        /* flush */
 }
@@ -236,37 +235,51 @@ static inline void qs_reset_channel_logic(struct ata_port *ap)
 	qs_enter_reg_mode(ap);
 }
 
-static void qs_phy_reset(struct ata_port *ap)
+static void qs_freeze(struct ata_port *ap)
 {
-	struct qs_port_priv *pp = ap->private_data;
+	u8 __iomem *mmio_base = qs_mmio_base(ap->host);
 
-	pp->state = qs_state_idle;
-	qs_reset_channel_logic(ap);
-	sata_phy_reset(ap);
+	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+	qs_enter_reg_mode(ap);
 }
 
-static void qs_eng_timeout(struct ata_port *ap)
+static void qs_thaw(struct ata_port *ap)
 {
-	struct qs_port_priv *pp = ap->private_data;
+	u8 __iomem *mmio_base = qs_mmio_base(ap->host);
+
+	qs_enter_reg_mode(ap);
+	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
+}
+
+static int qs_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
 
-	if (pp->state != qs_state_idle) /* healthy paranoia */
-		pp->state = qs_state_mmio;
 	qs_reset_channel_logic(ap);
-	ata_eng_timeout(ap);
+	return ata_std_prereset(link, deadline);
 }
 
-static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return ~0U;
-	return readl(ap->ioaddr.scr_addr + (sc_reg * 8));
+		return -EINVAL;
+	*val = readl(ap->ioaddr.scr_addr + (sc_reg * 8));
+	return 0;
+}
+
+static void qs_error_handler(struct ata_port *ap)
+{
+	qs_enter_reg_mode(ap);
+	ata_do_eh(ap, qs_prereset, ata_std_softreset, NULL,
+		  ata_std_postreset);
 }
 
-static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return;
+		return -EINVAL;
 	writel(val, ap->ioaddr.scr_addr + (sc_reg * 8));
+	return 0;
 }
 
 static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
@@ -337,7 +350,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 	buf[28] = dflags;
 
 	/* frame information structure (FIS) */
-	ata_tf_to_fis(&qc->tf, &buf[32], 0);
+	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
 }
 
 static inline void qs_packet_start(struct ata_queued_cmd *qc)
@@ -359,7 +372,6 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
-
 		pp->state = qs_state_pkt;
 		qs_packet_start(qc);
 		return 0;
@@ -376,6 +388,26 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
 	return ata_qc_issue_prot(qc);
 }
 
+static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
+{
+	qc->err_mask |= ac_err_mask(status);
+
+	if (!qc->err_mask) {
+		ata_qc_complete(qc);
+	} else {
+		struct ata_port    *ap  = qc->ap;
+		struct ata_eh_info *ehi = &ap->link.eh_info;
+
+		ata_ehi_clear_desc(ehi);
+		ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+		if (qc->err_mask == AC_ERR_DEV)
+			ata_port_abort(ap);
+		else
+			ata_port_freeze(ap);
+	}
+}
+
 static inline unsigned int qs_intr_pkt(struct ata_host *host)
 {
 	unsigned int handled = 0;
@@ -402,15 +434,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
 					continue;
-				qc = ata_qc_from_tag(ap, ap->active_tag);
+				qc = ata_qc_from_tag(ap, ap->link.active_tag);
 				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 					switch (sHST) {
 					case 0: /* successful CPB */
 					case 3: /* device error */
-						pp->state = qs_state_idle;
 						qs_enter_reg_mode(qc->ap);
-						qc->err_mask |= ac_err_mask(sDST);
-						ata_qc_complete(qc);
+						qs_do_or_die(qc, sDST);
 						break;
 					default:
 						break;
@@ -432,25 +462,27 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
 		if (ap &&
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
-			struct qs_port_priv *pp = ap->private_data;
-			if (!pp || pp->state != qs_state_mmio)
-				continue;
-			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-
-				/* check main status, clearing INTRQ */
-				u8 status = ata_check_status(ap);
-				if ((status & ATA_BUSY))
-					continue;
-				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
-					ap->print_id, qc->tf.protocol, status);
-
-				/* complete taskfile transaction */
-				pp->state = qs_state_idle;
-				qc->err_mask |= ac_err_mask(status);
-				ata_qc_complete(qc);
+			struct qs_port_priv *pp;
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
+				/*
+				 * The qstor hardware generates spurious
+				 * interrupts from time to time when switching
+				 * in and out of packet mode.
+				 * There's no obvious way to know if we're
+				 * here now due to that, so just ack the irq
+				 * and pretend we knew it was ours.. (ugh).
+				 * This does not affect packet mode.
+				 */
+				ata_check_status(ap);
 				handled = 1;
+				continue;
 			}
+			pp = ap->private_data;
+			if (!pp || pp->state != qs_state_mmio)
+				continue;
+			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+				handled |= ata_host_intr(ap, qc);
 		}
 	}
 	return handled;
@@ -460,12 +492,13 @@ static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
 	struct ata_host *host = dev_instance;
 	unsigned int handled = 0;
+	unsigned long flags;
 
 	VPRINTK("ENTER\n");
 
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	handled  = qs_intr_pkt(host) | qs_intr_mmio(host);
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	VPRINTK("EXIT\n");
 
@@ -502,7 +535,6 @@ static int qs_port_start(struct ata_port *ap)
 	rc = ata_port_start(ap);
 	if (rc)
 		return rc;
-	qs_enter_reg_mode(ap);
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
@@ -513,6 +545,7 @@ static int qs_port_start(struct ata_port *ap)
 	memset(pp->pkt, 0, QS_PKT_BYTES);
 	ap->private_data = pp;
 
+	qs_enter_reg_mode(ap);
 	addr = (u64)pp->pkt_dma;
 	writel((u32) addr,        chan + QS_CCF_CPBA);
 	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
@@ -635,9 +668,14 @@ static int qs_ata_init_one(struct pci_dev *pdev,
 		return rc;
 
 	for (port_no = 0; port_no < host->n_ports; ++port_no) {
-		void __iomem *chan =
-			host->iomap[QS_MMIO_BAR] + (port_no * 0x4000);
-		qs_ata_setup_port(&host->ports[port_no]->ioaddr, chan);
+		struct ata_port *ap = host->ports[port_no];
+		unsigned int offset = port_no * 0x4000;
+		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;
+
+		qs_ata_setup_port(&ap->ioaddr, chan);
+
+		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
 	}
 
 	/* initialize adapter */
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 2e4356a..ee0ae7c 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -46,7 +46,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_sil"
-#define DRV_VERSION	"2.2"
+#define DRV_VERSION	"2.3"
 
 enum {
 	SIL_MMIO_BAR		= 5,
@@ -59,7 +59,8 @@ enum {
 	SIL_FLAG_MOD15WRITE	= (1 << 30),
 
 	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
-				  ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
+				  ATA_FLAG_MMIO,
+	SIL_DFL_LINK_FLAGS	= ATA_LFLAG_HRST_TO_RESUME,
 
 	/*
 	 * Controller IDs
@@ -110,14 +111,14 @@ enum {
 	SIL_QUIRK_UDMA5MAX	= (1 << 1),
 };
 
-static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 #ifdef CONFIG_PM
 static int sil_pci_device_resume(struct pci_dev *pdev);
 #endif
 static void sil_dev_config(struct ata_device *dev);
-static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
-static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed);
+static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
 static void sil_freeze(struct ata_port *ap);
 static void sil_thaw(struct ata_port *ap);
 
@@ -137,7 +138,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
 
 /* TODO firmware versions should be added - eric */
 static const struct sil_drivelist {
-	const char * product;
+	const char *product;
 	unsigned int quirk;
 } sil_blacklist [] = {
 	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
@@ -185,7 +186,6 @@ static struct scsi_host_template sil_sht = {
 };
 
 static const struct ata_port_operations sil_ops = {
-	.port_disable		= ata_port_disable,
 	.dev_config		= sil_dev_config,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
@@ -206,7 +206,6 @@ static const struct ata_port_operations sil_ops = {
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= sil_scr_read,
 	.scr_write		= sil_scr_write,
 	.port_start		= ata_port_start,
@@ -216,34 +215,38 @@ static const struct ata_port_info sil_port_info[] = {
 	/* sil_3112 */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
+		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,
 		.port_ops	= &sil_ops,
 	},
 	/* sil_3112_no_sata_irq */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
 				  SIL_FLAG_NO_SATA_IRQ,
+		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,
 		.port_ops	= &sil_ops,
 	},
 	/* sil_3512 */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,
 		.port_ops	= &sil_ops,
 	},
 	/* sil_3114 */
 	{
 		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+		.link_flags	= SIL_DFL_LINK_FLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,
 		.port_ops	= &sil_ops,
 	},
 };
@@ -262,8 +265,9 @@ static const struct {
 	unsigned long sfis_cfg;	/* SATA FIS reception config register */
 } sil_port[] = {
 	/* port 0 ... */
-	{ 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
-	{ 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
+	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
+	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
+	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
 	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
 	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
 	/* ... port 3 */
@@ -275,7 +279,7 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static int slow_down = 0;
+static int slow_down;
 module_param(slow_down, int, 0444);
 MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
 
@@ -289,35 +293,33 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
 
 /**
  *	sil_set_mode		-	wrap set_mode functions
- *	@ap: port to set up
+ *	@link: link to set up
  *	@r_failed: returned device when we fail
  *
  *	Wrap the libata method for device setup as after the setup we need
  *	to inspect the results and do some configuration work
  */
 
-static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed)
+static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
 {
-	struct ata_host *host = ap->host;
-	struct ata_device *dev;
-	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
+	struct ata_port *ap = link->ap;
+	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
 	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
-	u32 tmp, dev_mode[2];
-	unsigned int i;
+	struct ata_device *dev;
+	u32 tmp, dev_mode[2] = { };
 	int rc;
 
-	rc = ata_do_set_mode(ap, r_failed);
+	rc = ata_do_set_mode(link, r_failed);
 	if (rc)
 		return rc;
 
-	for (i = 0; i < 2; i++) {
-		dev = &ap->device[i];
+	ata_link_for_each_dev(dev, link) {
 		if (!ata_dev_enabled(dev))
-			dev_mode[i] = 0;	/* PIO0/1/2 */
+			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
 		else if (dev->flags & ATA_DFLAG_PIO)
-			dev_mode[i] = 1;	/* PIO3/4 */
+			dev_mode[dev->devno] = 1;	/* PIO3/4 */
 		else
-			dev_mode[i] = 3;	/* UDMA */
+			dev_mode[dev->devno] = 3;	/* UDMA */
 		/* value 2 indicates MDMA */
 	}
 
@@ -330,7 +332,8 @@ static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed)
 	return 0;
 }
 
-static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
+static inline void __iomem *sil_scr_addr(struct ata_port *ap,
+					 unsigned int sc_reg)
 {
 	void __iomem *offset = ap->ioaddr.scr_addr;
 
@@ -349,25 +352,32 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_re
 	return NULL;
 }
 
-static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
-	if (mmio)
-		return readl(mmio);
-	return 0xffffffffU;
+
+	if (mmio) {
+		*val = readl(mmio);
+		return 0;
+	}
+	return -EINVAL;
 }
 
-static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
-	if (mmio)
+
+	if (mmio) {
 		writel(val, mmio);
+		return 0;
+	}
+	return -EINVAL;
 }
 
 static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 {
-	struct ata_eh_info *ehi = &ap->eh_info;
-	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	u8 status;
 
 	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
@@ -377,26 +387,22 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 		 * controllers continue to assert IRQ as long as
 		 * SError bits are pending.  Clear SError immediately.
 		 */
-		serror = sil_scr_read(ap, SCR_ERROR);
+		sil_scr_read(ap, SCR_ERROR, &serror);
 		sil_scr_write(ap, SCR_ERROR, serror);
 
-		/* Trigger hotplug and accumulate SError only if the
-		 * port isn't already frozen.  Otherwise, PHY events
-		 * during hardreset makes controllers with broken SIEN
-		 * repeat probing needlessly.
+		/* Sometimes spurious interrupts occur; double-check
+		 * that it is PHYRDY CHG.
 		 */
-		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
-			ata_ehi_hotplugged(&ap->eh_info);
-			ap->eh_info.serror |= serror;
+		if (serror & SERR_PHYRDY_CHG) {
+			ap->link.eh_info.serror |= serror;
+			goto freeze;
 		}
 
-		goto freeze;
+		if (!(bmdma2 & SIL_DMA_COMPLETE))
+			return;
 	}
 
-	if (unlikely(!qc))
-		goto freeze;
-
-	if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
+	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
 		/* this sometimes happens, just clear IRQ */
 		ata_chk_status(ap);
 		return;
@@ -554,8 +560,8 @@ static void sil_thaw(struct ata_port *ap)
  */
 static void sil_dev_config(struct ata_device *dev)
 {
-	struct ata_port *ap = dev->ap;
-	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
+	struct ata_port *ap = dev->link->ap;
+	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
 	unsigned int n, quirks = 0;
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 
@@ -634,7 +640,7 @@ static void sil_init_controller(struct ata_host *host)
 	}
 }
 
-static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	int board_id = ent->driver_data;
@@ -678,7 +684,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	mmio_base = host->iomap[SIL_MMIO_BAR];
 
 	for (i = 0; i < host->n_ports; i++) {
-		struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;
+		struct ata_port *ap = host->ports[i];
+		struct ata_ioports *ioaddr = &ap->ioaddr;
 
 		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
 		ioaddr->altstatus_addr =
@@ -686,6 +693,9 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
 		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
 		ata_std_ports(ioaddr);
+
+		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
 	}
 
 	/* initialize and activate */
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 56a0000..90b5b83 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -30,7 +30,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_sil24"
-#define DRV_VERSION	"0.9"
+#define DRV_VERSION	"1.1"
 
 /*
  * Port request block (PRB) 32 bytes
@@ -63,6 +63,21 @@ enum {
 	SIL24_HOST_BAR		= 0,
 	SIL24_PORT_BAR		= 2,
 
+	/* sil24 fetches in chunks of 64 bytes.  The first block
+	 * contains the PRB and two SGEs.  Each subsequent block
+	 * consists of four SGEs and is called an SGT.  Calculate the
+	 * number of SGTs that fit into one page.
+	 */
+	SIL24_PRB_SZ		= sizeof(struct sil24_prb)
+				  + 2 * sizeof(struct sil24_sge),
+	SIL24_MAX_SGT		= (PAGE_SIZE - SIL24_PRB_SZ)
+				  / (4 * sizeof(struct sil24_sge)),
+
+	/* This will give us one unused SGE for ATA.  This extra SGE
+	 * will be used to store the CDB for ATAPI devices.
+	 */
+	SIL24_MAX_SGE		= 4 * SIL24_MAX_SGT + 1,
+
 	/*
 	 * Global controller registers (128 bytes @ BAR0)
 	 */
@@ -168,7 +183,7 @@ enum {
 
 	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
 				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
-				  PORT_IRQ_UNK_FIS,
+				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,
 
 	/* bits[27:16] are unmasked (raw) */
 	PORT_IRQ_RAW_SHIFT	= 16,
@@ -237,8 +252,9 @@ enum {
 	/* host flags */
 	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
-				  ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY |
-				  ATA_FLAG_ACPI_SATA,
+				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_AN | ATA_FLAG_PMP,
+	SIL24_COMMON_LFLAGS	= ATA_LFLAG_SKIP_D2H_BSY,
 	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */
 
 	IRQ_STAT_4PORTS		= 0xf,
@@ -246,13 +262,13 @@ enum {
 
 struct sil24_ata_block {
 	struct sil24_prb prb;
-	struct sil24_sge sge[LIBATA_MAX_PRD];
+	struct sil24_sge sge[SIL24_MAX_SGE];
 };
 
 struct sil24_atapi_block {
 	struct sil24_prb prb;
 	u8 cdb[16];
-	struct sil24_sge sge[LIBATA_MAX_PRD - 1];
+	struct sil24_sge sge[SIL24_MAX_SGE];
 };
 
 union sil24_cmd_block {
@@ -264,11 +280,11 @@ static struct sil24_cerr_info {
 	unsigned int err_mask, action;
 	const char *desc;
 } sil24_cerr_db[] = {
-	[0]			= { AC_ERR_DEV, ATA_EH_REVALIDATE,
+	[0]			= { AC_ERR_DEV, 0,
 				    "device error" },
-	[PORT_CERR_DEV]		= { AC_ERR_DEV, ATA_EH_REVALIDATE,
+	[PORT_CERR_DEV]		= { AC_ERR_DEV, 0,
 				    "device error via D2H FIS" },
-	[PORT_CERR_SDB]		= { AC_ERR_DEV, ATA_EH_REVALIDATE,
+	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
 				    "device error via SDB FIS" },
 	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
 				    "error in data FIS" },
@@ -322,16 +338,20 @@ struct sil24_port_priv {
 	union sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
 	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
 	struct ata_taskfile tf;			/* Cached taskfile registers */
+	int do_port_rst;
 };
 
 static void sil24_dev_config(struct ata_device *dev);
 static u8 sil24_check_status(struct ata_port *ap);
-static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
-static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
+static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val);
+static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
 static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+static int sil24_qc_defer(struct ata_queued_cmd *qc);
 static void sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static void sil24_irq_clear(struct ata_port *ap);
+static void sil24_pmp_attach(struct ata_port *ap);
+static void sil24_pmp_detach(struct ata_port *ap);
 static void sil24_freeze(struct ata_port *ap);
 static void sil24_thaw(struct ata_port *ap);
 static void sil24_error_handler(struct ata_port *ap);
@@ -340,6 +360,7 @@ static int sil24_port_start(struct ata_port *ap);
 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 #ifdef CONFIG_PM
 static int sil24_pci_device_resume(struct pci_dev *pdev);
+static int sil24_port_resume(struct ata_port *ap);
 #endif
 
 static const struct pci_device_id sil24_pci_tbl[] = {
@@ -372,7 +393,7 @@ static struct scsi_host_template sil24_sht = {
 	.change_queue_depth	= ata_scsi_change_queue_depth,
 	.can_queue		= SIL24_MAX_CMDS,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= LIBATA_MAX_PRD,
+	.sg_tablesize		= SIL24_MAX_SGE,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -384,8 +405,6 @@ static struct scsi_host_template sil24_sht = {
 };
 
 static const struct ata_port_operations sil24_ops = {
-	.port_disable		= ata_port_disable,
-
 	.dev_config		= sil24_dev_config,
 
 	.check_status		= sil24_check_status,
@@ -394,22 +413,28 @@ static const struct ata_port_operations sil24_ops = {
 
 	.tf_read		= sil24_tf_read,
 
+	.qc_defer		= sil24_qc_defer,
 	.qc_prep		= sil24_qc_prep,
 	.qc_issue		= sil24_qc_issue,
 
 	.irq_clear		= sil24_irq_clear,
-	.irq_on			= ata_dummy_irq_on,
-	.irq_ack		= ata_dummy_irq_ack,
 
 	.scr_read		= sil24_scr_read,
 	.scr_write		= sil24_scr_write,
 
+	.pmp_attach		= sil24_pmp_attach,
+	.pmp_detach		= sil24_pmp_detach,
+
 	.freeze			= sil24_freeze,
 	.thaw			= sil24_thaw,
 	.error_handler		= sil24_error_handler,
 	.post_internal_cmd	= sil24_post_internal_cmd,
 
 	.port_start		= sil24_port_start,
+
+#ifdef CONFIG_PM
+	.port_resume		= sil24_port_resume,
+#endif
 };
 
 /*
@@ -424,25 +449,28 @@ static const struct ata_port_info sil24_port_info[] = {
 	{
 		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
 				  SIL24_FLAG_PCIX_IRQ_WOC,
+		.link_flags	= SIL24_COMMON_LFLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,		/* udma0-5 */
 		.port_ops	= &sil24_ops,
 	},
 	/* sil_3132 */
 	{
 		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
+		.link_flags	= SIL24_COMMON_LFLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,		/* udma0-5 */
 		.port_ops	= &sil24_ops,
 	},
 	/* sil_3131/sil_3531 */
 	{
 		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
+		.link_flags	= SIL24_COMMON_LFLAGS,
 		.pio_mask	= 0x1f,			/* pio0-4 */
 		.mwdma_mask	= 0x07,			/* mwdma0-2 */
-		.udma_mask	= 0x3f,			/* udma0-5 */
+		.udma_mask	= ATA_UDMA5,		/* udma0-5 */
 		.port_ops	= &sil24_ops,
 	},
 };
@@ -456,7 +484,7 @@ static int sil24_tag(int tag)
 
 static void sil24_dev_config(struct ata_device *dev)
 {
-	void __iomem *port = dev->ap->ioaddr.cmd_addr;
+	void __iomem *port = dev->link->ap->ioaddr.cmd_addr;
 
 	if (dev->cdb_len == 16)
 		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
@@ -464,15 +492,15 @@ static void sil24_dev_config(struct ata_device *dev)
 		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
 }
 
-static inline void sil24_update_tf(struct ata_port *ap)
+static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
 {
-	struct sil24_port_priv *pp = ap->private_data;
 	void __iomem *port = ap->ioaddr.cmd_addr;
-	struct sil24_prb __iomem *prb = port;
+	struct sil24_prb __iomem *prb;
 	u8 fis[6 * 4];
 
-	memcpy_fromio(fis, prb->fis, 6 * 4);
-	ata_tf_from_fis(fis, &pp->tf);
+	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
+	memcpy_fromio(fis, prb->fis, sizeof(fis));
+	ata_tf_from_fis(fis, tf);
 }
 
 static u8 sil24_check_status(struct ata_port *ap)
@@ -488,25 +516,30 @@ static int sil24_scr_map[] = {
 	[SCR_ACTIVE]	= 3,
 };
 
-static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
+static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
 	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+
 	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
 		void __iomem *addr;
 		addr = scr_addr + sil24_scr_map[sc_reg] * 4;
-		return readl(scr_addr + sil24_scr_map[sc_reg] * 4);
+		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
+		return 0;
 	}
-	return 0xffffffffU;
+	return -EINVAL;
 }
 
-static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
 	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+
 	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
 		void __iomem *addr;
 		addr = scr_addr + sil24_scr_map[sc_reg] * 4;
 		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
+		return 0;
 	}
+	return -EINVAL;
 }
 
 static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
@@ -515,35 +548,140 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 	*tf = pp->tf;
 }
 
+static void sil24_config_port(struct ata_port *ap)
+{
+	void __iomem *port = ap->ioaddr.cmd_addr;
+
+	/* configure IRQ WoC */
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
+	else
+		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+	/* zero error counters. */
+	writel(0x8000, port + PORT_DECODE_ERR_THRESH);
+	writel(0x8000, port + PORT_CRC_ERR_THRESH);
+	writel(0x8000, port + PORT_HSHK_ERR_THRESH);
+	writel(0x0000, port + PORT_DECODE_ERR_CNT);
+	writel(0x0000, port + PORT_CRC_ERR_CNT);
+	writel(0x0000, port + PORT_HSHK_ERR_CNT);
+
+	/* always use 64bit activation */
+	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
+
+	/* clear port multiplier enable and resume bits */
+	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
+}
+
+static void sil24_config_pmp(struct ata_port *ap, int attached)
+{
+	void __iomem *port = ap->ioaddr.cmd_addr;
+
+	if (attached)
+		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
+	else
+		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
+}
+
+static void sil24_clear_pmp(struct ata_port *ap)
+{
+	void __iomem *port = ap->ioaddr.cmd_addr;
+	int i;
+
+	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
+
+	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
+		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;
+
+		writel(0, pmp_base + PORT_PMP_STATUS);
+		writel(0, pmp_base + PORT_PMP_QACTIVE);
+	}
+}
+
 static int sil24_init_port(struct ata_port *ap)
 {
 	void __iomem *port = ap->ioaddr.cmd_addr;
+	struct sil24_port_priv *pp = ap->private_data;
 	u32 tmp;
 
+	/* clear PMP error status */
+	if (ap->nr_pmp_links)
+		sil24_clear_pmp(ap);
+
 	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
 	ata_wait_register(port + PORT_CTRL_STAT,
 			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
 	tmp = ata_wait_register(port + PORT_CTRL_STAT,
 				PORT_CS_RDY, 0, 10, 100);
 
-	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
+	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
+		pp->do_port_rst = 1;
+		ap->link.eh_context.i.action |= ATA_EH_HARDRESET;
 		return -EIO;
+	}
+
 	return 0;
 }
 
-static int sil24_softreset(struct ata_port *ap, unsigned int *class,
-			   unsigned long deadline)
+static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
+				 const struct ata_taskfile *tf,
+				 int is_cmd, u32 ctrl,
+				 unsigned long timeout_msec)
 {
 	void __iomem *port = ap->ioaddr.cmd_addr;
 	struct sil24_port_priv *pp = ap->private_data;
 	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
 	dma_addr_t paddr = pp->cmd_block_dma;
-	u32 mask, irq_stat;
+	u32 irq_enabled, irq_mask, irq_stat;
+	int rc;
+
+	prb->ctrl = cpu_to_le16(ctrl);
+	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);
+
+	/* temporarily plug completion and error interrupts */
+	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
+	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
+
+	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
+
+	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
+	irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0,
+				     10, timeout_msec);
+
+	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
+	irq_stat >>= PORT_IRQ_RAW_SHIFT;
+
+	if (irq_stat & PORT_IRQ_COMPLETE)
+		rc = 0;
+	else {
+		/* force port into known state */
+		sil24_init_port(ap);
+
+		if (irq_stat & PORT_IRQ_ERROR)
+			rc = -EIO;
+		else
+			rc = -EBUSY;
+	}
+
+	/* restore IRQ enabled */
+	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);
+
+	return rc;
+}
+
+static int sil24_do_softreset(struct ata_link *link, unsigned int *class,
+			      int pmp, unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	unsigned long timeout_msec = 0;
+	struct ata_taskfile tf;
 	const char *reason;
+	int rc;
 
 	DPRINTK("ENTER\n");
 
-	if (ata_port_offline(ap)) {
+	if (ata_link_offline(link)) {
 		DPRINTK("PHY reports no device\n");
 		*class = ATA_DEV_NONE;
 		goto out;
@@ -551,34 +689,27 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class,
 
 	/* put the port into known state */
 	if (sil24_init_port(ap)) {
-		reason ="port not ready";
+		reason = "port not ready";
 		goto err;
 	}
 
 	/* do SRST */
-	prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
-	prb->fis[1] = 0; /* no PMP yet */
-
-	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
-	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
-
-	mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
-	irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
-				     100, jiffies_to_msecs(deadline - jiffies));
-
-	writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
-	irq_stat >>= PORT_IRQ_RAW_SHIFT;
-
-	if (!(irq_stat & PORT_IRQ_COMPLETE)) {
-		if (irq_stat & PORT_IRQ_ERROR)
-			reason = "SRST command error";
-		else
-			reason = "timeout";
+	if (time_after(deadline, jiffies))
+		timeout_msec = jiffies_to_msecs(deadline - jiffies);
+
+	ata_tf_init(link->device, &tf);	/* doesn't really matter */
+	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
+				   timeout_msec);
+	if (rc == -EBUSY) {
+		reason = "timeout";
+		goto err;
+	} else if (rc) {
+		reason = "SRST command error";
 		goto err;
 	}
 
-	sil24_update_tf(ap);
-	*class = ata_dev_classify(&pp->tf);
+	sil24_read_tf(ap, 0, &tf);
+	*class = ata_dev_classify(&tf);
 
 	if (*class == ATA_DEV_UNKNOWN)
 		*class = ATA_DEV_NONE;
@@ -588,40 +719,72 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class,
 	return 0;
 
  err:
-	ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
+	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
 	return -EIO;
 }
 
-static int sil24_hardreset(struct ata_port *ap, unsigned int *class,
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+			   unsigned long deadline)
+{
+	return sil24_do_softreset(link, class, SATA_PMP_CTRL_PORT, deadline);
+}
+
+static int sil24_hardreset(struct ata_link *link, unsigned int *class,
 			   unsigned long deadline)
 {
+	struct ata_port *ap = link->ap;
 	void __iomem *port = ap->ioaddr.cmd_addr;
+	struct sil24_port_priv *pp = ap->private_data;
+	int did_port_rst = 0;
 	const char *reason;
 	int tout_msec, rc;
 	u32 tmp;
 
+ retry:
+	/* Sometimes, DEV_RST is not enough to recover the controller.
+	 * This often happens after the PMP DMA CS errata is triggered.
+	 */
+	if (pp->do_port_rst) {
+		ata_port_printk(ap, KERN_WARNING, "controller in dubious "
+				"state, performing PORT_RST\n");
+
+		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
+		msleep(10);
+		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+		ata_wait_register(port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
+				  10, 5000);
+
+		/* restore port configuration */
+		sil24_config_port(ap);
+		sil24_config_pmp(ap, ap->nr_pmp_links);
+
+		pp->do_port_rst = 0;
+		did_port_rst = 1;
+	}
+
 	/* sil24 does the right thing(tm) without any protection */
-	sata_set_spd(ap);
+	sata_set_spd(link);
 
 	tout_msec = 100;
-	if (ata_port_online(ap))
+	if (ata_link_online(link))
 		tout_msec = 5000;
 
 	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
 	tmp = ata_wait_register(port + PORT_CTRL_STAT,
-				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
+				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
+				tout_msec);
 
 	/* SStatus oscillates between zero and valid status after
 	 * DEV_RST, debounce it.
 	 */
-	rc = sata_phy_debounce(ap, sata_deb_timing_long, deadline);
+	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
 	if (rc) {
 		reason = "PHY debouncing failed";
 		goto err;
 	}
 
 	if (tmp & PORT_CS_DEV_RST) {
-		if (ata_port_offline(ap))
+		if (ata_link_offline(link))
 			return 0;
 		reason = "link not ready";
 		goto err;
@@ -636,7 +799,12 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class,
 	return -EAGAIN;
 
  err:
-	ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
+	if (!did_port_rst) {
+		pp->do_port_rst = 1;
+		goto retry;
+	}
+
+	ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason);
 	return -EIO;
 }
 
@@ -644,16 +812,51 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
 				 struct sil24_sge *sge)
 {
 	struct scatterlist *sg;
+	struct sil24_sge *last_sge = NULL;
 
 	ata_for_each_sg(sg, qc) {
 		sge->addr = cpu_to_le64(sg_dma_address(sg));
 		sge->cnt = cpu_to_le32(sg_dma_len(sg));
-		if (ata_sg_is_last(sg, qc))
-			sge->flags = cpu_to_le32(SGE_TRM);
-		else
-			sge->flags = 0;
+		sge->flags = 0;
+
+		last_sge = sge;
 		sge++;
 	}
+
+	if (likely(last_sge))
+		last_sge->flags = cpu_to_le32(SGE_TRM);
+}
+
+static int sil24_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+	struct ata_port *ap = link->ap;
+	u8 prot = qc->tf.protocol;
+	int is_atapi = (prot == ATA_PROT_ATAPI ||
+			prot == ATA_PROT_ATAPI_NODATA ||
+			prot == ATA_PROT_ATAPI_DMA);
+
+	/* ATAPI commands completing with CHECK_SENSE cause various
+	 * weird problems if other commands are active.  PMP DMA CS
+	 * errata doesn't cover all cases, and HSM violations occur even with
+	 * only one other device active.  Always run an ATAPI command
+	 * by itself.
+	 */
+	if (unlikely(ap->excl_link)) {
+		if (link == ap->excl_link) {
+			if (ap->nr_active_links)
+				return ATA_DEFER_PORT;
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+		} else
+			return ATA_DEFER_PORT;
+	} else if (unlikely(is_atapi)) {
+		ap->excl_link = link;
+		if (ap->nr_active_links)
+			return ATA_DEFER_PORT;
+		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+	}
+
+	return ata_std_qc_defer(qc);
 }
 
 static void sil24_qc_prep(struct ata_queued_cmd *qc)
@@ -699,7 +902,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 	}
 
 	prb->ctrl = cpu_to_le16(ctrl);
-	ata_tf_to_fis(&qc->tf, prb->fis, 0);
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
 
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
 		sil24_fill_sg(qc, sge);
@@ -728,6 +931,39 @@ static void sil24_irq_clear(struct ata_port *ap)
 	/* unused */
 }
 
+static void sil24_pmp_attach(struct ata_port *ap)
+{
+	sil24_config_pmp(ap, 1);
+	sil24_init_port(ap);
+}
+
+static void sil24_pmp_detach(struct ata_port *ap)
+{
+	sil24_init_port(ap);
+	sil24_config_pmp(ap, 0);
+}
+
+static int sil24_pmp_softreset(struct ata_link *link, unsigned int *class,
+			       unsigned long deadline)
+{
+	return sil24_do_softreset(link, class, link->pmp, deadline);
+}
+
+static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
+			       unsigned long deadline)
+{
+	int rc;
+
+	rc = sil24_init_port(link->ap);
+	if (rc) {
+		ata_link_printk(link, KERN_ERR,
+				"hardreset failed (port not ready)\n");
+		return rc;
+	}
+
+	return sata_pmp_std_hardreset(link, class, deadline);
+}
+
 static void sil24_freeze(struct ata_port *ap)
 {
 	void __iomem *port = ap->ioaddr.cmd_addr;
@@ -754,8 +990,11 @@ static void sil24_thaw(struct ata_port *ap)
 static void sil24_error_intr(struct ata_port *ap)
 {
 	void __iomem *port = ap->ioaddr.cmd_addr;
-	struct ata_eh_info *ehi = &ap->eh_info;
-	int freeze = 0;
+	struct sil24_port_priv *pp = ap->private_data;
+	struct ata_queued_cmd *qc = NULL;
+	struct ata_link *link;
+	struct ata_eh_info *ehi;
+	int abort = 0, freeze = 0;
 	u32 irq_stat;
 
 	/* on error, we need to clear IRQ explicitly */
@@ -763,22 +1002,29 @@ static void sil24_error_intr(struct ata_port *ap)
 	writel(irq_stat, port + PORT_IRQ_STAT);
 
 	/* first, analyze and record host port events */
+	link = &ap->link;
+	ehi = &link->eh_info;
 	ata_ehi_clear_desc(ehi);
 
 	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 
+	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
+		ata_ehi_push_desc(ehi, "SDB notify");
+		sata_async_notification(ap);
+	}
+
 	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
 		ata_ehi_hotplugged(ehi);
-		ata_ehi_push_desc(ehi, ", %s",
-			       irq_stat & PORT_IRQ_PHYRDY_CHG ?
-			       "PHY RDY changed" : "device exchanged");
+		ata_ehi_push_desc(ehi, "%s",
+				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
+				  "PHY RDY changed" : "device exchanged");
 		freeze = 1;
 	}
 
 	if (irq_stat & PORT_IRQ_UNK_FIS) {
 		ehi->err_mask |= AC_ERR_HSM;
 		ehi->action |= ATA_EH_SOFTRESET;
-		ata_ehi_push_desc(ehi , ", unknown FIS");
+		ata_ehi_push_desc(ehi, "unknown FIS");
 		freeze = 1;
 	}
 
@@ -786,8 +1032,44 @@ static void sil24_error_intr(struct ata_port *ap)
 	if (irq_stat & PORT_IRQ_ERROR) {
 		struct sil24_cerr_info *ci = NULL;
 		unsigned int err_mask = 0, action = 0;
-		struct ata_queued_cmd *qc;
-		u32 cerr;
+		u32 context, cerr;
+		int pmp;
+
+		abort = 1;
+
+		/* DMA Context Switch Failure in Port Multiplier Mode
+		 * errata.  If we have active commands to 3 or more
+		 * devices, any error condition on active devices can
+		 * corrupt DMA context switching.
+		 */
+		if (ap->nr_active_links >= 3) {
+			ehi->err_mask |= AC_ERR_OTHER;
+			ehi->action |= ATA_EH_HARDRESET;
+			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
+			pp->do_port_rst = 1;
+			freeze = 1;
+		}
+
+		/* find out the offending link and qc */
+		if (ap->nr_pmp_links) {
+			context = readl(port + PORT_CONTEXT);
+			pmp = (context >> 5) & 0xf;
+
+			if (pmp < ap->nr_pmp_links) {
+				link = &ap->pmp_link[pmp];
+				ehi = &link->eh_info;
+				qc = ata_qc_from_tag(ap, link->active_tag);
+
+				ata_ehi_clear_desc(ehi);
+				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
+						  irq_stat);
+			} else {
+				err_mask |= AC_ERR_HSM;
+				action |= ATA_EH_HARDRESET;
+				freeze = 1;
+			}
+		} else
+			qc = ata_qc_from_tag(ap, link->active_tag);
 
 		/* analyze CMD_ERR */
 		cerr = readl(port + PORT_CMD_ERR);
@@ -797,36 +1079,46 @@ static void sil24_error_intr(struct ata_port *ap)
 		if (ci && ci->desc) {
 			err_mask |= ci->err_mask;
 			action |= ci->action;
-			ata_ehi_push_desc(ehi, ", %s", ci->desc);
+			ata_ehi_push_desc(ehi, "%s", ci->desc);
 		} else {
 			err_mask |= AC_ERR_OTHER;
 			action |= ATA_EH_SOFTRESET;
-			ata_ehi_push_desc(ehi, ", unknown command error %d",
+			ata_ehi_push_desc(ehi, "unknown command error %d",
 					  cerr);
 		}
 
 		/* record error info */
-		qc = ata_qc_from_tag(ap, ap->active_tag);
 		if (qc) {
-			sil24_update_tf(ap);
+			sil24_read_tf(ap, qc->tag, &pp->tf);
 			qc->err_mask |= err_mask;
 		} else
 			ehi->err_mask |= err_mask;
 
 		ehi->action |= action;
+
+		/* if PMP, resume */
+		if (ap->nr_pmp_links)
+			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
 	}
 
 	/* freeze or abort */
 	if (freeze)
 		ata_port_freeze(ap);
-	else
-		ata_port_abort(ap);
+	else if (abort) {
+		if (qc)
+			ata_link_abort(qc->dev->link);
+		else
+			ata_port_abort(ap);
+	}
 }
 
 static void sil24_finish_qc(struct ata_queued_cmd *qc)
 {
+	struct ata_port *ap = qc->ap;
+	struct sil24_port_priv *pp = ap->private_data;
+
 	if (qc->flags & ATA_QCFLAG_RESULT_TF)
-		sil24_update_tf(qc->ap);
+		sil24_read_tf(ap, qc->tag, &pp->tf);
 }
 
 static inline void sil24_host_intr(struct ata_port *ap)
@@ -835,6 +1127,16 @@ static inline void sil24_host_intr(struct ata_port *ap)
 	u32 slot_stat, qc_active;
 	int rc;
 
+	/* If PCIX_IRQ_WOC, there's an inherent race window between
+	 * clearing IRQ pending status and reading PORT_SLOT_STAT
+	 * which may cause spurious interrupts afterwards.  This is
+	 * unavoidable and much better than losing interrupts which
+	 * happens if IRQ pending is cleared after reading
+	 * PORT_SLOT_STAT.
+	 */
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
 	slot_stat = readl(port + PORT_SLOT_STAT);
 
 	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
@@ -842,25 +1144,23 @@ static inline void sil24_host_intr(struct ata_port *ap)
 		return;
 	}
 
-	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
-		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
-
 	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
 	rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
 	if (rc > 0)
 		return;
 	if (rc < 0) {
-		struct ata_eh_info *ehi = &ap->eh_info;
+		struct ata_eh_info *ehi = &ap->link.eh_info;
 		ehi->err_mask |= AC_ERR_HSM;
 		ehi->action |= ATA_EH_SOFTRESET;
 		ata_port_freeze(ap);
 		return;
 	}
 
-	if (ata_ratelimit())
+	/* spurious interrupts are expected if PCIX_IRQ_WOC */
+	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
 		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
 			"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
-			slot_stat, ap->active_tag, ap->sactive);
+			slot_stat, ap->link.active_tag, ap->link.sactive);
 }
 
 static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
@@ -888,7 +1188,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
 		if (status & (1 << i)) {
 			struct ata_port *ap = host->ports[i];
 			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				sil24_host_intr(host->ports[i]);
+				sil24_host_intr(ap);
 				handled++;
 			} else
 				printk(KERN_ERR DRV_NAME
@@ -902,16 +1202,18 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
 
 static void sil24_error_handler(struct ata_port *ap)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct sil24_port_priv *pp = ap->private_data;
 
-	if (sil24_init_port(ap)) {
+	if (sil24_init_port(ap))
 		ata_eh_freeze_port(ap);
-		ehc->i.action |= ATA_EH_HARDRESET;
-	}
 
 	/* perform recovery */
-	ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
-		  ata_std_postreset);
+	sata_pmp_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
+		       ata_std_postreset, sata_pmp_std_prereset,
+		       sil24_pmp_softreset, sil24_pmp_hardreset,
+		       sata_pmp_std_postreset);
+
+	pp->do_port_rst = 0;
 }
 
 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -919,8 +1221,8 @@ static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 
 	/* make DMA engine forget about the failed command */
-	if (qc->flags & ATA_QCFLAG_FAILED)
-		sil24_init_port(ap);
+	if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
+		ata_eh_freeze_port(ap);
 }
 
 static int sil24_port_start(struct ata_port *ap)
@@ -958,7 +1260,6 @@ static int sil24_port_start(struct ata_port *ap)
 static void sil24_init_controller(struct ata_host *host)
 {
 	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
-	void __iomem *port_base = host->iomap[SIL24_PORT_BAR];
 	u32 tmp;
 	int i;
 
@@ -970,7 +1271,8 @@ static void sil24_init_controller(struct ata_host *host)
 
 	/* init ports */
 	for (i = 0; i < host->n_ports; i++) {
-		void __iomem *port = port_base + i * PORT_REGS_SIZE;
+		struct ata_port *ap = host->ports[i];
+		void __iomem *port = ap->ioaddr.cmd_addr;
 
 		/* Initial PHY setting */
 		writel(0x20c, port + PORT_PHY_CFG);
@@ -984,29 +1286,11 @@ static void sil24_init_controller(struct ata_host *host)
 						PORT_CS_PORT_RST, 10, 100);
 			if (tmp & PORT_CS_PORT_RST)
 				dev_printk(KERN_ERR, host->dev,
-				           "failed to clear port RST\n");
+					   "failed to clear port RST\n");
 		}
 
-		/* Configure IRQ WoC */
-		if (host->ports[0]->flags & SIL24_FLAG_PCIX_IRQ_WOC)
-			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
-		else
-			writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
-
-		/* Zero error counters. */
-		writel(0x8000, port + PORT_DECODE_ERR_THRESH);
-		writel(0x8000, port + PORT_CRC_ERR_THRESH);
-		writel(0x8000, port + PORT_HSHK_ERR_THRESH);
-		writel(0x0000, port + PORT_DECODE_ERR_CNT);
-		writel(0x0000, port + PORT_CRC_ERR_CNT);
-		writel(0x0000, port + PORT_HSHK_ERR_CNT);
-
-		/* Always use 64bit activation */
-		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
-
-		/* Clear port multiplier enable and resume bits */
-		writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME,
-		       port + PORT_CTRL_CLR);
+		/* configure port */
+		sil24_config_port(ap);
 	}
 
 	/* Turn on interrupts */
@@ -1015,7 +1299,8 @@ static void sil24_init_controller(struct ata_host *host)
 
 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	static int printed_version = 0;
+	extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
+	static int printed_version;
 	struct ata_port_info pi = sil24_port_info[ent->driver_data];
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	void __iomem * const *iomap;
@@ -1023,6 +1308,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int i, rc;
 	u32 tmp;
 
+	/* cause link error if sil24_cmd_block is sized wrongly */
+	if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
+		__MARKER__sil24_cmd_block_is_sized_wrongly = 1;
+
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
@@ -1057,12 +1346,15 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->iomap = iomap;
 
 	for (i = 0; i < host->n_ports; i++) {
-		void __iomem *port = iomap[SIL24_PORT_BAR] + i * PORT_REGS_SIZE;
+		struct ata_port *ap = host->ports[i];
+		size_t offset = ap->port_no * PORT_REGS_SIZE;
+		void __iomem *port = iomap[SIL24_PORT_BAR] + offset;
 
 		host->ports[i]->ioaddr.cmd_addr = port;
 		host->ports[i]->ioaddr.scr_addr = port + PORT_SCONTROL;
 
-		ata_std_ports(&host->ports[i]->ioaddr);
+		ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
+		ata_port_pbar_desc(ap, SIL24_PORT_BAR, offset, "port");
 	}
 
 	/* configure and activate the device */
@@ -1118,6 +1410,12 @@ static int sil24_pci_device_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+
+static int sil24_port_resume(struct ata_port *ap)
+{
+	sil24_config_pmp(ap, ap->nr_pmp_links);
+	return 0;
+}
 #endif
 
 static int __init sil24_init(void)
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f111c98..a01260a 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -43,7 +43,7 @@
 #include "sis.h"
 
 #define DRV_NAME	"sata_sis"
-#define DRV_VERSION	"0.8"
+#define DRV_VERSION	"1.0"
 
 enum {
 	sis_180			= 0,
@@ -63,17 +63,17 @@ enum {
 	GENCTL_IOMAPPED_SCR	= (1 << 26), /* if set, SCRs are in IO space */
 };
 
-static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 
 static const struct pci_device_id sis_pci_tbl[] = {
-	{ PCI_VDEVICE(SI, 0x0180), sis_180 },		/* SiS 964/180 */
-	{ PCI_VDEVICE(SI, 0x0181), sis_180 },		/* SiS 964/180 */
-	{ PCI_VDEVICE(SI, 0x0182), sis_180 },		/* SiS 965/965L */
-	{ PCI_VDEVICE(SI, 0x0183), sis_180 },		/* SiS 965/965L */
-	{ PCI_VDEVICE(SI, 0x1182), sis_180 },		/* SiS 966/680 */
-	{ PCI_VDEVICE(SI, 0x1183), sis_180 },		/* SiS 966/966L/968/680 */
+	{ PCI_VDEVICE(SI, 0x0180), sis_180 },	/* SiS 964/180 */
+	{ PCI_VDEVICE(SI, 0x0181), sis_180 },	/* SiS 964/180 */
+	{ PCI_VDEVICE(SI, 0x0182), sis_180 },	/* SiS 965/965L */
+	{ PCI_VDEVICE(SI, 0x0183), sis_180 },	/* SiS 965/965L */
+	{ PCI_VDEVICE(SI, 0x1182), sis_180 },	/* SiS 966/680 */
+	{ PCI_VDEVICE(SI, 0x1183), sis_180 },	/* SiS 966/966L/968/680 */
 
 	{ }	/* terminate list */
 };
@@ -92,7 +92,7 @@ static struct scsi_host_template sis_sht = {
 	.queuecommand		= ata_scsi_queuecmd,
 	.can_queue		= ATA_DEF_QUEUE,
 	.this_id		= ATA_SHT_THIS_ID,
-	.sg_tablesize		= ATA_MAX_PRD,
+	.sg_tablesize		= LIBATA_MAX_PRD,
 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
 	.emulated		= ATA_SHT_EMULATED,
 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
@@ -104,7 +104,6 @@ static struct scsi_host_template sis_sht = {
 };
 
 static const struct ata_port_operations sis_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -123,7 +122,6 @@ static const struct ata_port_operations sis_ops = {
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= sis_scr_read,
 	.scr_write		= sis_scr_write,
 	.port_start		= ata_port_start,
@@ -133,7 +131,7 @@ static const struct ata_port_info sis_port_info = {
 	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0x7,
-	.udma_mask	= 0x7f,
+	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &sis_ops,
 };
 
@@ -151,28 +149,28 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
 
 	if (ap->port_no)  {
 		switch (pdev->device) {
-			case 0x0180:
-			case 0x0181:
-				pci_read_config_byte(pdev, SIS_PMR, &pmr);
-				if ((pmr & SIS_PMR_COMBINED) == 0)
-					addr += SIS180_SATA1_OFS;
-				break;
-
-			case 0x0182:
-			case 0x0183:
-			case 0x1182:
-				addr += SIS182_SATA1_OFS;
-				break;
+		case 0x0180:
+		case 0x0181:
+			pci_read_config_byte(pdev, SIS_PMR, &pmr);
+			if ((pmr & SIS_PMR_COMBINED) == 0)
+				addr += SIS180_SATA1_OFS;
+			break;
+
+		case 0x0182:
+		case 0x0183:
+		case 0x1182:
+			addr += SIS182_SATA1_OFS;
+			break;
 		}
 	}
 	return addr;
 }
 
-static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
+static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
-	u32 val, val2 = 0;
+	u32 val2 = 0;
 	u8 pmr;
 
 	if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -180,16 +178,19 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
 
 	pci_read_config_byte(pdev, SIS_PMR, &pmr);
 
-	pci_read_config_dword(pdev, cfg_addr, &val);
+	pci_read_config_dword(pdev, cfg_addr, val);
 
 	if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
 	    (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
 		pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
 
-	return (val|val2) &  0xfffffffb; /* avoid problems with powerdowned ports */
+	*val |= val2;
+	*val &= 0xfffffffb;	/* avoid problems with powerdowned ports */
+
+	return 0;
 }
 
-static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static void sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
@@ -207,36 +208,37 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val
 		pci_write_config_dword(pdev, cfg_addr+0x10, val);
 }
 
-static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u32 val, val2 = 0;
 	u8 pmr;
 
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
+		return -EINVAL;
 
 	if (ap->flags & SIS_FLAG_CFGSCR)
-		return sis_scr_cfg_read(ap, sc_reg);
+		return sis_scr_cfg_read(ap, sc_reg, val);
 
 	pci_read_config_byte(pdev, SIS_PMR, &pmr);
 
-	val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
 
 	if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
 	    (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
-		val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
+		*val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
 
-	return (val | val2) &  0xfffffffb;
+	*val &= 0xfffffffb;
+
+	return 0;
 }
 
-static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	u8 pmr;
 
 	if (sc_reg > SCR_CONTROL)
-		return;
+		return -EINVAL;
 
 	pci_read_config_byte(pdev, SIS_PMR, &pmr);
 
@@ -248,9 +250,10 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 		    (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
 			iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
 	}
+	return 0;
 }
 
-static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	struct ata_port_info pi = sis_port_info;
@@ -306,35 +309,39 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		} else {
 			dev_printk(KERN_INFO, &pdev->dev,
 				   "Detected SiS 180/181 chipset in combined mode\n");
-			port2_start=0;
+			port2_start = 0;
 			pi.flags |= ATA_FLAG_SLAVE_POSS;
 		}
 		break;
 
 	case 0x0182:
 	case 0x0183:
-		pci_read_config_dword ( pdev, 0x6C, &val);
+		pci_read_config_dword(pdev, 0x6C, &val);
 		if (val & (1L << 31)) {
-			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
+			dev_printk(KERN_INFO, &pdev->dev,
+				   "Detected SiS 182/965 chipset\n");
 			pi.flags |= ATA_FLAG_SLAVE_POSS;
 		} else {
-			dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
+			dev_printk(KERN_INFO, &pdev->dev,
+				   "Detected SiS 182/965L chipset\n");
 		}
 		break;
 
 	case 0x1182:
-		dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/966/680 SATA controller\n");
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "Detected SiS 1182/966/680 SATA controller\n");
 		pi.flags |= ATA_FLAG_SLAVE_POSS;
 		break;
 
 	case 0x1183:
-		dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
 		ppi[0] = &sis_info133_for_sata;
 		ppi[1] = &sis_info133_for_sata;
 		break;
 	}
 
-	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 361700e..22d7999 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -53,7 +53,7 @@
 #endif /* CONFIG_PPC_OF */
 
 #define DRV_NAME	"sata_svw"
-#define DRV_VERSION	"2.2"
+#define DRV_VERSION	"2.3"
 
 enum {
 	/* ap->flags bits */
@@ -103,20 +103,21 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
 	return 0;
 }
 
-static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
-	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+		return -EINVAL;
+	*val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
 
-static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
-			       u32 val)
+static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return;
-	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+		return -EINVAL;
+	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
 
@@ -181,7 +182,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = lbal >> 8;
 		tf->hob_lbam = lbam >> 8;
 		tf->hob_lbah = lbah >> 8;
-        }
+	}
 }
 
 /**
@@ -192,12 +193,13 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
  *	spin_lock_irqsave(host lock)
  */
 
-static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 	u8 dmactl;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
 	/* load PRD table addr. */
 	mb();	/* make sure PRD table writes are visible to controller */
 	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
@@ -222,10 +224,10 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
  *	spin_lock_irqsave(host lock)
  */
 
-static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
+static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
 	u8 dmactl;
 
 	/* start host DMA transaction */
@@ -253,7 +255,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
 
 static u8 k2_stat_check_status(struct ata_port *ap)
 {
-       	return readl((void __iomem *) ap->ioaddr.status_addr);
+	return readl(ap->ioaddr.status_addr);
 }
 
 #ifdef CONFIG_PPC_OF
@@ -327,7 +329,6 @@ static struct scsi_host_template k2_sata_sht = {
 
 
 static const struct ata_port_operations k2_sata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= k2_sata_tf_load,
 	.tf_read		= k2_sata_tf_read,
 	.check_status		= k2_stat_check_status,
@@ -347,7 +348,6 @@ static const struct ata_port_operations k2_sata_ops = {
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= k2_sata_scr_read,
 	.scr_write		= k2_sata_scr_write,
 	.port_start		= ata_port_start,
@@ -360,7 +360,7 @@ static const struct ata_port_info k2_port_info[] = {
 				  ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
 		.pio_mask	= 0x1f,
 		.mwdma_mask	= 0x07,
-		.udma_mask	= 0x7f,
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &k2_sata_ops,
 	},
 	/* board_svw8 */
@@ -370,7 +370,7 @@ static const struct ata_port_info k2_port_info[] = {
 				  K2_FLAG_SATA_8_PORTS,
 		.pio_mask	= 0x1f,
 		.mwdma_mask	= 0x07,
-		.udma_mask	= 0x7f,
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &k2_sata_ops,
 	},
 };
@@ -395,7 +395,7 @@ static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
 }
 
 
-static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	const struct ata_port_info *ppi[] =
@@ -443,9 +443,15 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
 	/* different controllers have different number of ports - currently 4 or 8 */
 	/* All ports are on the same function. Multi-function device is no
 	 * longer available. This should not be seen in any system. */
-	for (i = 0; i < host->n_ports; i++)
-		k2_sata_setup_port(&host->ports[i]->ioaddr,
-				   mmio_base + i * K2_SATA_PORT_OFFSET);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned int offset = i * K2_SATA_PORT_OFFSET;
+
+		k2_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+		ata_port_pbar_desc(ap, 5, -1, "mmio");
+		ata_port_pbar_desc(ap, 5, offset, "port");
+	}
 
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
 	if (rc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 4a9171e..dcccf3b 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -30,6 +30,54 @@
  *
  */
 
+/*
+	Theory of operation
+	-------------------
+
+	The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
+	engine, DIMM memory, and four ATA engines (one per SATA port).
+	Data is copied to/from DIMM memory by the HDMA engine, before
+	handing off to one (or more) of the ATA engines.  The ATA
+	engines operate solely on DIMM memory.
+
+	The SX4 behaves like a PATA chip, with no SATA controls or
+	knowledge whatsoever, leading to the presumption that
+	PATA<->SATA bridges exist on SX4 boards, external to the
+	PDC20621 chip itself.
+
+	The chip is quite capable, supporting an XOR engine and linked
+	hardware commands (permits a string of transactions to be
+	submitted and waited-on as a single unit), and an optional
+	microprocessor.
+
+	The limiting factor is largely software.  This Linux driver was
+	written to multiplex the single HDMA engine to copy disk
+	transactions into a fixed DIMM memory space, from where an ATA
+	engine takes over.  As a result, each WRITE looks like this:
+
+		submit HDMA packet to hardware
+		hardware copies data from system memory to DIMM
+		hardware raises interrupt
+
+		submit ATA packet to hardware
+		hardware executes ATA WRITE command, w/ data in DIMM
+		hardware raises interrupt
+
+	and each READ looks like this:
+
+		submit ATA packet to hardware
+		hardware executes ATA READ command, w/ data in DIMM
+		hardware raises interrupt
+
+		submit HDMA packet to hardware
+		hardware copies data from DIMM to system memory
+		hardware raises interrupt
+
+	This is a very slow, lock-step way of doing things that can
+	certainly be improved by motivated kernel hackers.
+
+ */
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -44,7 +92,7 @@
 #include "sata_promise.h"
 
 #define DRV_NAME	"sata_sx4"
-#define DRV_VERSION	"0.11"
+#define DRV_VERSION	"0.12"
 
 
 enum {
@@ -58,6 +106,8 @@ enum {
 	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
 	PDC_HDMA_CTLSTAT	= 0x12C, /* Host DMA control / status */
 
+	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */
+
 	PDC_20621_SEQCTL	= 0x400,
 	PDC_20621_SEQMASK	= 0x480,
 	PDC_20621_GENERAL_CTL	= 0x484,
@@ -87,48 +137,60 @@ enum {
 
 	board_20621		= 0,	/* FastTrak S150 SX4 */
 
-	PDC_RESET		= (1 << 11), /* HDMA reset */
+	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
+	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
+	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */
 
 	PDC_MAX_HDMA		= 32,
 	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1),
 
-	PDC_DIMM0_SPD_DEV_ADDRESS     = 0x50,
-	PDC_DIMM1_SPD_DEV_ADDRESS     = 0x51,
-	PDC_MAX_DIMM_MODULE           = 0x02,
-	PDC_I2C_CONTROL_OFFSET        = 0x48,
-	PDC_I2C_ADDR_DATA_OFFSET      = 0x4C,
-	PDC_DIMM0_CONTROL_OFFSET      = 0x80,
-	PDC_DIMM1_CONTROL_OFFSET      = 0x84,
-	PDC_SDRAM_CONTROL_OFFSET      = 0x88,
-	PDC_I2C_WRITE                 = 0x00000000,
-	PDC_I2C_READ                  = 0x00000040,
-	PDC_I2C_START                 = 0x00000080,
-	PDC_I2C_MASK_INT              = 0x00000020,
-	PDC_I2C_COMPLETE              = 0x00010000,
-	PDC_I2C_NO_ACK                = 0x00100000,
-	PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
-	PDC_DIMM_SPD_SUBADDRESS_END   = 0x7F,
-	PDC_DIMM_SPD_ROW_NUM          = 3,
-	PDC_DIMM_SPD_COLUMN_NUM       = 4,
-	PDC_DIMM_SPD_MODULE_ROW       = 5,
-	PDC_DIMM_SPD_TYPE             = 11,
-	PDC_DIMM_SPD_FRESH_RATE       = 12,
-	PDC_DIMM_SPD_BANK_NUM         = 17,
-	PDC_DIMM_SPD_CAS_LATENCY      = 18,
-	PDC_DIMM_SPD_ATTRIBUTE        = 21,
-	PDC_DIMM_SPD_ROW_PRE_CHARGE   = 27,
-	PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
-	PDC_DIMM_SPD_RAS_CAS_DELAY    = 29,
-	PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
-	PDC_DIMM_SPD_SYSTEM_FREQ      = 126,
-	PDC_CTL_STATUS		      = 0x08,
-	PDC_DIMM_WINDOW_CTLR	      = 0x0C,
-	PDC_TIME_CONTROL              = 0x3C,
-	PDC_TIME_PERIOD               = 0x40,
-	PDC_TIME_COUNTER              = 0x44,
-	PDC_GENERAL_CTLR	      = 0x484,
-	PCI_PLL_INIT                  = 0x8A531824,
-	PCI_X_TCOUNT                  = 0xEE1E5CFF
+	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
+	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
+	PDC_I2C_CONTROL			= 0x48,
+	PDC_I2C_ADDR_DATA		= 0x4C,
+	PDC_DIMM0_CONTROL		= 0x80,
+	PDC_DIMM1_CONTROL		= 0x84,
+	PDC_SDRAM_CONTROL		= 0x88,
+	PDC_I2C_WRITE			= 0,		/* master -> slave */
+	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
+	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
+	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
+	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
+	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
+	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
+	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
+	PDC_DIMM_SPD_ROW_NUM		= 3,
+	PDC_DIMM_SPD_COLUMN_NUM		= 4,
+	PDC_DIMM_SPD_MODULE_ROW		= 5,
+	PDC_DIMM_SPD_TYPE		= 11,
+	PDC_DIMM_SPD_FRESH_RATE		= 12,
+	PDC_DIMM_SPD_BANK_NUM		= 17,
+	PDC_DIMM_SPD_CAS_LATENCY	= 18,
+	PDC_DIMM_SPD_ATTRIBUTE		= 21,
+	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
+	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
+	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
+	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
+	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
+	PDC_CTL_STATUS			= 0x08,
+	PDC_DIMM_WINDOW_CTLR		= 0x0C,
+	PDC_TIME_CONTROL		= 0x3C,
+	PDC_TIME_PERIOD			= 0x40,
+	PDC_TIME_COUNTER		= 0x44,
+	PDC_GENERAL_CTLR		= 0x484,
+	PCI_PLL_INIT			= 0x8A531824,
+	PCI_X_TCOUNT			= 0xEE1E5CFF,
+
+	/* PDC_TIME_CONTROL bits */
+	PDC_TIMER_BUZZER		= (1 << 10),
+	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
+	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
+	PDC_TIMER_ENABLE		= (1 << 7),
+	PDC_TIMER_MASK_INT		= (1 << 5),
+	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
+	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
+					  PDC_TIMER_ENABLE |
+					  PDC_TIMER_MASK_INT,
 };
 
 
@@ -150,9 +212,9 @@ struct pdc_host_priv {
 };
 
 
-static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void pdc_eng_timeout(struct ata_port *ap);
-static void pdc_20621_phy_reset (struct ata_port *ap);
+static void pdc_20621_phy_reset(struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
@@ -192,7 +254,6 @@ static struct scsi_host_template pdc_sata_sht = {
 };
 
 static const struct ata_port_operations pdc_20621_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= pdc_tf_load_mmio,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -205,7 +266,6 @@ static const struct ata_port_operations pdc_20621_ops = {
 	.eng_timeout		= pdc_eng_timeout,
 	.irq_clear		= pdc20621_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.port_start		= pdc_port_start,
 };
 
@@ -217,7 +277,7 @@ static const struct ata_port_info pdc_port_info[] = {
 				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
 		.pio_mask	= 0x1f, /* pio0-4 */
 		.mwdma_mask	= 0x07, /* mwdma0-2 */
-		.udma_mask	= 0x7f, /* udma0-6 ; FIXME */
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &pdc_20621_ops,
 	},
 
@@ -260,16 +320,16 @@ static int pdc_port_start(struct ata_port *ap)
 	return 0;
 }
 
-static void pdc_20621_phy_reset (struct ata_port *ap)
+static void pdc_20621_phy_reset(struct ata_port *ap)
 {
 	VPRINTK("ENTER\n");
-        ap->cbl = ATA_CBL_SATA;
-        ata_port_probe(ap);
-        ata_bus_reset(ap);
+	ap->cbl = ATA_CBL_SATA;
+	ata_port_probe(ap);
+	ata_bus_reset(ap);
 }
 
 static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
-				    	   unsigned int portno,
+				   unsigned int portno,
 					   unsigned int total_len)
 {
 	u32 addr;
@@ -291,7 +351,7 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
 }
 
 static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
-				    	    unsigned int portno,
+				    unsigned int portno,
 					    unsigned int total_len)
 {
 	u32 addr;
@@ -651,8 +711,8 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
 	return ata_qc_issue_prot(qc);
 }
 
-static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
-                                          struct ata_queued_cmd *qc,
+static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
+					  struct ata_queued_cmd *qc,
 					  unsigned int doing_hdma,
 					  void __iomem *mmio)
 {
@@ -743,7 +803,7 @@ static void pdc20621_irq_clear(struct ata_port *ap)
 	readl(mmio + PDC_20621_SEQMASK);
 }
 
-static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
+static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
 	struct ata_host *host = dev_instance;
 	struct ata_port *ap;
@@ -776,9 +836,9 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		return IRQ_NONE;
 	}
 
-        spin_lock(&host->lock);
+	spin_lock(&host->lock);
 
-        for (i = 1; i < 9; i++) {
+	for (i = 1; i < 9; i++) {
 		port_no = i - 1;
 		if (port_no > 3)
 			port_no -= 4;
@@ -792,14 +852,14 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
 		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
-			qc = ata_qc_from_tag(ap, ap->active_tag);
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 				handled += pdc20621_host_intr(ap, qc, (i > 4),
 							      mmio_base);
 		}
 	}
 
-        spin_unlock(&host->lock);
+	spin_unlock(&host->lock);
 
 	VPRINTK("mask == 0x%x\n", mask);
 
@@ -819,7 +879,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
 
 	spin_lock_irqsave(&host->lock, flags);
 
-	qc = ata_qc_from_tag(ap, ap->active_tag);
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
@@ -846,16 +906,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
 
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
-	WARN_ON (tf->protocol == ATA_PROT_DMA ||
-		 tf->protocol == ATA_PROT_NODATA);
+	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+		tf->protocol == ATA_PROT_NODATA);
 	ata_tf_load(ap, tf);
 }
 
 
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
 {
-	WARN_ON (tf->protocol == ATA_PROT_DMA ||
-		 tf->protocol == ATA_PROT_NODATA);
+	WARN_ON(tf->protocol == ATA_PROT_DMA ||
+		tf->protocol == ATA_PROT_NODATA);
 	ata_exec_command(ap, tf);
 }
 
@@ -893,7 +953,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 	mmio += PDC_CHIP0_OFS;
 
 	page_mask = 0x00;
-   	window_size = 0x2000 * 4; /* 32K byte uchar size */
+	window_size = 0x2000 * 4; /* 32K byte uchar size */
 	idx = (u16) (offset / window_size);
 
 	writel(0x01, mmio + PDC_GENERAL_CTLR);
@@ -919,7 +979,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 			      window_size / 4);
 		psource += window_size;
 		size -= window_size;
-		idx ++;
+		idx++;
 	}
 
 	if (size) {
@@ -948,7 +1008,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
 	mmio += PDC_CHIP0_OFS;
 
 	page_mask = 0x00;
-   	window_size = 0x2000 * 4;       /* 32K byte uchar size */
+	window_size = 0x2000 * 4;       /* 32K byte uchar size */
 	idx = (u16) (offset / window_size);
 
 	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
@@ -971,7 +1031,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
 		readl(mmio + PDC_GENERAL_CTLR);
 		psource += window_size;
 		size -= window_size;
-		idx ++;
+		idx++;
 	}
 
 	if (size) {
@@ -990,7 +1050,7 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
 	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 	u32 i2creg  = 0;
 	u32 status;
-	u32 count =0;
+	u32 count = 0;
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -999,17 +1059,17 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
 	i2creg |= subaddr << 16;
 
 	/* Set the device and subaddress */
-	writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET);
-	readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
+	writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
+	readl(mmio + PDC_I2C_ADDR_DATA);
 
 	/* Write Control to perform read operation, mask int */
 	writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
-	       mmio + PDC_I2C_CONTROL_OFFSET);
+	       mmio + PDC_I2C_CONTROL);
 
 	for (count = 0; count <= 1000; count ++) {
-		status = readl(mmio + PDC_I2C_CONTROL_OFFSET);
+		status = readl(mmio + PDC_I2C_CONTROL);
 		if (status & PDC_I2C_COMPLETE) {
-			status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
+			status = readl(mmio + PDC_I2C_ADDR_DATA);
 			break;
 		} else if (count == 1000)
 			return 0;
@@ -1022,21 +1082,21 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
 
 static int pdc20621_detect_dimm(struct ata_host *host)
 {
-	u32 data=0 ;
+	u32 data = 0;
 	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 			     PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
-   		if (data == 100)
+		if (data == 100)
 			return 100;
-  	} else
+	} else
 		return 0;
 
 	if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
-		if(data <= 0x75)
+		if (data <= 0x75)
 			return 133;
-   	} else
+	} else
 		return 0;
 
-   	return 0;
+	return 0;
 }
 
 
@@ -1044,8 +1104,8 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
 {
 	u32 spd0[50];
 	u32 data = 0;
-   	int size, i;
-   	u8 bdimmsize;
+	int size, i;
+	u8 bdimmsize;
 	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 	static const struct {
 		unsigned int reg;
@@ -1068,40 +1128,40 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
 
-	for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++)
+	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
 		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 				  pdc_i2c_read_data[i].reg,
 				  &spd0[pdc_i2c_read_data[i].ofs]);
 
-   	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
-   	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
+	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
+	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
 		((((spd0[27] + 9) / 10) - 1) << 8) ;
-   	data |= (((((spd0[29] > spd0[28])
+	data |= (((((spd0[29] > spd0[28])
 		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
-   	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
+	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
 
-   	if (spd0[18] & 0x08)
+	if (spd0[18] & 0x08)
 		data |= ((0x03) << 14);
-   	else if (spd0[18] & 0x04)
+	else if (spd0[18] & 0x04)
 		data |= ((0x02) << 14);
-   	else if (spd0[18] & 0x01)
+	else if (spd0[18] & 0x01)
 		data |= ((0x01) << 14);
-   	else
+	else
 		data |= (0 << 14);
 
-  	/*
+	/*
 	   Calculate the size of bDIMMSize (power of 2) and
 	   merge the DIMM size by program start/end address.
 	*/
 
-   	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
-   	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
-   	data |= (((size / 16) - 1) << 16);
-   	data |= (0 << 23);
+	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
+	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
+	data |= (((size / 16) - 1) << 16);
+	data |= (0 << 23);
 	data |= 8;
-   	writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
-	readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
-   	return size;
+	writel(data, mmio + PDC_DIMM0_CONTROL);
+	readl(mmio + PDC_DIMM0_CONTROL);
+	return size;
 }
 
 
@@ -1112,9 +1172,9 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
 	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	/* hard-code chip #0 */
-   	mmio += PDC_CHIP0_OFS;
+	mmio += PDC_CHIP0_OFS;
 
-   	/*
+	/*
 	  Set To Default : DIMM Module Global Control Register (0x022259F1)
 	  DIMM Arbitration Disable (bit 20)
 	  DIMM Data/Control Output Driving Selection (bit12 - bit15)
@@ -1122,51 +1182,51 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
 	*/
 
 	data = 0x022259F1;
-	writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
-	readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
+	writel(data, mmio + PDC_SDRAM_CONTROL);
+	readl(mmio + PDC_SDRAM_CONTROL);
 
 	/* Turn on for ECC */
 	pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
 			  PDC_DIMM_SPD_TYPE, &spd0);
 	if (spd0 == 0x02) {
 		data |= (0x01 << 16);
-		writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
-		readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
+		writel(data, mmio + PDC_SDRAM_CONTROL);
+		readl(mmio + PDC_SDRAM_CONTROL);
 		printk(KERN_ERR "Local DIMM ECC Enabled\n");
-   	}
+	}
 
-   	/* DIMM Initialization Select/Enable (bit 18/19) */
-   	data &= (~(1<<18));
-   	data |= (1<<19);
-   	writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
+	/* DIMM Initialization Select/Enable (bit 18/19) */
+	data &= (~(1<<18));
+	data |= (1<<19);
+	writel(data, mmio + PDC_SDRAM_CONTROL);
 
-   	error = 1;
-   	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
-		data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
+	error = 1;
+	for (i = 1; i <= 10; i++) {   /* polling ~5 secs */
+		data = readl(mmio + PDC_SDRAM_CONTROL);
 		if (!(data & (1<<19))) {
-	   		error = 0;
-	   		break;
+			error = 0;
+			break;
 		}
 		msleep(i*100);
-   	}
-   	return error;
+	}
+	return error;
 }
 
 
 static unsigned int pdc20621_dimm_init(struct ata_host *host)
 {
 	int speed, size, length;
-	u32 addr,spd0,pci_status;
-	u32 tmp=0;
-	u32 time_period=0;
-	u32 tcount=0;
-	u32 ticks=0;
-	u32 clock=0;
-	u32 fparam=0;
+	u32 addr, spd0, pci_status;
+	u32 tmp = 0;
+	u32 time_period = 0;
+	u32 tcount = 0;
+	u32 ticks = 0;
+	u32 clock = 0;
+	u32 fparam = 0;
 	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
 
 	/* hard-code chip #0 */
-   	mmio += PDC_CHIP0_OFS;
+	mmio += PDC_CHIP0_OFS;
 
 	/* Initialize PLL based upon PCI Bus Frequency */
 
@@ -1176,7 +1236,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 	VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
 
 	/* Enable timer */
-	writel(0x00001a0, mmio + PDC_TIME_CONTROL);
+	writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
 	readl(mmio + PDC_TIME_CONTROL);
 
 	/* Wait 3 seconds */
@@ -1194,7 +1254,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 	   If SX4 is on PCI-X bus, after 3 seconds, the timer counter
 	   register should be >= (0xffffffff - 3x10^8).
 	*/
-	if(tcount >= PCI_X_TCOUNT) {
+	if (tcount >= PCI_X_TCOUNT) {
 		ticks = (time_period - tcount);
 		VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
 
@@ -1225,41 +1285,43 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 	if (!(speed = pdc20621_detect_dimm(host))) {
 		printk(KERN_ERR "Detect Local DIMM Fail\n");
 		return 1;	/* DIMM error */
-   	}
-   	VPRINTK("Local DIMM Speed = %d\n", speed);
+	}
+	VPRINTK("Local DIMM Speed = %d\n", speed);
 
-   	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
+	/* Programming DIMM0 Module Control Register (index_CID0:80h) */
 	size = pdc20621_prog_dimm0(host);
-   	VPRINTK("Local DIMM Size = %dMB\n",size);
+	VPRINTK("Local DIMM Size = %dMB\n", size);
 
-   	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
+	/* Programming DIMM Module Global Control Register (index_CID0:88h) */
 	if (pdc20621_prog_dimm_global(host)) {
 		printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
 		return 1;
-   	}
+	}
 
 #ifdef ATA_VERBOSE_DEBUG
 	{
-		u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
-  				'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
- 				 '1','.','1','0',
-  				'9','8','0','3','1','6','1','2',0,0};
+		u8 test_parttern1[40] =
+			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
+			'N','o','t',' ','Y','e','t',' ',
+			'D','e','f','i','n','e','d',' ',
+			'1','.','1','0',
+			'9','8','0','3','1','6','1','2',0,0};
 		u8 test_parttern2[40] = {0};
 
-		pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x10040, 40);
-		pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x40, 40);
+		pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
+		pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
 
-		pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x10040, 40);
-		pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40);
+		pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
+		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
 		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
 		       test_parttern2[1], &(test_parttern2[2]));
-		pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x10040,
+		pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
 				       40);
 		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
 		       test_parttern2[1], &(test_parttern2[2]));
 
-		pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x40, 40);
-		pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40);
+		pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
+		pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
 		printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
 		       test_parttern2[1], &(test_parttern2[2]));
 	}
@@ -1315,15 +1377,15 @@ static void pdc_20621_init(struct ata_host *host)
 	readl(mmio + PDC_HDMA_CTLSTAT);		/* flush */
 }
 
-static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pdc_sata_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
 {
 	static int printed_version;
 	const struct ata_port_info *ppi[] =
 		{ &pdc_port_info[ent->driver_data], NULL };
 	struct ata_host *host;
-	void __iomem *base;
 	struct pdc_host_priv *hpriv;
-	int rc;
+	int i, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -1349,11 +1411,17 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
 		return rc;
 	host->iomap = pcim_iomap_table(pdev);
 
-	base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
-	pdc_sata_setup_port(&host->ports[0]->ioaddr, base + 0x200);
-	pdc_sata_setup_port(&host->ports[1]->ioaddr, base + 0x280);
-	pdc_sata_setup_port(&host->ports[2]->ioaddr, base + 0x300);
-	pdc_sata_setup_port(&host->ports[3]->ioaddr, base + 0x380);
+	for (i = 0; i < 4; i++) {
+		struct ata_port *ap = host->ports[i];
+		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
+		unsigned int offset = 0x200 + i * 0x80;
+
+		pdc_sata_setup_port(&ap->ioaddr, base + offset);
+
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
+		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
+	}
 
 	/* configure and activate */
 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 6815de7..e710e71 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -36,7 +36,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_uli"
-#define DRV_VERSION	"1.2"
+#define DRV_VERSION	"1.3"
 
 enum {
 	uli_5289		= 0,
@@ -56,9 +56,9 @@ struct uli_priv {
 	unsigned int		scr_cfg_addr[uli_max_ports];
 };
 
-static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 
 static const struct pci_device_id uli_pci_tbl[] = {
 	{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
@@ -94,8 +94,6 @@ static struct scsi_host_template uli_sht = {
 };
 
 static const struct ata_port_operations uli_ops = {
-	.port_disable		= ata_port_disable,
-
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -117,7 +115,6 @@ static const struct ata_port_operations uli_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= uli_scr_read,
 	.scr_write		= uli_scr_write,
@@ -129,7 +126,7 @@ static const struct ata_port_info uli_port_info = {
 	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 			  ATA_FLAG_IGN_SIMPLEX,
 	.pio_mask       = 0x1f,		/* pio0-4 */
-	.udma_mask      = 0x7f,		/* udma0-6 */
+	.udma_mask      = ATA_UDMA6,
 	.port_ops       = &uli_ops,
 };
 
@@ -146,7 +143,7 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
 	return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
 }
 
-static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
+static u32 uli_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
@@ -156,7 +153,7 @@ static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
 	return val;
 }
 
-static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
+static void uli_scr_cfg_write(struct ata_port *ap, unsigned int scr, u32 val)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
@@ -164,23 +161,25 @@ static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
 	pci_write_config_dword(pdev, cfg_addr, val);
 }
 
-static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
+		return -EINVAL;
 
-	return uli_scr_cfg_read(ap, sc_reg);
+	*val = uli_scr_cfg_read(ap, sc_reg);
+	return 0;
 }
 
-static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
-	if (sc_reg > SCR_CONTROL)	//SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
-		return;
+	if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
+		return -EINVAL;
 
 	uli_scr_cfg_write(ap, sc_reg, val);
+	return 0;
 }
 
-static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
@@ -213,7 +212,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	host->private_data = hpriv;
 
 	/* the first two ports are standard SFF */
-	rc = ata_pci_init_native_host(host);
+	rc = ata_pci_init_sff_host(host);
 	if (rc)
 		return rc;
 
@@ -240,6 +239,12 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
 		ata_std_ports(ioaddr);
 
+		ata_port_desc(host->ports[2],
+			"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, 0) + 8,
+			((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4,
+			(unsigned long long)pci_resource_start(pdev, 4) + 16);
+
 		ioaddr = &host->ports[3]->ioaddr;
 		ioaddr->cmd_addr = iomap[2] + 8;
 		ioaddr->altstatus_addr =
@@ -248,6 +253,13 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		ioaddr->bmdma_addr = iomap[4] + 24;
 		hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
 		ata_std_ports(ioaddr);
+
+		ata_port_desc(host->ports[3],
+			"cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, 2) + 8,
+			((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4,
+			(unsigned long long)pci_resource_start(pdev, 4) + 24);
+
 		break;
 
 	case uli_5289:
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index e8b90e7..3ef072f 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -3,7 +3,7 @@
  *
  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
  * 		   Please ALWAYS copy linux-ide@vger.kernel.org
- 		   on emails.
+ *		   on emails.
  *
  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
  *  Copyright 2003-2004 Jeff Garzik
@@ -46,7 +46,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_via"
-#define DRV_VERSION	"2.2"
+#define DRV_VERSION	"2.3"
 
 enum board_ids_enum {
 	vt6420,
@@ -57,7 +57,6 @@ enum {
 	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
 	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
 	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
-	SATA_PATA_SHARING	= 0x49, /* PATA/SATA sharing func ctrl */
 	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/ cable detect */
 	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */
 
@@ -68,12 +67,11 @@ enum {
 	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
 
 	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
-	SATA_2DEV		= (1 << 5), /* SATA is master/slave */
 };
 
-static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
 static void svia_noop_freeze(struct ata_port *ap);
 static void vt6420_error_handler(struct ata_port *ap);
 static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -122,8 +120,6 @@ static struct scsi_host_template svia_sht = {
 };
 
 static const struct ata_port_operations vt6420_sata_ops = {
-	.port_disable		= ata_port_disable,
-
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -146,14 +142,11 @@ static const struct ata_port_operations vt6420_sata_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_operations vt6421_pata_ops = {
-	.port_disable		= ata_port_disable,
-
 	.set_piomode		= vt6421_set_pio_mode,
 	.set_dmamode		= vt6421_set_dma_mode,
 
@@ -180,14 +173,11 @@ static const struct ata_port_operations vt6421_pata_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.port_start		= ata_port_start,
 };
 
 static const struct ata_port_operations vt6421_sata_ops = {
-	.port_disable		= ata_port_disable,
-
 	.tf_load		= ata_tf_load,
 	.tf_read		= ata_tf_read,
 	.check_status		= ata_check_status,
@@ -211,7 +201,6 @@ static const struct ata_port_operations vt6421_sata_ops = {
 
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 
 	.scr_read		= svia_scr_read,
 	.scr_write		= svia_scr_write,
@@ -223,7 +212,7 @@ static const struct ata_port_info vt6420_port_info = {
 	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0x07,
-	.udma_mask	= 0x7f,
+	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &vt6420_sata_ops,
 };
 
@@ -231,7 +220,7 @@ static struct ata_port_info vt6421_sport_info = {
 	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0x07,
-	.udma_mask	= 0x7f,
+	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &vt6421_sata_ops,
 };
 
@@ -239,7 +228,7 @@ static struct ata_port_info vt6421_pport_info = {
 	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
 	.pio_mask	= 0x1f,
 	.mwdma_mask	= 0,
-	.udma_mask	= 0x7f,
+	.udma_mask	= ATA_UDMA6,
 	.port_ops	= &vt6421_pata_ops,
 };
 
@@ -249,18 +238,20 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
-static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
-	return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
+		return -EINVAL;
+	*val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
+	return 0;
 }
 
-static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return;
+		return -EINVAL;
 	iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
+	return 0;
 }
 
 static void svia_noop_freeze(struct ata_port *ap)
@@ -274,7 +265,7 @@ static void svia_noop_freeze(struct ata_port *ap)
 
 /**
  *	vt6420_prereset - prereset for vt6420
- *	@ap: target ATA port
+ *	@link: target ATA link
  *	@deadline: deadline jiffies for the operation
  *
  *	SCR registers on vt6420 are pieces of shit and may hang the
@@ -292,9 +283,10 @@ static void svia_noop_freeze(struct ata_port *ap)
  *	RETURNS:
  *	0 on success, -errno otherwise.
  */
-static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
+static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
 {
-	struct ata_eh_context *ehc = &ap->eh_context;
+	struct ata_port *ap = link->ap;
+	struct ata_eh_context *ehc = &ap->link.eh_context;
 	unsigned long timeout = jiffies + (HZ * 5);
 	u32 sstatus, scontrol;
 	int online;
@@ -303,22 +295,21 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
 	if (!(ap->pflags & ATA_PFLAG_LOADING))
 		goto skip_scr;
 
-	/* Resume phy.  This is the old resume sequence from
-	 * __sata_phy_reset().
-	 */
+	/* Resume phy.  This is the old SATA resume sequence */
 	svia_scr_write(ap, SCR_CONTROL, 0x300);
-	svia_scr_read(ap, SCR_CONTROL); /* flush */
+	svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */
 
 	/* wait for phy to become ready, if necessary */
 	do {
 		msleep(200);
-		if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
+		svia_scr_read(ap, SCR_STATUS, &sstatus);
+		if ((sstatus & 0xf) != 1)
 			break;
 	} while (time_before(jiffies, timeout));
 
 	/* open code sata_print_link_status() */
-	sstatus = svia_scr_read(ap, SCR_STATUS);
-	scontrol = svia_scr_read(ap, SCR_CONTROL);
+	svia_scr_read(ap, SCR_STATUS, &sstatus);
+	svia_scr_read(ap, SCR_CONTROL, &scontrol);
 
 	online = (sstatus & 0xf) == 0x3;
 
@@ -327,7 +318,7 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
 			online ? "up" : "down", sstatus, scontrol);
 
 	/* SStatus is read one more time */
-	svia_scr_read(ap, SCR_STATUS);
+	svia_scr_read(ap, SCR_STATUS, &sstatus);
 
 	if (!online) {
 		/* tell EH to bail */
@@ -370,7 +361,7 @@ static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
-	pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->pio_mode - XFER_UDMA_0]);
+	pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]);
 }
 
 static const unsigned int svia_bar_sizes[] = {
@@ -381,12 +372,12 @@ static const unsigned int vt6421_bar_sizes[] = {
 	16, 16, 16, 16, 32, 128
 };
 
-static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port)
+static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
 {
 	return addr + (port * 128);
 }
 
-static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port)
+static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
 {
 	return addr + (port * 64);
 }
@@ -406,6 +397,9 @@ static void vt6421_init_addrs(struct ata_port *ap)
 	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
 
 	ata_std_ports(ioaddr);
+
+	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
+	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
 }
 
 static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
@@ -414,7 +408,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 	struct ata_host *host;
 	int rc;
 
-	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 	*r_host = host;
@@ -478,7 +472,7 @@ static void svia_configure(struct pci_dev *pdev)
 	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "enabling SATA channels (0x%x)\n",
-		           (int) tmp8);
+			   (int) tmp8);
 		tmp8 |= ALL_PORTS;
 		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
 	}
@@ -488,7 +482,7 @@ static void svia_configure(struct pci_dev *pdev)
 	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "enabling SATA channel interrupts (0x%x)\n",
-		           (int) tmp8);
+			   (int) tmp8);
 		tmp8 |= ALL_PORTS;
 		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
 	}
@@ -498,21 +492,20 @@ static void svia_configure(struct pci_dev *pdev)
 	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
 		dev_printk(KERN_DEBUG, &pdev->dev,
 			   "enabling SATA channel native mode (0x%x)\n",
-		           (int) tmp8);
+			   (int) tmp8);
 		tmp8 |= NATIVE_MODE_ALL;
 		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
 	}
 }
 
-static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
 	unsigned int i;
 	int rc;
 	struct ata_host *host;
 	int board_id = (int) ent->driver_data;
-	const int *bar_sizes;
-	u8 tmp8;
+	const unsigned *bar_sizes;
 
 	if (!printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -521,19 +514,10 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	if (board_id == vt6420) {
-		pci_read_config_byte(pdev, SATA_PATA_SHARING, &tmp8);
-		if (tmp8 & SATA_2DEV) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "SATA master/slave not supported (0x%x)\n",
-		       		   (int) tmp8);
-			return -EIO;
-		}
-
+	if (board_id == vt6420)
 		bar_sizes = &svia_bar_sizes[0];
-	} else {
+	else
 		bar_sizes = &vt6421_bar_sizes[0];
-	}
 
 	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
 		if ((pci_resource_start(pdev, i) == 0) ||
@@ -541,8 +525,8 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 			dev_printk(KERN_ERR, &pdev->dev,
 				"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
 				i,
-			        (unsigned long long)pci_resource_start(pdev, i),
-			        (unsigned long long)pci_resource_len(pdev, i));
+				(unsigned long long)pci_resource_start(pdev, i),
+				(unsigned long long)pci_resource_len(pdev, i));
 			return -ENODEV;
 		}
 
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 85d12b0..efb3476 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -47,7 +47,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_vsc"
-#define DRV_VERSION	"2.2"
+#define DRV_VERSION	"2.3"
 
 enum {
 	VSC_MMIO_BAR			= 0,
@@ -98,20 +98,21 @@ enum {
 			      VSC_SATA_INT_PHY_CHANGE),
 };
 
-static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return 0xffffffffU;
-	return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+		return -EINVAL;
+	*val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
 
-static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
-			       u32 val)
+static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
 {
 	if (sc_reg > SCR_CONTROL)
-		return;
+		return -EINVAL;
 	writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+	return 0;
 }
 
 
@@ -161,7 +162,8 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 	/*
 	 * The only thing the ctl register is used for is SRST.
 	 * That is not enabled or disabled via tf_load.
-	 * However, if ATA_NIEN is changed, then we need to change the interrupt register.
+	 * However, if ATA_NIEN is changed, then we need to change
+	 * the interrupt register.
 	 */
 	if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
 		ap->last_ctl = tf->ctl;
@@ -218,7 +220,7 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 		tf->hob_lbal = lbal >> 8;
 		tf->hob_lbam = lbam >> 8;
 		tf->hob_lbah = lbah >> 8;
-        }
+	}
 }
 
 static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
@@ -239,7 +241,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
 		return;
 	}
 
-	qc = ata_qc_from_tag(ap, ap->active_tag);
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
 		handled = ata_host_intr(ap, qc);
 
@@ -255,9 +257,10 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
 /*
  * vsc_sata_interrupt
  *
- * Read the interrupt register and process for the devices that have them pending.
+ * Read the interrupt register and process for the devices that have
+ * them pending.
  */
-static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs)
+static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs)
 {
 	struct ata_host *host = dev_instance;
 	unsigned int i;
@@ -286,7 +289,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, struct pt_re
 				handled++;
 			} else
 				dev_printk(KERN_ERR, host->dev,
-					": interrupt from disabled port %d\n", i);
+					"interrupt from disabled port %d\n", i);
 		}
 	}
 
@@ -316,7 +319,6 @@ static struct scsi_host_template vsc_sata_sht = {
 
 
 static const struct ata_port_operations vsc_sata_ops = {
-	.port_disable		= ata_port_disable,
 	.tf_load		= vsc_sata_tf_load,
 	.tf_read		= vsc_sata_tf_read,
 	.exec_command		= ata_exec_command,
@@ -335,7 +337,6 @@ static const struct ata_port_operations vsc_sata_ops = {
 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 	.irq_clear		= ata_bmdma_irq_clear,
 	.irq_on			= ata_irq_on,
-	.irq_ack		= ata_irq_ack,
 	.scr_read		= vsc_sata_scr_read,
 	.scr_write		= vsc_sata_scr_write,
 	.port_start		= ata_port_start,
@@ -364,14 +365,15 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
 }
 
 
-static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
+				       const struct pci_device_id *ent)
 {
 	static const struct ata_port_info pi = {
 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO,
 		.pio_mask	= 0x1f,
 		.mwdma_mask	= 0x07,
-		.udma_mask	= 0x7f,
+		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &vsc_sata_ops,
 	};
 	const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -407,9 +409,15 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
 
 	mmio_base = host->iomap[VSC_MMIO_BAR];
 
-	for (i = 0; i < host->n_ports; i++)
-		vsc_sata_setup_port(&host->ports[i]->ioaddr,
-				    mmio_base + (i + 1) * VSC_SATA_PORT_OFFSET);
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;
+
+		vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+		ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
+	}
 
 	/*
 	 * Use 32 bit DMA mask, because 64 bit address support is poor.
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 39eebae..c697a7a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5039,22 +5039,22 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
 	rc = ipr_device_reset(ioa_cfg, res);
 
 	if (rc) {
-		ap->ops->port_disable(ap);
+		ata_port_disable(ap);
 		goto out_unlock;
 	}
 
 	switch(res->cfgte.proto) {
 	case IPR_PROTO_SATA:
 	case IPR_PROTO_SAS_STP:
-		ap->device[0].class = ATA_DEV_ATA;
+		ap->link.device[0].class = ATA_DEV_ATA;
 		break;
 	case IPR_PROTO_SATA_ATAPI:
 	case IPR_PROTO_SAS_STP_ATAPI:
-		ap->device[0].class = ATA_DEV_ATAPI;
+		ap->link.device[0].class = ATA_DEV_ATAPI;
 		break;
 	default:
-		ap->device[0].class = ATA_DEV_UNKNOWN;
-		ap->ops->port_disable(ap);
+		ap->link.device[0].class = ATA_DEV_UNKNOWN;
+		ata_port_disable(ap);
 		break;
 	};
 
@@ -5192,6 +5192,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
 	u32 ioadl_flags = 0;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+	struct ipr_ioadl_desc *last_ioadl = NULL;
 	int len = qc->nbytes + qc->pad_len;
 	struct scatterlist *sg;
 
@@ -5214,11 +5215,13 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
 	ata_for_each_sg(sg, qc) {
 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
-		if (ata_sg_is_last(sg, qc))
-			ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-		else
-			ioadl++;
+
+		last_ioadl = ioadl;
+		ioadl++;
 	}
+
+	if (likely(last_ioadl))
+		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
 }
 
 /**
@@ -5320,7 +5323,6 @@ static u8 ipr_ata_check_altstatus(struct ata_port *ap)
 }
 
 static struct ata_port_operations ipr_sata_ops = {
-	.port_disable = ata_port_disable,
 	.check_status = ipr_ata_check_status,
 	.check_altstatus = ipr_ata_check_altstatus,
 	.dev_select = ata_noop_dev_select,
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 703febb..4f9e5ed 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -43,6 +43,7 @@ enum {
 	ATA_MAX_SECTORS_128	= 128,
 	ATA_MAX_SECTORS		= 256,
 	ATA_MAX_SECTORS_LBA48	= 65535,/* TODO: 65536? */
+	ATA_MAX_SECTORS_TAPE	= 65535,
 
 	ATA_ID_WORDS		= 256,
 	ATA_ID_SERNO		= 10,
@@ -64,6 +65,28 @@ enum {
 	ATA_ID_PROD_LEN		= 40,
 
 	ATA_PCI_CTL_OFS		= 2,
+
+	ATA_PIO0		= (1 << 0),
+	ATA_PIO1		= ATA_PIO0 | (1 << 1),
+	ATA_PIO2		= ATA_PIO1 | (1 << 2),
+	ATA_PIO3		= ATA_PIO2 | (1 << 3),
+	ATA_PIO4		= ATA_PIO3 | (1 << 4),
+	ATA_PIO5		= ATA_PIO4 | (1 << 5),
+	ATA_PIO6		= ATA_PIO5 | (1 << 6),
+
+	ATA_SWDMA0		= (1 << 0),
+	ATA_SWDMA1		= ATA_SWDMA0 | (1 << 1),
+	ATA_SWDMA2		= ATA_SWDMA1 | (1 << 2),
+
+	ATA_SWDMA2_ONLY		= (1 << 2),
+
+	ATA_MWDMA0		= (1 << 0),
+	ATA_MWDMA1		= ATA_MWDMA0 | (1 << 1),
+	ATA_MWDMA2		= ATA_MWDMA1 | (1 << 2),
+
+	ATA_MWDMA12_ONLY	= (1 << 1) | (1 << 2),
+	ATA_MWDMA2_ONLY		= (1 << 2),
+
 	ATA_UDMA0		= (1 << 0),
 	ATA_UDMA1		= ATA_UDMA0 | (1 << 1),
 	ATA_UDMA2		= ATA_UDMA1 | (1 << 2),
@@ -126,6 +149,7 @@ enum {
 	ATA_REG_IRQ		= ATA_REG_NSECT,
 
 	/* ATA device commands */
+	ATA_CMD_DEV_RESET	= 0x08, /* ATAPI device reset */
 	ATA_CMD_CHK_POWER	= 0xE5, /* check power mode */
 	ATA_CMD_STANDBY		= 0xE2, /* place in standby power mode */
 	ATA_CMD_IDLE		= 0xE3, /* place in idle power mode */
@@ -155,14 +179,19 @@ enum {
 	ATA_CMD_PACKET		= 0xA0,
 	ATA_CMD_VERIFY		= 0x40,
 	ATA_CMD_VERIFY_EXT	= 0x42,
- 	ATA_CMD_STANDBYNOW1	= 0xE0,
- 	ATA_CMD_IDLEIMMEDIATE	= 0xE1,
+	ATA_CMD_STANDBYNOW1	= 0xE0,
+	ATA_CMD_IDLEIMMEDIATE	= 0xE1,
+	ATA_CMD_SLEEP		= 0xE6,
 	ATA_CMD_INIT_DEV_PARAMS	= 0x91,
 	ATA_CMD_READ_NATIVE_MAX	= 0xF8,
 	ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
 	ATA_CMD_SET_MAX		= 0xF9,
 	ATA_CMD_SET_MAX_EXT	= 0x37,
 	ATA_CMD_READ_LOG_EXT	= 0x2f,
+	ATA_CMD_PMP_READ	= 0xE4,
+	ATA_CMD_PMP_WRITE	= 0xE8,
+	ATA_CMD_CONF_OVERLAY	= 0xB1,
+	ATA_CMD_SEC_FREEZE_LOCK	= 0xF5,
 
 	/* READ_LOG_EXT pages */
 	ATA_LOG_SATA_NCQ	= 0x10,
@@ -205,12 +234,54 @@ enum {
 
 	SETFEATURES_SPINUP	= 0x07, /* Spin-up drive */
 
+	SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
+	SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
+
+	/* SETFEATURE Sector counts for SATA features */
+	SATA_AN			= 0x05,  /* Asynchronous Notification */
+	SATA_DIPM		= 0x03,  /* Device Initiated Power Management */
+
+	/* feature values for SET_MAX */
+	ATA_SET_MAX_ADDR	= 0x00,
+	ATA_SET_MAX_PASSWD	= 0x01,
+	ATA_SET_MAX_LOCK	= 0x02,
+	ATA_SET_MAX_UNLOCK	= 0x03,
+	ATA_SET_MAX_FREEZE_LOCK	= 0x04,
+
+	/* feature values for DEVICE CONFIGURATION OVERLAY */
+	ATA_DCO_RESTORE		= 0xC0,
+	ATA_DCO_FREEZE_LOCK	= 0xC1,
+	ATA_DCO_IDENTIFY	= 0xC2,
+	ATA_DCO_SET		= 0xC3,
+
 	/* ATAPI stuff */
 	ATAPI_PKT_DMA		= (1 << 0),
 	ATAPI_DMADIR		= (1 << 2),	/* ATAPI data dir:
 						   0=to device, 1=to host */
 	ATAPI_CDB_LEN		= 16,
 
+	/* PMP stuff */
+	SATA_PMP_MAX_PORTS	= 15,
+	SATA_PMP_CTRL_PORT	= 15,
+
+	SATA_PMP_GSCR_DWORDS	= 128,
+	SATA_PMP_GSCR_PROD_ID	= 0,
+	SATA_PMP_GSCR_REV	= 1,
+	SATA_PMP_GSCR_PORT_INFO	= 2,
+	SATA_PMP_GSCR_ERROR	= 32,
+	SATA_PMP_GSCR_ERROR_EN	= 33,
+	SATA_PMP_GSCR_FEAT	= 64,
+	SATA_PMP_GSCR_FEAT_EN	= 96,
+
+	SATA_PMP_PSCR_STATUS	= 0,
+	SATA_PMP_PSCR_ERROR	= 1,
+	SATA_PMP_PSCR_CONTROL	= 2,
+
+	SATA_PMP_FEAT_BIST	= (1 << 0),
+	SATA_PMP_FEAT_PMREQ	= (1 << 1),
+	SATA_PMP_FEAT_DYNSSC	= (1 << 2),
+	SATA_PMP_FEAT_NOTIFY	= (1 << 3),
+
 	/* cable types */
 	ATA_CBL_NONE		= 0,
 	ATA_CBL_PATA40		= 1,
@@ -234,6 +305,15 @@ enum {
 	SERR_PROTOCOL		= (1 << 10), /* protocol violation */
 	SERR_INTERNAL		= (1 << 11), /* host internal error */
 	SERR_PHYRDY_CHG		= (1 << 16), /* PHY RDY changed */
+	SERR_PHY_INT_ERR	= (1 << 17), /* PHY internal error */
+	SERR_COMM_WAKE		= (1 << 18), /* Comm wake */
+	SERR_10B_8B_ERR		= (1 << 19), /* 10b to 8b decode error */
+	SERR_DISPARITY		= (1 << 20), /* Disparity */
+	SERR_CRC		= (1 << 21), /* CRC error */
+	SERR_HANDSHAKE		= (1 << 22), /* Handshake error */
+	SERR_LINK_SEQ_ERR	= (1 << 23), /* Link sequence error */
+	SERR_TRANS_ST_ERROR	= (1 << 24), /* Transport state trans. error */
+	SERR_UNRECOG_FIS	= (1 << 25), /* Unrecognized FIS */
 	SERR_DEV_XCHG		= (1 << 26), /* device exchanged */
 
 	/* struct ata_taskfile flags */
@@ -294,24 +374,17 @@ struct ata_taskfile {
 };
 
 #define ata_id_is_ata(id)	(((id)[0] & (1 << 15)) == 0)
-#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
-#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
-#define ata_id_hpa_enabled(id)	((id)[85] & (1 << 10))
-#define ata_id_has_fua(id)	((id)[84] & (1 << 6))
-#define ata_id_has_flush(id)	((id)[83] & (1 << 12))
-#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
-#define ata_id_has_lba48(id)	((id)[83] & (1 << 10))
-#define ata_id_has_hpa(id)	((id)[82] & (1 << 10))
-#define ata_id_has_wcache(id)	((id)[82] & (1 << 5))
-#define ata_id_has_pm(id)	((id)[82] & (1 << 3))
 #define ata_id_has_lba(id)	((id)[49] & (1 << 9))
 #define ata_id_has_dma(id)	((id)[49] & (1 << 8))
 #define ata_id_has_ncq(id)	((id)[76] & (1 << 8))
 #define ata_id_queue_depth(id)	(((id)[75] & 0x1f) + 1)
 #define ata_id_removeable(id)	((id)[0] & (1 << 7))
-#define ata_id_has_dword_io(id)	((id)[50] & (1 << 0))
+#define ata_id_has_dword_io(id)	((id)[48] & (1 << 0))
+#define ata_id_has_atapi_AN(id)	\
+	( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
+	  ((id)[78] & (1 << 5)) )
 #define ata_id_iordy_disable(id) ((id)[49] & (1 << 10))
-#define ata_id_has_iordy(id) ((id)[49] & (1 << 9))
+#define ata_id_has_iordy(id) ((id)[49] & (1 << 11))
 #define ata_id_u32(id,n)	\
 	(((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)]))
 #define ata_id_u64(id,n)	\
@@ -322,6 +395,112 @@ struct ata_taskfile {
 
 #define ata_id_cdb_intr(id)	(((id)[0] & 0x60) == 0x20)
 
+static inline int ata_id_has_hipm(const u16 *id)
+{
+	u16 val = id[76];
+
+	if (val == 0 || val == 0xffff)
+		return 0;
+
+	return val & (1 << 9);
+}
+
+static inline int ata_id_has_dipm(const u16 *id)
+{
+	u16 val = id[78];
+
+	if (val == 0 || val == 0xffff)
+		return 0;
+
+	return val & (1 << 3);
+}
+
+static inline int ata_id_has_fua(const u16 *id)
+{
+	if ((id[84] & 0xC000) != 0x4000)
+		return 0;
+	return id[84] & (1 << 6);
+}
+
+static inline int ata_id_has_flush(const u16 *id)
+{
+	if ((id[83] & 0xC000) != 0x4000)
+		return 0;
+	return id[83] & (1 << 12);
+}
+
+static inline int ata_id_has_flush_ext(const u16 *id)
+{
+	if ((id[83] & 0xC000) != 0x4000)
+		return 0;
+	return id[83] & (1 << 13);
+}
+
+static inline int ata_id_has_lba48(const u16 *id)
+{
+	if ((id[83] & 0xC000) != 0x4000)
+		return 0;
+	if (!ata_id_u64(id, 100))
+		return 0;
+	return id[83] & (1 << 10);
+}
+
+static inline int ata_id_hpa_enabled(const u16 *id)
+{
+	/* Yes children, word 83 valid bits cover word 82 data */
+	if ((id[83] & 0xC000) != 0x4000)
+		return 0;
+	/* And 87 covers 85-87 */
+	if ((id[87] & 0xC000) != 0x4000)
+		return 0;
+	/* Check command sets enabled as well as supported */
+	if ((id[85] & ( 1 << 10)) == 0)
+		return 0;
+	return id[82] & (1 << 10);
+}
+
+static inline int ata_id_has_wcache(const u16 *id)
+{
+	/* Yes children, word 83 valid bits cover word 82 data */
+	if ((id[83] & 0xC000) != 0x4000)
+		return 0;
+	return id[82] & (1 << 5);
+}
+
+static inline int ata_id_has_pm(const u16 *id)
+{
+	if ((id[83] & 0xC000) != 0x4000)
+		return 0;
+	return id[82] & (1 << 3);
+}
+
+static inline int ata_id_rahead_enabled(const u16 *id)
+{
+	if ((id[87] & 0xC000) != 0x4000)
+		return 0;
+	return id[85] & (1 << 6);
+}
+
+static inline int ata_id_wcache_enabled(const u16 *id)
+{
+	if ((id[87] & 0xC000) != 0x4000)
+		return 0;
+	return id[85] & (1 << 5);
+}
+
+/**
+ *	ata_id_major_version	-	get ATA level of drive
+ *	@id: Identify data
+ *
+ *	Caveats:
+ *		ATA-1 considers identify optional
+ *		ATA-2 introduces mandatory identify
+ *		ATA-3 introduces word 80 and accurate reporting
+ *
+ *	The practical impact of this is that ata_id_major_version cannot
+ *	reliably report on drives below ATA3.
+ */
+
 static inline unsigned int ata_id_major_version(const u16 *id)
 {
 	unsigned int mver;
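
The identify-data macros removed above return as inline functions that first verify the command-set validity signature (bits 15:14 of word 83, or of word 87 for the "enabled" bits, must read 01b) before trusting any feature bit, so bogus IDENTIFY data no longer turns features on. A hedged caller sketch (not part of the patch; the helper name is invented):

#include <linux/ata.h>

/* Sketch: pick the flush command for a device, relying on the
 * validity-checked helpers above rather than raw id[] bits. */
static u8 example_pick_flush_cmd(const u16 *id)
{
	if (ata_id_has_flush_ext(id))
		return ATA_CMD_FLUSH_EXT;	/* 0xEA */
	if (ata_id_has_flush(id))
		return ATA_CMD_FLUSH;		/* 0xE7 */
	return 0;				/* no cache flush supported */
}
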
@@ -373,6 +552,15 @@ static inline int ata_drive_40wire(const u16 *dev_id)
 	return 1;
 }
 
+static inline int ata_drive_40wire_relaxed(const u16 *dev_id)
+{
+	if (ata_id_is_sata(dev_id))
+		return 0;	/* SATA */
+	if ((dev_id[93] & 0x2000) == 0x2000)
+		return 0;	/* 80 wire */
+	return 1;
+}
+
 static inline int atapi_cdb_len(const u16 *dev_id)
 {
 	u16 tmp = dev_id[0] & 0x3;
@@ -383,6 +571,11 @@ static inline int atapi_cdb_len(const u16 *dev_id)
 	}
 }
 
+static inline int atapi_command_packet_set(const u16 *dev_id)
+{
+	return (dev_id[0] >> 8) & 0x1f;
+}
+
 static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
 {
 	return (tf->protocol == ATA_PROT_ATAPI) ||
@@ -417,4 +610,9 @@ static inline int lba_48_ok(u64 block, u32 n_block)
 	return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
 }
 
+#define sata_pmp_gscr_vendor(gscr)	((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff)
+#define sata_pmp_gscr_devid(gscr)	((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16)
+#define sata_pmp_gscr_rev(gscr)		(((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff)
+#define sata_pmp_gscr_ports(gscr)	((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf)
+
 #endif /* __LINUX_ATA_H__ */
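
The sata_pmp_gscr_* accessors decode the General Status and Control Register block read from a port multiplier, which the libata.h changes below store in the same union as the IDENTIFY data. A usage sketch, not part of the patch:

#include <linux/kernel.h>
#include <linux/ata.h>

/* Sketch: log a port multiplier's identity from its GSCR dwords. */
static void example_report_pmp(const u32 *gscr)
{
	printk(KERN_INFO "PMP %04x:%04x rev %u with %u device ports\n",
	       sata_pmp_gscr_vendor(gscr), sata_pmp_gscr_devid(gscr),
	       sata_pmp_gscr_rev(gscr), sata_pmp_gscr_ports(gscr));
}
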
diff --git a/include/linux/libata-compat.h b/include/linux/libata-compat.h
index b439d83..9a6a399 100644
--- a/include/linux/libata-compat.h
+++ b/include/linux/libata-compat.h
@@ -1,6 +1,54 @@
 #ifndef __LIBATA_COMPAT_H__
 #define __LIBATA_COMPAT_H__
 
+#include <linux/pci.h>
+
+#define init_timer_deferrable(foo) init_timer(foo)
+
+#define for_each_sg(sgl, sg, n_elem, i) \
+	for ((sg) = (sgl), (i) = 0; (i) < (n_elem); (i)++, (sg)++)
+
 typedef void (*work_func_t)(void *);
 
+static inline void sg_init_table(struct scatterlist *sg, unsigned int count)
+{
+}
+
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	return sg + 1;
+}
+
+static inline struct scatterlist *sg_last(struct scatterlist *sgl,
+                                          unsigned int nents)
+{
+	return &sgl[nents - 1];
+}
+
+static inline void sg_set_page(struct scatterlist *sg, struct page *page,
+                               unsigned int len, unsigned int offset)
+{
+	sg->page = page;
+	sg->offset = offset;
+}
+
+static inline struct page *sg_page(struct scatterlist *sg)
+{
+	return sg->page;
+}
+
+#ifdef CONFIG_PCI
+
+static inline int pci_try_set_mwi(struct pci_dev *pdev)
+{
+	return pci_set_mwi(pdev);
+}
+
+static inline int pci_reenable_device(struct pci_dev *pdev)
+{
+	return pci_enable_device(pdev);
+}
+
+#endif /* CONFIG_PCI */
+
 #endif /* __LIBATA_COMPAT_H__ */
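
libata-compat.h is the RHEL-only shim header; the additions let the backported code keep using the upstream chained-scatterlist API (sg_next(), sg_page(), for_each_sg(), sg_init_table()) on the 2.6.18 base, where a scatterlist is still a flat array, and map pci_try_set_mwi()/pci_reenable_device() onto their older equivalents. A sketch (not part of the patch) of code written against the new API that compiles unchanged through these shims:

#include <linux/scatterlist.h>
#include <linux/libata-compat.h>

/* Sketch: total byte count of an S/G table, walked with the compat
 * for_each_sg() shim (which degenerates to simple array indexing). */
static unsigned int example_sg_bytes(struct scatterlist *sgl, unsigned int n_elem)
{
	struct scatterlist *sg;
	unsigned int i, bytes = 0;

	for_each_sg(sgl, sg, n_elem, i)
		bytes += sg->length;

	return bytes;
}
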
diff --git a/include/linux/libata.h b/include/linux/libata.h
index a6cfa00..10c56c1 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -28,14 +28,12 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <linux/ata.h>
 #include <linux/workqueue.h>
 #include <scsi/scsi_host.h>
-
 #include <linux/libata-compat.h>
 
 /*
@@ -108,15 +106,10 @@ static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
 /* defines only for the constants which don't work well as enums */
 #define ATA_TAG_POISON		0xfafbfcfdU
 
-/* move to PCI layer? */
-static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
-{
-	return &pdev->dev;
-}
-
 enum {
 	/* various global constants */
 	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
+	LIBATA_DUMB_MAX_PRD	= ATA_MAX_PRD / 4,	/* Worst case */
 	ATA_MAX_PORTS		= 8,
 	ATA_DEF_QUEUE		= 1,
 	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
@@ -126,6 +119,8 @@ enum {
 	ATA_DEF_BUSY_WAIT	= 10000,
 	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,
 
+	ATAPI_MAX_DRAIN		= 16 << 10,
+
 	ATA_SHT_EMULATED	= 1,
 	ATA_SHT_CMD_PER_LUN	= 1,
 	ATA_SHT_THIS_ID		= -1,
@@ -137,11 +132,17 @@ enum {
 	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
 	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
 	ATA_DFLAG_FLUSH_EXT	= (1 << 4), /* do FLUSH_EXT instead of FLUSH */
-	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
-
-	ATA_DFLAG_PIO		= (1 << 8), /* device limited to PIO mode */
-	ATA_DFLAG_NCQ_OFF	= (1 << 9), /* device limited to non-NCQ mode */
-	ATA_DFLAG_SPUNDOWN	= (1 << 10), /* XXX: for spindown_compat */
+	ATA_DFLAG_ACPI_PENDING	= (1 << 5), /* ACPI resume action pending */
+	ATA_DFLAG_ACPI_FAILED	= (1 << 6), /* ACPI on devcfg has failed */
+	ATA_DFLAG_AN		= (1 << 7), /* AN configured */
+	ATA_DFLAG_HIPM		= (1 << 8), /* device supports HIPM */
+	ATA_DFLAG_DIPM		= (1 << 9), /* device supports DIPM */
+	ATA_DFLAG_CFG_MASK	= (1 << 12) - 1,
+
+	ATA_DFLAG_PIO		= (1 << 12), /* device limited to PIO mode */
+	ATA_DFLAG_NCQ_OFF	= (1 << 13), /* device limited to non-NCQ mode */
+	ATA_DFLAG_SPUNDOWN	= (1 << 14), /* XXX: for spindown_compat */
+	ATA_DFLAG_SLEEPING	= (1 << 15), /* device is sleeping */
 	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,
 
 	ATA_DFLAG_DETACH	= (1 << 16),
@@ -152,7 +153,22 @@ enum {
 	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
 	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
 	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
-	ATA_DEV_NONE		= 5,	/* no device */
+	ATA_DEV_PMP		= 5,	/* SATA port multiplier */
+	ATA_DEV_PMP_UNSUP	= 6,	/* SATA port multiplier (unsupported) */
+	ATA_DEV_SEMB		= 7,	/* SEMB */
+	ATA_DEV_SEMB_UNSUP	= 8,	/* SEMB (unsupported) */
+	ATA_DEV_NONE		= 9,	/* no device */
+
+	/* struct ata_link flags */
+	ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */
+	ATA_LFLAG_SKIP_D2H_BSY	= (1 << 1), /* can't wait for the first D2H
+					     * Register FIS clearing BSY */
+	ATA_LFLAG_NO_SRST	= (1 << 2), /* avoid softreset */
+	ATA_LFLAG_ASSUME_ATA	= (1 << 3), /* assume ATA class */
+	ATA_LFLAG_ASSUME_SEMB	= (1 << 4), /* assume SEMB class */
+	ATA_LFLAG_ASSUME_CLASS	= ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
+	ATA_LFLAG_NO_RETRY	= (1 << 5), /* don't retry this link */
+	ATA_LFLAG_DISABLED	= (1 << 6), /* link is disabled */
 
 	/* struct ata_port flags */
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
@@ -168,13 +184,13 @@ enum {
 	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
 					     * doesn't handle PIO interrupts */
 	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
-	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
-	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
-					      * Register FIS clearing BSY */
 	ATA_FLAG_DEBUGMSG	= (1 << 13),
 	ATA_FLAG_IGN_SIMPLEX	= (1 << 15), /* ignore SIMPLEX */
 	ATA_FLAG_NO_IORDY	= (1 << 16), /* controller lacks iordy */
 	ATA_FLAG_ACPI_SATA	= (1 << 17), /* need native SATA ACPI layout */
+	ATA_FLAG_AN		= (1 << 18), /* controller supports AN */
+	ATA_FLAG_PMP		= (1 << 19), /* controller supports PMP */
+	ATA_FLAG_IPM		= (1 << 20), /* driver can handle IPM */
 
 	/* The following flag belongs to ap->pflags but is kept in
 	 * ap->flags because it's referenced in many LLDs and will be
@@ -193,10 +209,11 @@ enum {
 	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
 	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */
 	ATA_PFLAG_INITIALIZING	= (1 << 7), /* being initialized, don't touch */
+	ATA_PFLAG_RESETTING	= (1 << 8), /* reset in progress */
 
-	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
 	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
 	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
+	ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
 
 	/* struct ata_queued_cmd flags */
 	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi lyer */
@@ -205,6 +222,8 @@ enum {
 	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
 	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
 	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */
+	ATA_QCFLAG_CLEAR_EXCL	= (1 << 5), /* clear excl_link on completion */
+	ATA_QCFLAG_QUIET	= (1 << 6), /* don't report device error */
 
 	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
 	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
@@ -214,12 +233,21 @@ enum {
 	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host only */
 	ATA_HOST_STARTED	= (1 << 1),	/* Host started */
 
+	/* bits 24:31 of host->flags are reserved for LLD specific flags */
+
 	/* various lengths of time */
 	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
 	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
 	ATA_TMOUT_INTERNAL	= 30 * HZ,
 	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
 
+	/* FIXME: GoVault needs 2s but we can't afford that without
+	 * parallel probing.  800ms is enough for iVDR disk
+	 * HHD424020F7SV00.  Increase to 2secs when parallel probing
+	 * is in place.
+	 */
+	ATA_TMOUT_FF_WAIT	= 4 * HZ / 5,
+
 	/* ATA bus states */
 	BUS_UNKNOWN		= 0,
 	BUS_DMA			= 1,
@@ -259,6 +287,10 @@ enum {
 	/* ering size */
 	ATA_ERING_SIZE		= 32,
 
+	/* return values for ->qc_defer */
+	ATA_DEFER_LINK		= 1,
+	ATA_DEFER_PORT		= 2,
+
 	/* desc_len for ata_eh_info and context */
 	ATA_EH_DESC_LEN		= 80,
 
@@ -266,6 +298,7 @@ enum {
 	ATA_EH_REVALIDATE	= (1 << 0),
 	ATA_EH_SOFTRESET	= (1 << 1),
 	ATA_EH_HARDRESET	= (1 << 2),
+	ATA_EH_ENABLE_LINK	= (1 << 3),
 
 	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
 	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE,
@@ -275,6 +308,7 @@ enum {
 	ATA_EHI_RESUME_LINK	= (1 << 1),  /* resume link (reset modifier) */
 	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
 	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */
+	ATA_EHI_LPM		= (1 << 4),  /* link power management action */
 
 	ATA_EHI_DID_SOFTRESET	= (1 << 16), /* already soft-reset this port */
 	ATA_EHI_DID_HARDRESET	= (1 << 17), /* already hard-reset this port */
@@ -285,12 +319,16 @@ enum {
 	ATA_EHI_DID_RESET	= ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
 	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
 
-	/* max repeat if error condition is still set after ->error_handler */
-	ATA_EH_MAX_REPEAT	= 5,
+	/* max tries if error condition is still set after ->error_handler */
+	ATA_EH_MAX_TRIES	= 5,
 
 	/* how hard are we gonna try to probe/recover devices */
 	ATA_PROBE_MAX_TRIES	= 3,
 	ATA_EH_DEV_TRIES	= 3,
+	ATA_EH_PMP_TRIES	= 5,
+	ATA_EH_PMP_LINK_TRIES	= 3,
+
+	SATA_PMP_SCR_TIMEOUT	= 250,
 
 	/* Horkage types. May be set by libata or controller on drives
 	   (some horkage may be drive/controller pair dependant */
@@ -299,6 +337,18 @@ enum {
 	ATA_HORKAGE_NODMA	= (1 << 1),	/* DMA problems */
 	ATA_HORKAGE_NONCQ	= (1 << 2),	/* Don't use NCQ */
 	ATA_HORKAGE_MAX_SEC_128	= (1 << 3),	/* Limit max sects to 128 */
+	ATA_HORKAGE_BROKEN_HPA	= (1 << 4),	/* Broken HPA */
+	ATA_HORKAGE_SKIP_PM	= (1 << 5),	/* Skip PM operations */
+	ATA_HORKAGE_HPA_SIZE	= (1 << 6),	/* native size off by one */
+	ATA_HORKAGE_IPM		= (1 << 7),	/* Link PM problems */
+	ATA_HORKAGE_IVB		= (1 << 8),	/* cbl det validity bit bugs */
+	ATA_HORKAGE_STUCK_ERR	= (1 << 9),	/* stuck ERR on next PACKET */
+
+	 /* DMA mask for user DMA control: User visible values; DO NOT
+	    renumber */
+	ATA_DMA_MASK_ATA	= (1 << 0),	/* DMA on ATA Disk */
+	ATA_DMA_MASK_ATAPI	= (1 << 1),	/* DMA on ATAPI */
+	ATA_DMA_MASK_CFA	= (1 << 2),	/* DMA on CF Card */
 };
 
 enum hsm_task_states {
@@ -321,20 +371,34 @@ enum ata_completion_errors {
 	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
 	AC_ERR_OTHER		= (1 << 8), /* unknown */
 	AC_ERR_NODEV_HINT	= (1 << 9), /* polling device detection hint */
+	AC_ERR_NCQ		= (1 << 10), /* marker for offending NCQ qc */
 };
 
 /* forward declarations */
 struct scsi_device;
 struct ata_port_operations;
 struct ata_port;
+struct ata_link;
 struct ata_queued_cmd;
 
 /* typedefs */
 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
-typedef int (*ata_prereset_fn_t)(struct ata_port *ap, unsigned long deadline);
-typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes,
+typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline);
+typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
 			      unsigned long deadline);
-typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
+typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
+
+/*
+ * host pm policy: If you alter this, you also need to alter libata-scsi.c
+ * (for the ascii descriptions)
+ */
+enum link_pm {
+	NOT_AVAILABLE,
+	MIN_POWER,
+	MAX_PERFORMANCE,
+	MEDIUM_POWER,
+};
+extern struct class_device_attribute class_device_attr_link_power_management_policy;
 
 struct ata_ioports {
 	void __iomem		*cmd_addr;
@@ -357,13 +421,14 @@ struct ata_ioports {
 struct ata_host {
 	spinlock_t		lock;
 	struct device 		*dev;
-	unsigned long		irq;
-	unsigned long		irq2;
 	void __iomem * const	*iomap;
 	unsigned int		n_ports;
 	void			*private_data;
 	const struct ata_port_operations *ops;
 	unsigned long		flags;
+#ifdef CONFIG_ATA_ACPI
+	void			*acpi_handle;
+#endif
 	struct ata_port		*simplex_claimed;	/* channel owning the DMA */
 	struct ata_port		*ports[0];
 };
@@ -381,6 +446,7 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		n_iter;
 	unsigned int		orig_n_elem;
 
 	int			dma_dir;
@@ -391,7 +457,7 @@ struct ata_queued_cmd {
 	unsigned int		nbytes;
 	unsigned int		curbytes;
 
-	unsigned int		cursg;
+	struct scatterlist	*cursg;
 	unsigned int		cursg_ofs;
 
 	struct scatterlist	sgent;
@@ -406,6 +472,7 @@ struct ata_queued_cmd {
 	ata_qc_cb_t		complete_fn;
 
 	void			*private_data;
+	void			*lldd_task;
 };
 
 struct ata_port_stats {
@@ -426,14 +493,24 @@ struct ata_ering {
 };
 
 struct ata_device {
-	struct ata_port		*ap;
+	struct ata_link		*link;
 	unsigned int		devno;		/* 0 or 1 */
 	unsigned long		flags;		/* ATA_DFLAG_xxx */
+	unsigned int		horkage;	/* List of broken features */
 	struct scsi_device	*sdev;		/* attached SCSI device */
+#ifdef CONFIG_ATA_ACPI
+	void			*acpi_handle;
+	union acpi_object	*gtf_cache;
+#endif
 	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
 	u64			n_sectors;	/* size of device, if ATA */
 	unsigned int		class;		/* ATA_DEV_xxx */
-	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+
+	union {
+		u16		id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+		u32		gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+	};
+
 	u8			pio_mode;
 	u8			dma_mode;
 	u8			xfer_mode;
@@ -457,11 +534,6 @@ struct ata_device {
 	/* error history */
 	struct ata_ering	ering;
 	int			spdn_cnt;
-	unsigned int		horkage;	/* List of broken features */
-#ifdef CONFIG_ATA_ACPI
-	/* ACPI objects info */
-	void			*obj_handle;
-#endif
 };
 
 /* Offset into struct ata_device.  Fields above it are maintained
@@ -490,6 +562,38 @@ struct ata_eh_context {
 	unsigned int		did_probe_mask;
 };
 
+struct ata_acpi_drive
+{
+	u32 pio;
+	u32 dma;
+} __attribute__ ((packed));
+
+struct ata_acpi_gtm {
+	struct ata_acpi_drive drive[2];
+	u32 flags;
+} __attribute__ ((packed));
+
+struct ata_link {
+	struct ata_port		*ap;
+	int			pmp;		/* port multiplier port # */
+
+	unsigned int		active_tag;	/* active tag on this link */
+	u32			sactive;	/* active NCQ commands */
+
+	unsigned int		flags;		/* ATA_LFLAG_xxx */
+
+	unsigned int		hw_sata_spd_limit;
+	unsigned int		sata_spd_limit;
+	unsigned int		sata_spd;	/* current SATA PHY speed */
+
+	/* record runtime error info, protected by host_set lock */
+	struct ata_eh_info	eh_info;
+	/* EH context */
+	struct ata_eh_context	eh_context;
+
+	struct ata_device	device[ATA_MAX_DEVICES];
+};
+
 struct ata_port {
 	struct Scsi_Host	*scsi_host; /* our co-allocated scsi host */
 	const struct ata_port_operations *ops;
@@ -513,22 +617,17 @@ struct ata_port {
 	unsigned int		mwdma_mask;
 	unsigned int		udma_mask;
 	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
-	unsigned int		hw_sata_spd_limit;
-	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */
-
-	/* record runtime error info, protected by host lock */
-	struct ata_eh_info	eh_info;
-	/* EH context owned by EH */
-	struct ata_eh_context	eh_context;
-
-	struct ata_device	device[ATA_MAX_DEVICES];
 
 	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
 	unsigned long		qc_allocated;
 	unsigned int		qc_active;
+	int			nr_active_links; /* #links with active qcs */
 
-	unsigned int		active_tag;
-	u32			sactive;
+	struct ata_link		link;	/* host default link */
+
+	int			nr_pmp_links;	/* nr of available PMP links */
+	struct ata_link		*pmp_link;	/* array of PMP links */
+	struct ata_link		*excl_link;	/* for PMP qc exclusion */
 
 	struct ata_port_stats	stats;
 	struct ata_host		*host;
@@ -544,18 +643,25 @@ struct ata_port {
 	u32			msg_enable;
 	struct list_head	eh_done_q;
 	wait_queue_head_t	eh_wait_q;
+	int			eh_tries;
 
 	pm_message_t		pm_mesg;
 	int			*pm_result;
+	enum link_pm		pm_policy;
+
+	struct timer_list	fastdrain_timer;
+	unsigned long		fastdrain_cnt;
 
 	void			*private_data;
 
+#ifdef CONFIG_ATA_ACPI
+	void			*acpi_handle;
+	struct ata_acpi_gtm	__acpi_init_gtm; /* use ata_acpi_init_gtm() */
+#endif
 	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
 };
 
 struct ata_port_operations {
-	void (*port_disable) (struct ata_port *);
-
 	void (*dev_config) (struct ata_device *);
 
 	void (*set_piomode) (struct ata_port *, struct ata_device *);
@@ -571,7 +677,7 @@ struct ata_port_operations {
 	void (*dev_select)(struct ata_port *ap, unsigned int device);
 
 	void (*phy_reset) (struct ata_port *ap); /* obsolete */
-	int  (*set_mode) (struct ata_port *ap, struct ata_device **r_failed_dev);
+	int  (*set_mode) (struct ata_link *link, struct ata_device **r_failed_dev);
 
 	int (*cable_detect) (struct ata_port *ap);
 
@@ -582,9 +688,14 @@ struct ata_port_operations {
 
 	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
 
+	int (*qc_defer) (struct ata_queued_cmd *qc);
 	void (*qc_prep) (struct ata_queued_cmd *qc);
 	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
 
+	/* port multiplier */
+	void (*pmp_attach) (struct ata_port *ap);
+	void (*pmp_detach) (struct ata_port *ap);
+
 	/* Error handlers.  ->error_handler overrides ->eng_timeout and
 	 * indicates that new-style EH is in place.
 	 */
@@ -598,15 +709,14 @@ struct ata_port_operations {
 	irq_handler_t irq_handler;
 	void (*irq_clear) (struct ata_port *);
 	u8 (*irq_on) (struct ata_port *);
-	u8 (*irq_ack) (struct ata_port *ap, unsigned int chk_drq);
 
-	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
-	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
-			   u32 val);
+	int (*scr_read) (struct ata_port *ap, unsigned int sc_reg, u32 *val);
+	int (*scr_write) (struct ata_port *ap, unsigned int sc_reg, u32 val);
 
 	int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
 	int (*port_resume) (struct ata_port *ap);
-
+	int (*enable_pm) (struct ata_port *ap, enum link_pm policy);
+	void (*disable_pm) (struct ata_port *ap);
 	int (*port_start) (struct ata_port *ap);
 	void (*port_stop) (struct ata_port *ap);
 
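
->scr_read() and ->scr_write() now return a status and pass the register value by pointer, so SCR access failures can be reported instead of silently returning garbage. A hedged sketch of a new-style accessor for a hypothetical memory-mapped controller (the function name and register layout are illustrative):

#include <linux/kernel.h>
#include <linux/libata.h>

/* Sketch: new-style SCR read op; rejects registers the hardware
 * does not expose and reports success or failure to the caller. */
static int example_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
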
@@ -619,6 +729,7 @@ struct ata_port_operations {
 struct ata_port_info {
 	struct scsi_host_template	*sht;
 	unsigned long		flags;
+	unsigned long		link_flags;
 	unsigned long		pio_mask;
 	unsigned long		mwdma_mask;
 	unsigned long		udma_mask;
@@ -639,7 +750,7 @@ struct ata_timing {
 	unsigned short udma;		/* t2CYCTYP/2 */
 };
 
-#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)
+#define FIT(v, vmin, vmax)	max_t(short, min_t(short, v, vmax), vmin)
 
 extern const unsigned long sata_deb_timing_normal[];
 extern const unsigned long sata_deb_timing_hotplug[];
@@ -662,38 +773,25 @@ static inline int ata_port_is_dummy(struct ata_port *ap)
 	return ap->ops == &ata_dummy_port_ops;
 }
 
-extern void sata_print_link_status(struct ata_port *ap);
+extern void sata_print_link_status(struct ata_link *link);
 extern void ata_port_probe(struct ata_port *);
-extern void __sata_phy_reset(struct ata_port *ap);
-extern void sata_phy_reset(struct ata_port *ap);
 extern void ata_bus_reset(struct ata_port *ap);
-extern int sata_set_spd(struct ata_port *ap);
-extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param,
-			     unsigned long deadline);
-extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param,
-			   unsigned long deadline);
-extern int ata_std_prereset(struct ata_port *ap, unsigned long deadline);
-extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
+extern int sata_set_spd(struct ata_link *link);
+extern int sata_link_debounce(struct ata_link *link,
+			const unsigned long *params, unsigned long deadline);
+extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+			    unsigned long deadline);
+extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
+extern int ata_std_softreset(struct ata_link *link, unsigned int *classes,
 			     unsigned long deadline);
-extern int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
-			       unsigned long deadline);
-extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
+extern int sata_link_hardreset(struct ata_link *link,
+			const unsigned long *timing, unsigned long deadline);
+extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
 			      unsigned long deadline);
-extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
+extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
 extern void ata_port_disable(struct ata_port *);
 extern void ata_std_ports(struct ata_ioports *ioaddr);
-#ifdef CONFIG_PCI
-extern int ata_pci_init_one (struct pci_dev *pdev,
-			     const struct ata_port_info * const * ppi);
-extern void ata_pci_remove_one (struct pci_dev *pdev);
-#ifdef CONFIG_PM
-extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
-extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
-extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-extern int ata_pci_device_resume(struct pci_dev *pdev);
-#endif
-extern int ata_pci_clear_simplex(struct pci_dev *pdev);
-#endif /* CONFIG_PCI */
+
 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 			const struct ata_port_info * const * ppi, int n_ports);
@@ -719,12 +817,12 @@ extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
 			    struct ata_port *ap);
 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
-extern int sata_scr_valid(struct ata_port *ap);
-extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
-extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
-extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
-extern int ata_port_online(struct ata_port *ap);
-extern int ata_port_offline(struct ata_port *ap);
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern int ata_link_online(struct ata_link *link);
+extern int ata_link_offline(struct ata_link *link);
 #ifdef CONFIG_PM
 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
 extern void ata_host_resume(struct ata_host *host);
@@ -732,33 +830,38 @@ extern void ata_host_resume(struct ata_host *host);
 extern int ata_ratelimit(void);
 extern int ata_busy_sleep(struct ata_port *ap,
 			  unsigned long timeout_pat, unsigned long timeout);
+extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline);
 extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
 extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
 				void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
 			     unsigned long interval_msec,
 			     unsigned long timeout_msec);
-extern unsigned int ata_dev_try_classify(struct ata_port *, unsigned int, u8 *);
+extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
+					 u8 *r_err);
 
 /*
  * Default driver ops implementations
  */
 extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
 extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
-extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+			  u8 pmp, int is_cmd, u8 *fis);
 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
-extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
-extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
+extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device);
+extern void ata_std_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_check_status(struct ata_port *ap);
 extern u8 ata_altstatus(struct ata_port *ap);
 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
-extern int ata_port_start (struct ata_port *ap);
-extern int ata_sff_port_start (struct ata_port *ap);
-extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *pt_regs);
+extern int ata_port_start(struct ata_port *ap);
+extern int ata_sff_port_start(struct ata_port *ap);
+extern irqreturn_t ata_interrupt(int irq, void *dev_instance, struct pt_regs *pt_regs);
 extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
 			  unsigned int buflen, int write_data);
 extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
 				unsigned int buflen, int write_data);
+extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
+extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -773,9 +876,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s,
 extern void ata_id_c_string(const u16 *id, unsigned char *s,
 			    unsigned int ofs, unsigned int len);
 extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown);
-extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
-extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
-extern void ata_bmdma_start (struct ata_queued_cmd *qc);
+extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
+extern void ata_bmdma_start(struct ata_queued_cmd *qc);
 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
 extern u8   ata_bmdma_status(struct ata_port *ap);
 extern void ata_bmdma_irq_clear(struct ata_port *ap);
@@ -802,11 +904,8 @@ extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
 				       int queue_depth);
 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
-extern int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
 extern u8 ata_irq_on(struct ata_port *ap);
-extern u8 ata_dummy_irq_on(struct ata_port *ap);
-extern u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq);
-extern u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq);
 
 extern int ata_cable_40wire(struct ata_port *ap);
 extern int ata_cable_80wire(struct ata_port *ap);
@@ -841,8 +940,39 @@ enum {
 				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
 };
 
+/* libata-acpi.c */
+#ifdef CONFIG_ATA_ACPI
+static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
+{
+	if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID)
+		return &ap->__acpi_init_gtm;
+	return NULL;
+}
+extern int ata_acpi_cbl_80wire(struct ata_port *ap);
+int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
+#else
+static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
+{
+	return NULL;
+}
+static inline int ata_acpi_cbl_80wire(struct ata_port *ap) { return 0; }
+#endif
 
 #ifdef CONFIG_PCI
+struct pci_dev;
+
+extern int ata_pci_init_one(struct pci_dev *pdev,
+			     const struct ata_port_info * const * ppi);
+extern void ata_pci_remove_one(struct pci_dev *pdev);
+#ifdef CONFIG_PM
+extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int ata_pci_device_resume(struct pci_dev *pdev);
+#endif
+extern int ata_pci_clear_simplex(struct pci_dev *pdev);
+
 struct pci_bits {
 	unsigned int		reg;	/* PCI config register to read */
 	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
@@ -850,23 +980,37 @@ struct pci_bits {
 	unsigned long		val;
 };
 
-extern int ata_pci_init_native_host(struct ata_host *host);
+extern int ata_pci_init_sff_host(struct ata_host *host);
 extern int ata_pci_init_bmdma(struct ata_host *host);
-extern int ata_pci_prepare_native_host(struct pci_dev *pdev,
-				const struct ata_port_info * const * ppi,
-				struct ata_host **r_host);
+extern int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+				    const struct ata_port_info * const * ppi,
+				    struct ata_host **r_host);
 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
 extern unsigned long ata_pci_default_filter(struct ata_device *, unsigned long);
 #endif /* CONFIG_PCI */
 
 /*
- * EH
+ * PMP
  */
-extern void ata_eng_timeout(struct ata_port *ap);
+extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
+extern int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline);
+extern int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class,
+				  unsigned long deadline);
+extern void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class);
+extern void sata_pmp_do_eh(struct ata_port *ap,
+		ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+		ata_reset_fn_t hardreset, ata_postreset_fn_t postreset,
+		ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset,
+		ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset);
 
+/*
+ * EH
+ */
 extern void ata_port_schedule_eh(struct ata_port *ap);
+extern int ata_link_abort(struct ata_link *link);
 extern int ata_port_abort(struct ata_port *ap);
 extern int ata_port_freeze(struct ata_port *ap);
+extern int sata_async_notification(struct ata_port *ap);
 
 extern void ata_eh_freeze_port(struct ata_port *ap);
 extern void ata_eh_thaw_port(struct ata_port *ap);
@@ -882,56 +1026,61 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
  * printk helpers
  */
 #define ata_port_printk(ap, lv, fmt, args...) \
-	printk(lv"ata%u: "fmt, (ap)->print_id , ##args)
+	printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
+
+#define ata_link_printk(link, lv, fmt, args...) do { \
+	if ((link)->ap->nr_pmp_links) \
+		printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id,	\
+		       (link)->pmp , ##args); \
+	else \
+		printk("%sata%u: "fmt, lv, (link)->ap->print_id , ##args); \
+	} while(0)
 
 #define ata_dev_printk(dev, lv, fmt, args...) \
-	printk(lv"ata%u.%02u: "fmt, (dev)->ap->print_id, (dev)->devno , ##args)
+	printk("%sata%u.%02u: "fmt, lv, (dev)->link->ap->print_id,	\
+	       (dev)->link->pmp + (dev)->devno , ##args)
 
 /*
  * ata_eh_info helpers
  */
-#define ata_ehi_push_desc(ehi, fmt, args...) do { \
-	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
-				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
-				     fmt , ##args); \
-} while (0)
-
-#define ata_ehi_clear_desc(ehi) do { \
-	(ehi)->desc[0] = '\0'; \
-	(ehi)->desc_len = 0; \
-} while (0)
-
-static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
+extern void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+extern void ata_ehi_clear_desc(struct ata_eh_info *ehi);
+
+static inline void ata_ehi_schedule_probe(struct ata_eh_info *ehi)
 {
-	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
+	ehi->flags |= ATA_EHI_RESUME_LINK;
 	ehi->action |= ATA_EH_SOFTRESET;
 	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
 }
 
 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
 {
-	__ata_ehi_hotplugged(ehi);
+	ata_ehi_schedule_probe(ehi);
+	ehi->flags |= ATA_EHI_HOTPLUGGED;
+	ehi->action |= ATA_EH_ENABLE_LINK;
 	ehi->err_mask |= AC_ERR_ATA_BUS;
 }
 
 /*
- * qc helpers
+ * port description helpers
  */
-static inline int
-ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
-{
-	if (sg == &qc->pad_sgent)
-		return 1;
-	if (qc->pad_len)
-		return 0;
-	if (((sg - qc->__sg) + 1) == qc->n_elem)
-		return 1;
-	return 0;
-}
+extern void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
+	__attribute__ ((format (printf, 2, 3)));
+#ifdef CONFIG_PCI
+extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+			       const char *name);
+#endif
 
+/*
+ * qc helpers
+ */
 static inline struct scatterlist *
 ata_qc_first_sg(struct ata_queued_cmd *qc)
 {
+	qc->n_iter = 0;
 	if (qc->n_elem)
 		return qc->__sg;
 	if (qc->pad_len)
@@ -944,8 +1093,8 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
 {
 	if (sg == &qc->pad_sgent)
 		return NULL;
-	if (++sg - qc->__sg < qc->n_elem)
-		return sg;
+	if (++qc->n_iter < qc->n_elem)
+		return sg_next(sg);
 	if (qc->pad_len)
 		return &qc->pad_sgent;
 	return NULL;
@@ -969,12 +1118,14 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
  */
 static inline unsigned int ata_class_enabled(unsigned int class)
 {
-	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
+	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
+		class == ATA_DEV_PMP || class == ATA_DEV_SEMB;
 }
 
 static inline unsigned int ata_class_disabled(unsigned int class)
 {
-	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
+	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP ||
+		class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP;
 }
 
 static inline unsigned int ata_class_absent(unsigned int class)
@@ -998,15 +1149,62 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
 }
 
 /*
- * port helpers
+ * link helpers
  */
-static inline int ata_port_max_devices(const struct ata_port *ap)
+static inline int ata_is_host_link(const struct ata_link *link)
 {
-	if (ap->flags & ATA_FLAG_SLAVE_POSS)
+	return link == &link->ap->link;
+}
+
+static inline int ata_link_max_devices(const struct ata_link *link)
+{
+	if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS)
 		return 2;
 	return 1;
 }
 
+static inline int ata_link_active(struct ata_link *link)
+{
+	return ata_tag_valid(link->active_tag) || link->sactive;
+}
+
+static inline struct ata_link *ata_port_first_link(struct ata_port *ap)
+{
+	if (ap->nr_pmp_links)
+		return ap->pmp_link;
+	return &ap->link;
+}
+
+static inline struct ata_link *ata_port_next_link(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+
+	if (link == &ap->link) {
+		if (!ap->nr_pmp_links)
+			return NULL;
+		return ap->pmp_link;
+	}
+
+	if (++link - ap->pmp_link < ap->nr_pmp_links)
+		return link;
+	return NULL;
+}
+
+#define __ata_port_for_each_link(lk, ap) \
+	for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk))
+
+#define ata_port_for_each_link(link, ap) \
+	for ((link) = ata_port_first_link(ap); (link); \
+	     (link) = ata_port_next_link(link))
+
+#define ata_link_for_each_dev(dev, link) \
+	for ((dev) = (link)->device; \
+	     (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \
+	     (dev)++)
+
+#define ata_link_for_each_dev_reverse(dev, link) \
+	for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \
+	     (dev) >= (link)->device || ((dev) = NULL); (dev)--)
 
 static inline u8 ata_chk_status(struct ata_port *ap)
 {
@@ -1088,9 +1286,11 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
 {
 	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
 
+#ifdef ATA_DEBUG
 	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
-		DPRINTK("ATA: abnormal status 0x%X on port 0x%p\n",
-			status, ap->ioaddr.status_addr);
+		ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
+				status);
+#endif
 
 	return status;
 }
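
With devices now hanging off struct ata_link rather than directly off the port, per-link state is walked with the ata_port_for_each_link()/ata_link_for_each_dev() iterators introduced earlier in this header. A usage sketch, not part of the patch:

#include <linux/libata.h>

/* Sketch: count enabled devices behind a port, covering both the host
 * link and any port-multiplier links via the new iterators. */
static int example_count_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	int count = 0;

	ata_port_for_each_link(link, ap)
		ata_link_for_each_dev(dev, link)
			if (ata_dev_enabled(dev))
				count++;

	return count;
}
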
@@ -1127,7 +1327,7 @@ static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
 {
 	memset(tf, 0, sizeof(*tf));
 
-	tf->ctl = dev->ap->ctl;
+	tf->ctl = dev->link->ap->ctl;
 	if (dev->devno == 0)
 		tf->device = ATA_DEVICE_OBS;
 	else
@@ -1139,9 +1339,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
 	qc->dma_dir = DMA_NONE;
 	qc->__sg = NULL;
 	qc->flags = 0;
-	qc->cursg = qc->cursg_ofs = 0;
+	qc->cursg = NULL;
+	qc->cursg_ofs = 0;
 	qc->nbytes = qc->curbytes = 0;
 	qc->n_elem = 0;
+	qc->n_iter = 0;
 	qc->err_mask = 0;
 	qc->pad_len = 0;
 	qc->sect_size = ATA_SECT_SIZE;