From: Rob Evers <revers@redhat.com>
Date: Tue, 20 Oct 2009 09:54:47 -0400
Subject: [scsi] hptiop: RocketRAID driver update v1.0 -> v1.6
Message-id: 20091020134820.25711.19185.sendpatchset@localhost.localdomain
O-Subject: [RHEL5.5 PATCH] hptiop: HighPoint RocketRAID driver update v1.0 -> v1.6
Bugzilla: 519076
RH-Acked-by: Tomas Henzl <thenzl@redhat.com>

https://bugzilla.redhat.com/show_bug.cgi?id=519076

Description:

    Update the hptiop driver (HighPoint Technologies RocketRAID) from
    version v1.0 to v1.6.

    From bz519076 comment 11:

    This patch was made to support Marvell IOP and RR44xx adapters.
    We have tested it on RHEL 5.4.
    - add more PCI device IDs
    - support adapters based on the Marvell IOP (the per-family ops-table
      dispatch this introduces is sketched after this list)
    - fix a result code translation error on big-endian systems
    - fix a resource release bug when scsi_host_alloc() fails in
      hptiop_probe()
    - update scsi_cmnd.resid when finishing a request
    - correct some coding style issues
    - use PCI BAR[2] to access the RR44xx IOP
    - use PCI BAR[0] to check and clear RR44xx IRQs
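
    A minimal standalone C sketch (illustrative only; toy names, not the
    driver's code) of the dispatch pattern the update introduces: probe
    binds one struct hptiop_adapter_ops per IOP family via
    pci_device_id.driver_data, and the shared code paths then call
    hba->ops->... instead of branching on the chip type:

	/* ops_dispatch_sketch.c -- pattern demo, compiles standalone */
	#include <stdio.h>

	struct hba;

	struct adapter_ops {
		void (*post_msg)(struct hba *hba, unsigned int msg);
	};

	struct hba {
		const char *name;
		const struct adapter_ops *ops;	/* bound at "probe" time */
	};

	/* real driver: writel(msg, &hba->u.itl.iop->inbound_msgaddr0) */
	static void post_msg_itl(struct hba *hba, unsigned int msg)
	{
		printf("%s: ITL message 0x%x\n", hba->name, msg);
	}

	/* real driver: write inbound_msg, then ring the MV doorbell */
	static void post_msg_mv(struct hba *hba, unsigned int msg)
	{
		printf("%s: MV message 0x%x\n", hba->name, msg);
	}

	static const struct adapter_ops itl_ops = { post_msg_itl };
	static const struct adapter_ops mv_ops  = { post_msg_mv };

	int main(void)
	{
		/* in the driver this comes from pci_device_id.driver_data */
		struct hba a = { "itl-hba", &itl_ops };
		struct hba b = { "mv-hba",  &mv_ops  };

		a.ops->post_msg(&a, 0);	/* shared code, per-family behavior */
		b.ops->post_msg(&b, 0);
		return 0;
	}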

Upstream Status:

    Patch is present in the upstream scsi-misc tree.

Brew build:

    https://brewweb.devel.redhat.com/taskinfo?taskID=2029151

Testing:

    Tested by HighPoint.  Additionally, ran dt overnight with the new
    driver; no data errors were detected.

diff --git a/Documentation/scsi/hptiop.txt b/Documentation/scsi/hptiop.txt
index d28a312..23ba567 100644
--- a/Documentation/scsi/hptiop.txt
+++ b/Documentation/scsi/hptiop.txt
@@ -1,9 +1,29 @@
-HIGHPOINT ROCKETRAID 3xxx RAID DRIVER (hptiop)
+HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
 
 Controller Register Map
+
 -------------------------
 
-The controller IOP is accessed via PCI BAR0.
+For RR44xx Intel IOP based adapters, the controller IOP is accessed via PCI BAR0 and BAR2:
+
+     BAR0 offset    Register
+            0x11C5C Link Interface IRQ Set
+            0x11C60 Link Interface IRQ Clear
+
+     BAR2 offset    Register
+            0x10    Inbound Message Register 0
+            0x14    Inbound Message Register 1
+            0x18    Outbound Message Register 0
+            0x1C    Outbound Message Register 1
+            0x20    Inbound Doorbell Register
+            0x24    Inbound Interrupt Status Register
+            0x28    Inbound Interrupt Mask Register
+            0x30    Outbound Interrupt Status Register
+            0x34    Outbound Interrupt Mask Register
+            0x40    Inbound Queue Port
+            0x44    Outbound Queue Port
+
+For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
 
      BAR0 offset    Register
             0x10    Inbound Message Register 0
@@ -18,6 +38,24 @@ The controller IOP is accessed via PCI BAR0.
             0x40    Inbound Queue Port
             0x44    Outbound Queue Port
 
+For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
+
+     BAR0 offset    Register
+         0x20400    Inbound Doorbell Register
+         0x20404    Inbound Interrupt Mask Register
+         0x20408    Outbound Doorbell Register
+         0x2040C    Outbound Interrupt Mask Register
+
+     BAR1 offset    Register
+             0x0    Inbound Queue Head Pointer
+             0x4    Inbound Queue Tail Pointer
+             0x8    Outbound Queue Head Pointer
+             0xC    Outbound Queue Tail Pointer
+            0x10    Inbound Message Register
+            0x14    Outbound Message Register
+     0x40-0x1040    Inbound Queue
+   0x1040-0x2040    Outbound Queue
+
 
 I/O Request Workflow
 ----------------------
@@ -73,15 +111,9 @@ The driver exposes following sysfs attributes:
      driver-version        R     driver version string
      firmware-version      R     firmware version string
 
-The driver registers char device "hptiop" to communicate with HighPoint RAID
-management software. Its ioctl routine acts as a general binary interface 
-between the IOP firmware and HighPoint RAID management software. New management
-functions can be implemented in application/firmware without modification
-in driver code.
-
 
 -----------------------------------------------------------------------------
-Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
+Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
 
   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 2d994fd..d330ed5 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
- * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
+ * HighPoint RR3xxx/4xxx controller driver for Linux
+ * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/spinlock.h>
-#include <linux/hdreg.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/div64.h>
@@ -38,87 +37,96 @@
 #include "hptiop.h"
 
 MODULE_AUTHOR("HighPoint Technologies, Inc.");
-MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");
+MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
 
 static char driver_name[] = "hptiop";
-static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
-static const char driver_ver[] = "v1.0 (060426)";
-
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
-static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
+static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
+static const char driver_ver[] = "v1.6 (090910)";
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+				struct hpt_iop_request_scsi_command *req);
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
 
-static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
-{
-	readl(&iop->outbound_intstatus);
-}
-
-static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
+static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
 {
 	u32 req = 0;
 	int i;
 
 	for (i = 0; i < millisec; i++) {
-		req = readl(&iop->inbound_queue);
+		req = readl(&hba->u.itl.iop->inbound_queue);
 		if (req != IOPMU_QUEUE_EMPTY)
 			break;
 		msleep(1);
 	}
 
 	if (req != IOPMU_QUEUE_EMPTY) {
-		writel(req, &iop->outbound_queue);
-		hptiop_pci_posting_flush(iop);
+		writel(req, &hba->u.itl.iop->outbound_queue);
+		readl(&hba->u.itl.iop->outbound_intstatus);
 		return 0;
 	}
 
 	return -1;
 }
 
-static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
+static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
+{
+	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
+static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
-	if ((tag & IOPMU_QUEUE_MASK_HOST_BITS) == IOPMU_QUEUE_ADDR_HOST_BIT)
-		return hptiop_host_request_callback(hba,
+	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
+		hptiop_host_request_callback_itl(hba,
 				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
 	else
-		return hptiop_iop_request_callback(hba, tag);
+		hptiop_iop_request_callback_itl(hba, tag);
 }
 
-static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
+static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
 {
 	u32 req;
 
-	while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
+	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
+						IOPMU_QUEUE_EMPTY) {
 
 		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
-			hptiop_request_callback(hba, req);
+			hptiop_request_callback_itl(hba, req);
 		else {
 			struct hpt_iop_request_header __iomem * p;
 
 			p = (struct hpt_iop_request_header __iomem *)
-				((char __iomem *)hba->iop + req);
+				((char __iomem *)hba->u.itl.iop + req);
 
 			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
 				if (readl(&p->context))
-					hptiop_request_callback(hba, req);
+					hptiop_request_callback_itl(hba, req);
 				else
 					writel(1, &p->context);
 			}
 			else
-				hptiop_request_callback(hba, req);
+				hptiop_request_callback_itl(hba, req);
 		}
 	}
 }
 
-static int __iop_intr(struct hptiop_hba *hba)
+static int iop_intr_itl(struct hptiop_hba *hba)
 {
-	struct hpt_iopmu __iomem *iop = hba->iop;
+	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
+	void __iomem *plx = hba->u.itl.plx;
 	u32 status;
 	int ret = 0;
 
+	if (plx && readl(plx + 0x11C5C) & 0xf)
+		writel(1, plx + 0x11C60);
+
 	status = readl(&iop->outbound_intstatus);
 
 	if (status & IOPMU_OUTBOUND_INT_MSG0) {
 		u32 msg = readl(&iop->outbound_msgaddr0);
+
 		dprintk("received outbound msg %x\n", msg);
 		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
 		hptiop_message_callback(hba, msg);
@@ -126,31 +134,115 @@ static int __iop_intr(struct hptiop_hba *hba)
 	}
 
 	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
-		hptiop_drain_outbound_queue(hba);
+		hptiop_drain_outbound_queue_itl(hba);
 		ret = 1;
 	}
 
 	return ret;
 }
 
-static int iop_send_sync_request(struct hptiop_hba *hba,
+static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
+{
+	u32 outbound_tail = readl(&mu->outbound_tail);
+	u32 outbound_head = readl(&mu->outbound_head);
+
+	if (outbound_tail != outbound_head) {
+		u64 p;
+
+		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
+		outbound_tail++;
+
+		if (outbound_tail == MVIOP_QUEUE_LEN)
+			outbound_tail = 0;
+		writel(outbound_tail, &mu->outbound_tail);
+		return p;
+	} else
+		return 0;
+}
+
+static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
+{
+	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
+	u32 head = inbound_head + 1;
+
+	if (head == MVIOP_QUEUE_LEN)
+		head = 0;
+
+	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
+	writel(head, &hba->u.mv.mu->inbound_head);
+	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
+			&hba->u.mv.regs->inbound_doorbell);
+}
+
+static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
+{
+	u32 req_type = (tag >> 5) & 0x7;
+	struct hpt_iop_request_scsi_command *req;
+
+	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
+
+	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
+
+	switch (req_type) {
+	case IOP_REQUEST_TYPE_GET_CONFIG:
+	case IOP_REQUEST_TYPE_SET_CONFIG:
+		hba->msg_done = 1;
+		break;
+
+	case IOP_REQUEST_TYPE_SCSI_COMMAND:
+		req = hba->reqs[tag >> 8].req_virt;
+		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
+			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+
+		hptiop_finish_scsi_req(hba, tag>>8, req);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int iop_intr_mv(struct hptiop_hba *hba)
+{
+	u32 status;
+	int ret = 0;
+
+	status = readl(&hba->u.mv.regs->outbound_doorbell);
+	writel(~status, &hba->u.mv.regs->outbound_doorbell);
+
+	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
+		u32 msg;
+		msg = readl(&hba->u.mv.mu->outbound_msg);
+		dprintk("received outbound msg %x\n", msg);
+		hptiop_message_callback(hba, msg);
+		ret = 1;
+	}
+
+	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
+		u64 tag;
+
+		while ((tag = mv_outbound_read(hba->u.mv.mu)))
+			hptiop_request_callback_mv(hba, tag);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static int iop_send_sync_request_itl(struct hptiop_hba *hba,
 					void __iomem *_req, u32 millisec)
 {
 	struct hpt_iop_request_header __iomem *req = _req;
 	u32 i;
 
-	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
-			&req->flags);
-
+	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
 	writel(0, &req->context);
-
-	writel((unsigned long)req - (unsigned long)hba->iop,
-			&hba->iop->inbound_queue);
-
-	hptiop_pci_posting_flush(hba->iop);
+	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
+			&hba->u.itl.iop->inbound_queue);
+	readl(&hba->u.itl.iop->outbound_intstatus);
 
 	for (i = 0; i < millisec; i++) {
-		__iop_intr(hba);
+		iop_intr_itl(hba);
 		if (readl(&req->context))
 			return 0;
 		msleep(1);
@@ -159,19 +251,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
 	return -1;
 }
 
-static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+static int iop_send_sync_request_mv(struct hptiop_hba *hba,
+					u32 size_bits, u32 millisec)
 {
+	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
 	u32 i;
 
 	hba->msg_done = 0;
+	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+	mv_inbound_write(hba->u.mv.internal_req_phy |
+			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
+
+	for (i = 0; i < millisec; i++) {
+		iop_intr_mv(hba);
+		if (hba->msg_done)
+			return 0;
+		msleep(1);
+	}
+	return -1;
+}
 
-	writel(msg, &hba->iop->inbound_msgaddr0);
+static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
+{
+	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
+	readl(&hba->u.itl.iop->outbound_intstatus);
+}
 
-	hptiop_pci_posting_flush(hba->iop);
+static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
+{
+	writel(msg, &hba->u.mv.mu->inbound_msg);
+	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
+	readl(&hba->u.mv.regs->inbound_doorbell);
+}
+
+static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
+{
+	u32 i;
+
+	hba->msg_done = 0;
+	hba->ops->post_msg(hba, msg);
 
 	for (i = 0; i < millisec; i++) {
 		spin_lock_irq(hba->host->host_lock);
-		__iop_intr(hba);
+		hba->ops->iop_intr(hba);
 		spin_unlock_irq(hba->host->host_lock);
 		if (hba->msg_done)
 			break;
@@ -181,46 +303,68 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
 	return hba->msg_done? 0 : -1;
 }
 
-static int iop_get_config(struct hptiop_hba *hba,
+static int iop_get_config_itl(struct hptiop_hba *hba,
 				struct hpt_iop_request_get_config *config)
 {
 	u32 req32;
 	struct hpt_iop_request_get_config __iomem *req;
 
-	req32 = readl(&hba->iop->inbound_queue);
+	req32 = readl(&hba->u.itl.iop->inbound_queue);
 	if (req32 == IOPMU_QUEUE_EMPTY)
 		return -1;
 
 	req = (struct hpt_iop_request_get_config __iomem *)
-			((unsigned long)hba->iop + req32);
+			((unsigned long)hba->u.itl.iop + req32);
 
 	writel(0, &req->header.flags);
 	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
 	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
 	writel(IOP_RESULT_PENDING, &req->header.result);
 
-	if (iop_send_sync_request(hba, req, 20000)) {
+	if (iop_send_sync_request_itl(hba, req, 20000)) {
 		dprintk("Get config send cmd failed\n");
 		return -1;
 	}
 
 	memcpy_fromio(config, req, sizeof(*config));
-	writel(req32, &hba->iop->outbound_queue);
+	writel(req32, &hba->u.itl.iop->outbound_queue);
+	return 0;
+}
+
+static int iop_get_config_mv(struct hptiop_hba *hba,
+				struct hpt_iop_request_get_config *config)
+{
+	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
+
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
+	req->header.size =
+		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
+	req->header.context_hi32 = 0;
+
+	if (iop_send_sync_request_mv(hba, 0, 20000)) {
+		dprintk("Get config send cmd failed\n");
+		return -1;
+	}
+
+	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
 	return 0;
 }
 
-static int iop_set_config(struct hptiop_hba *hba,
+static int iop_set_config_itl(struct hptiop_hba *hba,
 				struct hpt_iop_request_set_config *config)
 {
 	u32 req32;
 	struct hpt_iop_request_set_config __iomem *req;
 
-	req32 = readl(&hba->iop->inbound_queue);
+	req32 = readl(&hba->u.itl.iop->inbound_queue);
 	if (req32 == IOPMU_QUEUE_EMPTY)
 		return -1;
 
 	req = (struct hpt_iop_request_set_config __iomem *)
-			((unsigned long)hba->iop + req32);
+			((unsigned long)hba->u.itl.iop + req32);
 
 	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
 		(u8 *)config + sizeof(struct hpt_iop_request_header),
@@ -232,22 +376,53 @@ static int iop_set_config(struct hptiop_hba *hba,
 	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
 	writel(IOP_RESULT_PENDING, &req->header.result);
 
-	if (iop_send_sync_request(hba, req, 20000)) {
+	if (iop_send_sync_request_itl(hba, req, 20000)) {
 		dprintk("Set config send cmd failed\n");
 		return -1;
 	}
 
-	writel(req32, &hba->iop->outbound_queue);
+	writel(req32, &hba->u.itl.iop->outbound_queue);
 	return 0;
 }
 
-static int hptiop_initialize_iop(struct hptiop_hba *hba)
+static int iop_set_config_mv(struct hptiop_hba *hba,
+				struct hpt_iop_request_set_config *config)
 {
-	struct hpt_iopmu __iomem *iop = hba->iop;
+	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
 
-	/* enable interrupts */
+	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+	req->header.size =
+		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+	req->header.context_hi32 = 0;
+
+	if (iop_send_sync_request_mv(hba, 0, 20000)) {
+		dprintk("Set config send cmd failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
+{
 	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
-			&iop->outbound_intmask);
+		&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
+{
+	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
+		&hba->u.mv.regs->outbound_intmask);
+}
+
+static int hptiop_initialize_iop(struct hptiop_hba *hba)
+{
+	/* enable interrupts */
+	hba->ops->enable_intr(hba);
 
 	hba->initialized = 1;
 
@@ -261,37 +436,84 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
 	return 0;
 }
 
-static int hptiop_map_pci_bar(struct hptiop_hba *hba)
+static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
 {
 	u32 mem_base_phy, length;
 	void __iomem *mem_base_virt;
+
 	struct pci_dev *pcidev = hba->pcidev;
 
-	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
+
+	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
 		printk(KERN_ERR "scsi%d: pci resource invalid\n",
 				hba->host->host_no);
-		return -1;
+		return NULL;
 	}
 
-	mem_base_phy = pci_resource_start(pcidev, 0);
-	length = pci_resource_len(pcidev, 0);
+	mem_base_phy = pci_resource_start(pcidev, index);
+	length = pci_resource_len(pcidev, index);
 	mem_base_virt = ioremap(mem_base_phy, length);
 
 	if (!mem_base_virt) {
 		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
 				hba->host->host_no);
+		return NULL;
+	}
+	return mem_base_virt;
+}
+
+static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
+{
+	struct pci_dev *pcidev = hba->pcidev;
+	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
+	if (hba->u.itl.iop == NULL)
+		return -1;
+	if ((pcidev->device & 0xff00) == 0x4400) {
+		hba->u.itl.plx = hba->u.itl.iop;
+		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
+		if (hba->u.itl.iop == NULL) {
+			iounmap(hba->u.itl.plx);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
+{
+	if (hba->u.itl.plx)
+		iounmap(hba->u.itl.plx);
+	iounmap(hba->u.itl.iop);
+}
+
+static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
+{
+	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
+	if (hba->u.mv.regs == NULL)
+		return -1;
+
+	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
+	if (hba->u.mv.mu == NULL) {
+		iounmap(hba->u.mv.regs);
 		return -1;
 	}
 
-	hba->iop = mem_base_virt;
-	dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
 	return 0;
 }
 
+static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
+{
+	iounmap(hba->u.mv.regs);
+	iounmap(hba->u.mv.mu);
+}
+
 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 {
 	dprintk("iop message 0x%x\n", msg);
 
+	if (msg == IOPMU_INBOUND_MSG0_NOP)
+		hba->msg_done = 1;
+
 	if (!hba->initialized)
 		return;
 
@@ -303,7 +525,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
 		hba->msg_done = 1;
 }
 
-static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
+static struct hptiop_request *get_req(struct hptiop_hba *hba)
 {
 	struct hptiop_request *ret;
 
@@ -316,20 +538,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
 	return ret;
 }
 
-static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
+static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
 {
 	dprintk("free_req(%d, %p)\n", req->index, req);
 	req->next = hba->req_list;
 	hba->req_list = req;
 }
 
-static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
+static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
+				struct hpt_iop_request_scsi_command *req)
 {
-	struct hpt_iop_request_scsi_command *req;
 	struct scsi_cmnd *scp;
 
-	req = (struct hpt_iop_request_scsi_command *)hba->reqs[tag].req_virt;
-	dprintk("hptiop_host_request_callback: req=%p, type=%d, "
+	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
 			"result=%d, context=0x%x tag=%d\n",
 			req, req->header.type, req->header.result,
 			req->header.context, tag);
@@ -339,25 +560,13 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
 
 	scp = hba->reqs[tag].scp;
 
-	if (HPT_SCP(scp)->mapped) {
-		if (scp->use_sg)
-			pci_unmap_sg(hba->pcidev,
-				(struct scatterlist *)scp->request_buffer,
-				scp->use_sg,
-				scp->sc_data_direction
-			);
-		else
-			pci_unmap_single(hba->pcidev,
-				HPT_SCP(scp)->dma_handle,
-				scp->request_bufflen,
-				scp->sc_data_direction
-			);
-	}
+	if (HPT_SCP(scp)->mapped)
+		scsi_dma_unmap(scp);
 
 	switch (le32_to_cpu(req->header.result)) {
 	case IOP_RESULT_SUCCESS:
-		scp->resid = scp->request_bufflen -
-				le32_to_cpu(req->dataxfer_length);
+		scsi_set_resid(scp,
+			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = (DID_OK<<16);
 		break;
 	case IOP_RESULT_BAD_TARGET:
@@ -375,18 +584,17 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
 	case IOP_RESULT_INVALID_REQUEST:
 		scp->result = (DID_ABORT<<16);
 		break;
-	case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
+	case IOP_RESULT_CHECK_CONDITION:
+		scsi_set_resid(scp,
+			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = SAM_STAT_CHECK_CONDITION;
-		memset(&scp->sense_buffer,
-				0, sizeof(scp->sense_buffer));
-		memcpy(&scp->sense_buffer, &req->sg_list,
-				min_t(size_t, sizeof(scp->sense_buffer),
+		memcpy(scp->sense_buffer, &req->sg_list,
+				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
 					le32_to_cpu(req->dataxfer_length)));
 		break;
 
 	default:
-		scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
-					(DID_ABORT<<16);
+		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
 		break;
 	}
 
@@ -395,15 +603,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
 	free_req(hba, &hba->reqs[tag]);
 }
 
-void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
+static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
+{
+	struct hpt_iop_request_scsi_command *req;
+	u32 tag;
+
+	if (hba->iopintf_v2) {
+		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
+		req = hba->reqs[tag].req_virt;
+		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
+	} else {
+		tag = _tag;
+		req = hba->reqs[tag].req_virt;
+	}
+
+	hptiop_finish_scsi_req(hba, tag, req);
+}
+
+void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
 	struct hpt_iop_request_header __iomem *req;
 	struct hpt_iop_request_ioctl_command __iomem *p;
 	struct hpt_ioctl_k *arg;
 
 	req = (struct hpt_iop_request_header __iomem *)
-			((unsigned long)hba->iop + tag);
-	dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
+			((unsigned long)hba->u.itl.iop + tag);
+	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
 			"result=%d, context=0x%x tag=%d\n",
 			req, readl(&req->type), readl(&req->result),
 			readl(&req->context), tag);
@@ -431,7 +657,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
 		arg->result = HPT_IOCTL_RESULT_FAILED;
 
 	arg->done(arg);
-	writel(tag, &hba->iop->outbound_queue);
+	writel(tag, &hba->u.itl.iop->outbound_queue);
 }
 
 static irqreturn_t hptiop_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -441,7 +667,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id, struct pt_regs *regs)
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	handled = __iop_intr(hba);
+	handled = hba->ops->iop_intr(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	return handled;
@@ -451,43 +677,77 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
 {
 	struct Scsi_Host *host = scp->device->host;
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
-	struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;
-
-	/*
-	 * though we'll not get non-use_sg fields anymore,
-	 * keep use_sg checking anyway
-	 */
-	if (scp->use_sg) {
-		int idx;
-
-		HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
-				sglist, scp->use_sg,
-				scp->sc_data_direction);
-		HPT_SCP(scp)->mapped = 1;
-		BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
-
-		for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
-			psg[idx].pci_address =
-				cpu_to_le64(sg_dma_address(&sglist[idx]));
-			psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
-			psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
-				cpu_to_le32(1) : 0;
-		}
+	struct scatterlist *sg = (struct scatterlist *)scp->request_buffer;
+	int idx, nseg;
 
-		return HPT_SCP(scp)->sgcnt;
-	} else {
-		HPT_SCP(scp)->dma_handle = pci_map_single(
-				hba->pcidev,
-				scp->request_buffer,
-				scp->request_bufflen,
-				scp->sc_data_direction
-			);
-		HPT_SCP(scp)->mapped = 1;
-		psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
-		psg->size = cpu_to_le32(scp->request_bufflen);
-		psg->eot = cpu_to_le32(1);
-		return 1;
+	nseg = scsi_dma_map(scp);
+	BUG_ON(nseg < 0);
+	if (!nseg)
+		return 0;
+
+	HPT_SCP(scp)->sgcnt = nseg;
+	HPT_SCP(scp)->mapped = 1;
+
+	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
+
+	for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
+		psg[idx].pci_address = cpu_to_le64(sg_dma_address(&sg[idx]));
+		psg[idx].size = cpu_to_le32(sg_dma_len(&sg[idx]));
+		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
+			cpu_to_le32(1) : 0;
 	}
+	return HPT_SCP(scp)->sgcnt;
+}
+
+static void hptiop_post_req_itl(struct hptiop_hba *hba,
+				struct hptiop_request *_req)
+{
+	struct hpt_iop_request_header *reqhdr = _req->req_virt;
+
+	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+				      (u32)_req->index);
+	reqhdr->context_hi32 = 0;
+
+	if (hba->iopintf_v2) {
+		u32 size, size_bits;
+
+		size = le32_to_cpu(reqhdr->size);
+		if (size < 256)
+			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+		else if (size < 512)
+			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+		else
+			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
+						IOPMU_QUEUE_ADDR_HOST_BIT;
+		writel(_req->req_shifted_phy | size_bits,
+			&hba->u.itl.iop->inbound_queue);
+	} else
+		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
+					&hba->u.itl.iop->inbound_queue);
+}
+
+static void hptiop_post_req_mv(struct hptiop_hba *hba,
+			       struct hptiop_request *_req)
+{
+	struct hpt_iop_request_header *reqhdr = _req->req_virt;
+	u32 size, size_bit;
+
+	reqhdr->context = cpu_to_le32(_req->index<<8 |
+					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
+	reqhdr->context_hi32 = 0;
+	size = le32_to_cpu(reqhdr->size);
+
+	if (size <= 256)
+		size_bit = 0;
+	else if (size <= 256*2)
+		size_bit = 1;
+	else if (size <= 256*3)
+		size_bit = 2;
+	else
+		size_bit = 3;
+
+	mv_inbound_write((_req->req_shifted_phy << 5) |
+		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
 }
 
 static int hptiop_queuecommand(struct scsi_cmnd *scp,
@@ -515,9 +775,9 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 			scp,
 			host->host_no, scp->device->channel,
 			scp->device->id, scp->device->lun,
-			*((u32 *)&scp->cmnd),
-			*((u32 *)&scp->cmnd + 1),
-			*((u32 *)&scp->cmnd + 2),
+			((u32 *)scp->cmnd)[0],
+			((u32 *)scp->cmnd)[1],
+			((u32 *)scp->cmnd)[2],
 			_req->index, _req->req_virt);
 
 	scp->result = 0;
@@ -529,21 +789,17 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 		goto cmd_done;
 	}
 
-	req = (struct hpt_iop_request_scsi_command *)_req->req_virt;
+	req = _req->req_virt;
 
 	/* build S/G table */
-	if (scp->request_bufflen)
-		sg_count = hptiop_buildsgl(scp, req->sg_list);
-	else
+	sg_count = hptiop_buildsgl(scp, req->sg_list);
+	if (!sg_count)
 		HPT_SCP(scp)->mapped = 0;
 
 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
-	req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
-							(u32)_req->index);
-	req->header.context_hi32 = 0;
-	req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
+	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
 	req->channel = scp->device->channel;
 	req->target = scp->device->id;
 	req->lun = scp->device->lun;
@@ -553,10 +809,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
 				 + sg_count * sizeof(struct hpt_iopsg));
 
 	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
-
-	writel(IOPMU_QUEUE_ADDR_HOST_BIT | _req->req_shifted_phy,
-			&hba->iop->inbound_queue);
-
+	hba->ops->post_req(hba, _req);
 	return 0;
 
 cmd_done:
@@ -574,9 +827,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
 {
 	if (atomic_xchg(&hba->resetting, 1) == 0) {
 		atomic_inc(&hba->reset_count);
-		writel(IOPMU_INBOUND_MSG0_RESET,
-				&hba->iop->inbound_msgaddr0);
-		hptiop_pci_posting_flush(hba->iop);
+		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
 	}
 
 	wait_event_timeout(hba->reset_wq,
@@ -612,20 +863,22 @@ static int hptiop_reset(struct scsi_cmnd *scp)
 static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
 						int queue_depth)
 {
-	if(queue_depth > 256)
-		queue_depth = 256;
+	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
+
+	if (queue_depth > hba->max_requests)
+		queue_depth = hba->max_requests;
 	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
 	return queue_depth;
 }
 
-static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
+static ssize_t hptiop_show_version(struct class_device *dev, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
 }
 
-static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
+static ssize_t hptiop_show_fw_version(struct class_device *dev, char *buf)
 {
-	struct Scsi_Host *host = class_to_shost(class_dev);
+	struct Scsi_Host *host = class_to_shost(dev);
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
 
 	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
@@ -664,7 +917,6 @@ static struct scsi_host_template driver_template = {
 	.eh_device_reset_handler    = hptiop_reset,
 	.eh_bus_reset_handler       = hptiop_reset,
 	.info                       = hptiop_info,
-	.unchecked_isa_dma          = 0,
 	.emulated                   = 0,
 	.use_clustering             = ENABLE_CLUSTERING,
 	.proc_name                  = driver_name,
@@ -673,6 +925,26 @@ static struct scsi_host_template driver_template = {
 	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
 };
 
+static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
+{
+	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
+			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
+	if (hba->u.mv.internal_req)
+		return 0;
+	else
+		return -1;
+}
+
+static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
+{
+	if (hba->u.mv.internal_req) {
+		dma_free_coherent(&hba->pcidev->dev, 0x800,
+			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
+		return 0;
+	} else
+		return -1;
+}
+
 static int __devinit hptiop_probe(struct pci_dev *pcidev,
 					const struct pci_device_id *id)
 {
@@ -698,8 +970,8 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	pci_set_master(pcidev);
 
 	/* Enable 64bit DMA if possible */
-	if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
-		if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
+	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
 			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
 			goto disable_pci_device;
 		}
@@ -718,9 +990,11 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 	hba = (struct hptiop_hba *)host->hostdata;
 
+	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
 	hba->pcidev = pcidev;
 	hba->host = host;
 	hba->initialized = 0;
+	hba->iopintf_v2 = 0;
 
 	atomic_set(&hba->resetting, 0);
 	atomic_set(&hba->reset_count, 0);
@@ -734,16 +1008,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	host->n_io_port = 0;
 	host->irq = pcidev->irq;
 
-	if (hptiop_map_pci_bar(hba))
+	if (hba->ops->map_pci_bar(hba))
 		goto free_scsi_host;
 
-	if (iop_wait_ready(hba->iop, 20000)) {
+	if (hba->ops->iop_wait_ready(hba, 20000)) {
 		printk(KERN_ERR "scsi%d: firmware not ready\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
 	}
 
-	if (iop_get_config(hba, &iop_config)) {
+	if (hba->ops->internal_memalloc) {
+		if (hba->ops->internal_memalloc(hba)) {
+			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+				hba->host->host_no);
+			goto unmap_pci_bar;
+		}
+	}
+
+	if (hba->ops->get_config(hba, &iop_config)) {
 		printk(KERN_ERR "scsi%d: get config failed\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
@@ -755,8 +1037,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	hba->max_request_size = le32_to_cpu(iop_config.request_size);
 	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
 	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
+	hba->interface_version = le32_to_cpu(iop_config.interface_version);
 	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
 
+	if (hba->firmware_version > 0x01020000 ||
+			hba->interface_version > 0x01020000)
+		hba->iopintf_v2 = 1;
+
 	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
 	host->max_id = le32_to_cpu(iop_config.max_devices);
 	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
@@ -764,10 +1051,17 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
 	host->max_cmd_len = 16;
 
-	set_config.vbus_id = cpu_to_le32(host->host_no);
+	req_size = sizeof(struct hpt_iop_request_scsi_command)
+		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
+	if ((req_size & 0x1f) != 0)
+		req_size = (req_size + 0x1f) & ~0x1f;
+
+	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
 	set_config.iop_id = cpu_to_le32(host->host_no);
+	set_config.vbus_id = cpu_to_le16(host->host_no);
+	set_config.max_host_request_size = cpu_to_le16(req_size);
 
-	if (iop_set_config(hba, &set_config)) {
+	if (hba->ops->set_config(hba, &set_config)) {
 		printk(KERN_ERR "scsi%d: set config failed\n",
 				hba->host->host_no);
 		goto unmap_pci_bar;
@@ -783,10 +1077,6 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 	}
 
 	/* Allocate request mem */
-	req_size = sizeof(struct hpt_iop_request_scsi_command)
-		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
-	if ((req_size& 0x1f) != 0)
-		req_size = (req_size + 0x1f) & ~0x1f;
 
 	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
 
@@ -840,14 +1130,17 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 
 free_request_mem:
 	dma_free_coherent(&hba->pcidev->dev,
-			hba->req_size*hba->max_requests + 0x20,
+			hba->req_size * hba->max_requests + 0x20,
 			hba->dma_coherent, hba->dma_coherent_handle);
 
 free_request_irq:
 	free_irq(hba->pcidev->irq, hba);
 
 unmap_pci_bar:
-	iounmap(hba->iop);
+	if (hba->ops->internal_memfree)
+		hba->ops->internal_memfree(hba);
+
+	hba->ops->unmap_pci_bar(hba);
 
 free_scsi_host:
 	scsi_host_put(host);
@@ -866,8 +1159,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 {
 	struct Scsi_Host *host = pci_get_drvdata(pcidev);
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
-	struct hpt_iopmu __iomem *iop = hba->iop;
-	u32    int_mask;
 
 	dprintk("hptiop_shutdown(%p)\n", hba);
 
@@ -877,11 +1168,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
 					hba->host->host_no);
 
 	/* disable all outbound interrupts */
-	int_mask = readl(&iop->outbound_intmask);
+	hba->ops->disable_intr(hba);
+}
+
+static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
+{
+	u32 int_mask;
+
+	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
 	writel(int_mask |
 		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
-		&iop->outbound_intmask);
-	hptiop_pci_posting_flush(iop);
+		&hba->u.itl.iop->outbound_intmask);
+	readl(&hba->u.itl.iop->outbound_intmask);
+}
+
+static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
+{
+	writel(0, &hba->u.mv.regs->outbound_intmask);
+	readl(&hba->u.mv.regs->outbound_intmask);
 }
 
 static void hptiop_remove(struct pci_dev *pcidev)
@@ -902,7 +1206,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
 			hba->dma_coherent,
 			hba->dma_coherent_handle);
 
-	iounmap(hba->iop);
+	if (hba->ops->internal_memfree)
+		hba->ops->internal_memfree(hba);
+
+	hba->ops->unmap_pci_bar(hba);
 
 	pci_release_regions(hba->pcidev);
 	pci_set_drvdata(hba->pcidev, NULL);
@@ -911,17 +1218,59 @@ static void hptiop_remove(struct pci_dev *pcidev)
 	scsi_host_put(host);
 }
 
+static struct hptiop_adapter_ops hptiop_itl_ops = {
+	.iop_wait_ready    = iop_wait_ready_itl,
+	.internal_memalloc = NULL,
+	.internal_memfree  = NULL,
+	.map_pci_bar       = hptiop_map_pci_bar_itl,
+	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
+	.enable_intr       = hptiop_enable_intr_itl,
+	.disable_intr      = hptiop_disable_intr_itl,
+	.get_config        = iop_get_config_itl,
+	.set_config        = iop_set_config_itl,
+	.iop_intr          = iop_intr_itl,
+	.post_msg          = hptiop_post_msg_itl,
+	.post_req          = hptiop_post_req_itl,
+};
+
+static struct hptiop_adapter_ops hptiop_mv_ops = {
+	.iop_wait_ready    = iop_wait_ready_mv,
+	.internal_memalloc = hptiop_internal_memalloc_mv,
+	.internal_memfree  = hptiop_internal_memfree_mv,
+	.map_pci_bar       = hptiop_map_pci_bar_mv,
+	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
+	.enable_intr       = hptiop_enable_intr_mv,
+	.disable_intr      = hptiop_disable_intr_mv,
+	.get_config        = iop_get_config_mv,
+	.set_config        = iop_set_config_mv,
+	.iop_intr          = iop_intr_mv,
+	.post_msg          = hptiop_post_msg_mv,
+	.post_req          = hptiop_post_req_mv,
+};
+
 static struct pci_device_id hptiop_id_table[] = {
-	{ PCI_DEVICE(0x1103, 0x3220) },
-	{ PCI_DEVICE(0x1103, 0x3320) },
-	{ PCI_DEVICE(0x1103, 0x3520) },
-	{ PCI_DEVICE(0x1103, 0x4320) },
-	{ PCI_DEVICE(0x1103, 0x3510) },
-	{ PCI_DEVICE(0x1103, 0x3511) },
-	{ PCI_DEVICE(0x1103, 0x3521) },
-	{ PCI_DEVICE(0x1103, 0x3522) },
-	{ PCI_DEVICE(0x1103, 0x3410) },
-	{ PCI_DEVICE(0x1103, 0x3540) },
+	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
+	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
+	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
 	{},
 };
 
@@ -951,3 +1300,4 @@ module_init(hptiop_module_init);
 module_exit(hptiop_module_exit);
 
 MODULE_LICENSE("GPL");
+
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index f04f7e8..135327f 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
 /*
- * HighPoint RR3xxx controller driver for Linux
- * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
+ * HighPoint RR3xxx/4xxx controller driver for Linux
+ * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,221 +18,7 @@
 #ifndef _HPTIOP_H_
 #define _HPTIOP_H_
 
-/*
- * logical device type.
- * Identify array (logical device) and physical device.
- */
-#define LDT_ARRAY   1
-#define LDT_DEVICE  2
-
-/*
- * Array types
- */
-#define AT_UNKNOWN  0
-#define AT_RAID0    1
-#define AT_RAID1    2
-#define AT_RAID5    3
-#define AT_RAID6    4
-#define AT_JBOD     7
-
-#define MAX_NAME_LENGTH     36
-#define MAX_ARRAYNAME_LEN   16
-
-#define MAX_ARRAY_MEMBERS_V1 8
-#define MAX_ARRAY_MEMBERS_V2 16
-
-/* keep definition for source code compatiblity */
-#define MAX_ARRAY_MEMBERS MAX_ARRAY_MEMBERS_V1
-
-/*
- * array flags
- */
-#define ARRAY_FLAG_DISABLED         0x00000001 /* The array is disabled */
-#define ARRAY_FLAG_NEEDBUILDING     0x00000002 /* need to be rebuilt */
-#define ARRAY_FLAG_REBUILDING       0x00000004 /* in rebuilding process */
-#define ARRAY_FLAG_BROKEN           0x00000008 /* broken but still working */
-#define ARRAY_FLAG_BOOTDISK         0x00000010 /* has a active partition */
-#define ARRAY_FLAG_BOOTMARK         0x00000040 /* array has boot mark set */
-#define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */
-#define ARRAY_FLAG_VERIFYING        0x00000100 /* is being verified */
-#define ARRAY_FLAG_INITIALIZING     0x00000200 /* is being initialized */
-#define ARRAY_FLAG_TRANSFORMING     0x00000400 /* tranform in progress */
-#define ARRAY_FLAG_NEEDTRANSFORM    0x00000800 /* array need tranform */
-#define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* initialization not done */
-#define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant */
-
-/*
- * device flags
- */
-#define DEVICE_FLAG_DISABLED        0x00000001 /* device is disabled */
-#define DEVICE_FLAG_UNINITIALIZED   0x00010000 /* device is not initialized */
-#define DEVICE_FLAG_LEGACY          0x00020000 /* lagacy drive */
-#define DEVICE_FLAG_IS_SPARE        0x80000000 /* is a spare disk */
-
-/*
- * ioctl codes
- */
-#define HPT_CTL_CODE(x) (x+0xFF00)
-#define HPT_CTL_CODE_LINUX_TO_IOP(x) ((x)-0xff00)
-
-#define HPT_IOCTL_GET_CONTROLLER_INFO       HPT_CTL_CODE(2)
-#define HPT_IOCTL_GET_CHANNEL_INFO          HPT_CTL_CODE(3)
-#define HPT_IOCTL_GET_LOGICAL_DEVICES       HPT_CTL_CODE(4)
-#define HPT_IOCTL_GET_DRIVER_CAPABILITIES   HPT_CTL_CODE(19)
-#define HPT_IOCTL_GET_DEVICE_INFO_V3        HPT_CTL_CODE(46)
-#define HPT_IOCTL_GET_CONTROLLER_INFO_V2    HPT_CTL_CODE(47)
-
-/*
- * Controller information.
- */
-struct hpt_controller_info {
-	u8      chip_type;                    /* chip type */
-	u8      interrupt_level;              /* IRQ level */
-	u8      num_buses;                    /* bus count */
-	u8      chip_flags;
-
-	u8      product_id[MAX_NAME_LENGTH];/* product name */
-	u8      vendor_id[MAX_NAME_LENGTH]; /* vendor name */
-}
-__attribute__((packed));
-
-/*
- * Channel information.
- */
-struct hpt_channel_info {
-	__le32  io_port;         /* IDE Base Port Address */
-	__le32  control_port;    /* IDE Control Port Address */
-	__le32  devices[2];      /* device connected to this channel */
-}
-__attribute__((packed));
-
-/*
- * Array information.
- */
-struct hpt_array_info_v3 {
-	u8      name[MAX_ARRAYNAME_LEN]; /* array name */
-	u8      description[64];         /* array description */
-	u8      create_manager[16];      /* who created it */
-	__le32  create_time;             /* when created it */
-
-	u8      array_type;              /* array type */
-	u8      block_size_shift;        /* stripe size */
-	u8      ndisk;                   /* Number of ID in Members[] */
-	u8      reserved;
-
-	__le32  flags;                   /* working flags, see ARRAY_FLAG_XXX */
-	__le32  members[MAX_ARRAY_MEMBERS_V2];  /* member array/disks */
-
-	__le32  rebuilding_progress;
-	__le64  rebuilt_sectors; /* rebuilding point (LBA) for single member */
-
-	__le32  transform_source;
-	__le32  transform_target;    /* destination device ID */
-	__le32  transforming_progress;
-	__le32  signature;              /* persistent identification*/
-	__le16  critical_members;       /* bit mask of critical members */
-	__le16  reserve2;
-	__le32  reserve;
-}
-__attribute__((packed));
-
-/*
- * physical device information.
- */
-#define MAX_PARENTS_PER_DISK    8
-
-struct hpt_device_info_v2 {
-	u8   ctlr_id;             /* controller id */
-	u8   path_id;             /* bus */
-	u8   target_id;           /* id */
-	u8   device_mode_setting; /* Current Data Transfer mode: 0-4 PIO0-4 */
-				  /* 5-7 MW DMA0-2, 8-13 UDMA0-5 */
-	u8   device_type;         /* device type */
-	u8   usable_mode;         /* highest usable mode */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	u8   NCQ_enabled: 1;
-	u8   NCQ_supported: 1;
-	u8   TCQ_enabled: 1;
-	u8   TCQ_supported: 1;
-	u8   write_cache_enabled: 1;
-	u8   write_cache_supported: 1;
-	u8   read_ahead_enabled: 1;
-	u8   read_ahead_supported: 1;
-	u8   reserved6: 6;
-	u8   spin_up_mode: 2;
-#else
-	u8   read_ahead_supported: 1;
-	u8   read_ahead_enabled: 1;
-	u8   write_cache_supported: 1;
-	u8   write_cache_enabled: 1;
-	u8   TCQ_supported: 1;
-	u8   TCQ_enabled: 1;
-	u8   NCQ_supported: 1;
-	u8   NCQ_enabled: 1;
-	u8   spin_up_mode: 2;
-	u8   reserved6: 6;
-#endif
-
-	__le32  flags;         /* working flags, see DEVICE_FLAG_XXX */
-	u8      ident[150];    /* (partitial) Identify Data of this device */
-
-	__le64  total_free;
-	__le64  max_free;
-	__le64  bad_sectors;
-	__le32  parent_arrays[MAX_PARENTS_PER_DISK];
-}
-__attribute__((packed));
-
-/*
- * Logical device information.
- */
-#define INVALID_TARGET_ID   0xFF
-#define INVALID_BUS_ID      0xFF
-
-struct hpt_logical_device_info_v3 {
-	u8       type;                   /* LDT_ARRAY or LDT_DEVICE */
-	u8       cache_policy;           /* refer to CACHE_POLICY_xxx */
-	u8       vbus_id;                /* vbus sequence in vbus_list */
-	u8       target_id;              /* OS target id. 0xFF is invalid */
-					 /* OS name: DISK $VBusId_$TargetId */
-	__le64   capacity;               /* array capacity */
-	__le32   parent_array;           /* don't use this field for physical
-					    device. use ParentArrays field in
-					    hpt_device_info_v2 */
-	/* reserved statistic fields */
-	__le32   stat1;
-	__le32   stat2;
-	__le32   stat3;
-	__le32   stat4;
-
-	union {
-		struct hpt_array_info_v3 array;
-		struct hpt_device_info_v2 device;
-	} __attribute__((packed)) u;
-
-}
-__attribute__((packed));
-
-/*
- * ioctl structure
- */
-#define HPT_IOCTL_MAGIC   0xA1B2C3D4
-
-struct hpt_ioctl_u {
-	u32   magic;            /* used to check if it's a valid ioctl packet */
-	u32   ioctl_code;       /* operation control code */
-	void __user *inbuf;     /* input data buffer */
-	u32   inbuf_size;       /* size of input data buffer */
-	void __user *outbuf;    /* output data buffer */
-	u32   outbuf_size;      /* size of output data buffer */
-	void __user *bytes_returned;   /* count of bytes returned */
-}
-__attribute__((packed));
-
-
-struct hpt_iopmu
-{
+struct hpt_iopmu_itl {
 	__le32 resrved0[4];
 	__le32 inbound_msgaddr0;
 	__le32 inbound_msgaddr1;
@@ -252,6 +38,8 @@ struct hpt_iopmu
 #define IOPMU_QUEUE_EMPTY            0xffffffff
 #define IOPMU_QUEUE_MASK_HOST_BITS   0xf0000000
 #define IOPMU_QUEUE_ADDR_HOST_BIT    0x80000000
+#define IOPMU_QUEUE_REQUEST_SIZE_BIT    0x40000000
+#define IOPMU_QUEUE_REQUEST_RESULT_BIT   0x40000000
 
 #define IOPMU_OUTBOUND_INT_MSG0      1
 #define IOPMU_OUTBOUND_INT_MSG1      2
@@ -265,6 +53,40 @@ struct hpt_iopmu
 #define IOPMU_INBOUND_INT_ERROR      8
 #define IOPMU_INBOUND_INT_POSTQUEUE  0x10
 
+#define MVIOP_QUEUE_LEN  512
+
+struct hpt_iopmu_mv {
+	__le32 inbound_head;
+	__le32 inbound_tail;
+	__le32 outbound_head;
+	__le32 outbound_tail;
+	__le32 inbound_msg;
+	__le32 outbound_msg;
+	__le32 reserve[10];
+	__le64 inbound_q[MVIOP_QUEUE_LEN];
+	__le64 outbound_q[MVIOP_QUEUE_LEN];
+};
+
+struct hpt_iopmv_regs {
+	__le32 reserved[0x20400 / 4];
+	__le32 inbound_doorbell;
+	__le32 inbound_intmask;
+	__le32 outbound_doorbell;
+	__le32 outbound_intmask;
+};
+
+#define MVIOP_MU_QUEUE_ADDR_HOST_MASK   (~(0x1full))
+#define MVIOP_MU_QUEUE_ADDR_HOST_BIT    4
+
+#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32  0xffffffff
+#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT   1
+#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
+
+#define MVIOP_MU_INBOUND_INT_MSG        1
+#define MVIOP_MU_INBOUND_INT_POSTQUEUE  2
+#define MVIOP_MU_OUTBOUND_INT_MSG       1
+#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+
 enum hpt_iopmu_message {
 	/* host-to-iop messages */
 	IOPMU_INBOUND_MSG0_NOP = 0,
@@ -283,8 +105,7 @@ enum hpt_iopmu_message {
 	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
 };
 
-struct hpt_iop_request_header
-{
+struct hpt_iop_request_header {
 	__le32 size;
 	__le32 type;
 	__le32 flags;
@@ -315,11 +136,10 @@ enum hpt_iop_result_type {
 	IOP_RESULT_RESET,
 	IOP_RESULT_INVALID_REQUEST,
 	IOP_RESULT_BAD_TARGET,
-	IOP_RESULT_MODE_SENSE_CHECK_CONDITION,
+	IOP_RESULT_CHECK_CONDITION,
 };
 
-struct hpt_iop_request_get_config
-{
+struct hpt_iop_request_get_config {
 	struct hpt_iop_request_header header;
 	__le32 interface_version;
 	__le32 firmware_version;
@@ -332,23 +152,21 @@ struct hpt_iop_request_get_config
 	__le32 sdram_size;
 };
 
-struct hpt_iop_request_set_config
-{
+struct hpt_iop_request_set_config {
 	struct hpt_iop_request_header header;
 	__le32 iop_id;
-	__le32 vbus_id;
+	__le16 vbus_id;
+	__le16 max_host_request_size;
 	__le32 reserve[6];
 };
 
-struct hpt_iopsg
-{
+struct hpt_iopsg {
 	__le32 size;
 	__le32 eot; /* non-zero: end of table */
 	__le64 pci_address;
 };
 
-struct hpt_iop_request_block_command
-{
+struct hpt_iop_request_block_command {
 	struct hpt_iop_request_header header;
 	u8     channel;
 	u8     target;
@@ -366,8 +184,7 @@ struct hpt_iop_request_block_command
 #define IOP_BLOCK_COMMAND_FLUSH    4
 #define IOP_BLOCK_COMMAND_SHUTDOWN 5
 
-struct hpt_iop_request_scsi_command
-{
+struct hpt_iop_request_scsi_command {
 	struct hpt_iop_request_header header;
 	u8     channel;
 	u8     target;
@@ -378,8 +195,7 @@ struct hpt_iop_request_scsi_command
 	struct hpt_iopsg sg_list[1];
 };
 
-struct hpt_iop_request_ioctl_command
-{
+struct hpt_iop_request_ioctl_command {
 	struct hpt_iop_request_header header;
 	__le32 ioctl_code;
 	__le32 inbuf_size;
@@ -392,11 +208,11 @@ struct hpt_iop_request_ioctl_command
 #define HPTIOP_MAX_REQUESTS  256u
 
 struct hptiop_request {
-	struct hptiop_request * next;
-	void *                  req_virt;
-	u32                     req_shifted_phy;
-	struct scsi_cmnd *      scp;
-	int                     index;
+	struct hptiop_request *next;
+	void                  *req_virt;
+	u32                   req_shifted_phy;
+	struct scsi_cmnd      *scp;
+	int                   index;
 };
 
 struct hpt_scsi_pointer {
@@ -408,13 +224,25 @@ struct hpt_scsi_pointer {
 #define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
 
 struct hptiop_hba {
-	struct hpt_iopmu __iomem * iop;
-	struct Scsi_Host * host;
-	struct pci_dev * pcidev;
-
-	struct list_head link;
+	struct hptiop_adapter_ops *ops;
+	union {
+		struct {
+			struct hpt_iopmu_itl __iomem *iop;
+			void __iomem *plx;
+		} itl;
+		struct {
+			struct hpt_iopmv_regs *regs;
+			struct hpt_iopmu_mv __iomem *mu;
+			void *internal_req;
+			dma_addr_t internal_req_phy;
+		} mv;
+	} u;
+
+	struct Scsi_Host *host;
+	struct pci_dev *pcidev;
 
 	/* IOP config info */
+	u32     interface_version;
 	u32     firmware_version;
 	u32     sdram_size;
 	u32     max_devices;
@@ -423,14 +251,16 @@ struct hptiop_hba {
 	u32     max_sg_descriptors;
 
 	u32     req_size; /* host-allocated request buffer size */
-	int     initialized;
-	int     msg_done;
+
+	u32     iopintf_v2:1;
+	u32     initialized:1;
+	u32     msg_done:1;
 
 	struct hptiop_request * req_list;
 	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
 
 	/* used to free allocated dma area */
-	void *      dma_coherent;
+	void        *dma_coherent;
 	dma_addr_t  dma_coherent_handle;
 
 	atomic_t    reset_count;
@@ -440,19 +270,35 @@ struct hptiop_hba {
 	wait_queue_head_t ioctl_wq;
 };
 
-struct hpt_ioctl_k
-{
+struct hpt_ioctl_k {
 	struct hptiop_hba * hba;
 	u32    ioctl_code;
 	u32    inbuf_size;
 	u32    outbuf_size;
-	void * inbuf;
-	void * outbuf;
-	u32  * bytes_returned;
+	void   *inbuf;
+	void   *outbuf;
+	u32    *bytes_returned;
 	void (*done)(struct hpt_ioctl_k *);
 	int    result; /* HPT_IOCTL_RESULT_ */
 };
 
+struct hptiop_adapter_ops {
+	int  (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
+	int  (*internal_memalloc)(struct hptiop_hba *hba);
+	int  (*internal_memfree)(struct hptiop_hba *hba);
+	int  (*map_pci_bar)(struct hptiop_hba *hba);
+	void (*unmap_pci_bar)(struct hptiop_hba *hba);
+	void (*enable_intr)(struct hptiop_hba *hba);
+	void (*disable_intr)(struct hptiop_hba *hba);
+	int  (*get_config)(struct hptiop_hba *hba,
+				struct hpt_iop_request_get_config *config);
+	int  (*set_config)(struct hptiop_hba *hba,
+				struct hpt_iop_request_set_config *config);
+	int  (*iop_intr)(struct hptiop_hba *hba);
+	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
+	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+};
+
 #define HPT_IOCTL_RESULT_OK         0
 #define HPT_IOCTL_RESULT_FAILED     (-1)