kernel-2.6.18-238.el5.src.rpm

diff -Naurp linux-2.6.17.noarch/drivers/scsi/Kconfig linux-2.6.17.noarch.qla4xxx/drivers/scsi/Kconfig
--- linux-2.6.17.noarch/drivers/scsi/Kconfig	2006-08-15 14:19:43.000000000 -0400
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/Kconfig	2006-08-17 12:57:22.000000000 -0400
@@ -1352,6 +1352,7 @@ config SCSI_QLOGICPTI
 	  module will be called qlogicpti.
 
 source "drivers/scsi/qla2xxx/Kconfig"
+source "drivers/scsi/qla4xxx/Kconfig"
 
 config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
diff -Naurp linux-2.6.17.noarch/drivers/scsi/Makefile linux-2.6.17.noarch.qla4xxx/drivers/scsi/Makefile
--- linux-2.6.17.noarch/drivers/scsi/Makefile	2006-08-15 14:19:41.000000000 -0400
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/Makefile	2006-08-17 12:57:22.000000000 -0400
@@ -81,6 +81,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicf
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
 obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o 
 obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI)	+= qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_SEAGATE)	+= seagate.o
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/Kconfig linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/Kconfig
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/Kconfig	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/Kconfig	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,7 @@
+config SCSI_QLA_ISCSI
+	tristate "QLogic ISP4XXX host adapter family support"
+	depends on PCI && SCSI
+	select SCSI_ISCSI_ATTRS
+	---help---
+	This driver supports the QLogic 40xx (ISP4XXX) iSCSI host 
+	adapter family.
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/Makefile linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/Makefile
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/Makefile	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/Makefile	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,5 @@
+qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
+		ql4_nvram.o ql4_dbg.o
+
+obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
+
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_dbg.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_dbg.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_dbg.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_dbg.c	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,197 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include <scsi/scsi_dbg.h>
+
+static void qla4xxx_print_srb_info(struct srb * srb)
+{
+	printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
+	printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
+	       __func__, srb->cmd, (unsigned long) srb->dma_handle);
+	printk("%s: fw_ddb_index = %d, lun = %d\n",
+	       __func__, srb->fw_ddb_index, srb->cmd->device->lun);
+	printk("%s: iocb_tov = %d\n",
+	       __func__, srb->iocb_tov);
+	printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
+	       __func__, srb->cc_stat, srb->r_start, srb->u_start);
+}
+
+void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
+{
+	printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
+	printk("  b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
+	       cmd->device->channel, cmd->device->id, cmd->device->lun,
+	       cmd->cmd_len);
+	scsi_print_command(cmd);
+	printk("  seg_cnt = %d\n", cmd->use_sg);
+	printk("  request buffer = 0x%p, request buffer len = 0x%x\n",
+	       cmd->request_buffer, cmd->request_bufflen);
+	if (cmd->use_sg) {
+		struct scatterlist *sg;
+		sg = (struct scatterlist *)cmd->request_buffer;
+		printk("  SG buffer: \n");
+		qla4xxx_dump_buffer((caddr_t) sg,
+				    (cmd->use_sg * sizeof(*sg)));
+	}
+	printk("  tag = %d, transfersize = 0x%x \n", cmd->tag,
+	       cmd->transfersize);
+	printk("  Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
+	printk("  underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
+	       cmd->sc_data_direction);
+	printk("  Current time (jiffies) = 0x%lx, "
+	       "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
+	qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
+}
+
+void __dump_registers(struct scsi_qla_host *ha)
+{
+	uint8_t i;
+	for (i = 0; i < MBOX_REG_COUNT; i++) {
+		printk(KERN_INFO "0x%02X mailbox[%d]	  = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
+		       readw(&ha->reg->mailbox[i]));
+	}
+	printk(KERN_INFO "0x%02X flash_address	 = 0x%08X\n",
+	       (uint8_t) offsetof(struct isp_reg, flash_address),
+	       readw(&ha->reg->flash_address));
+	printk(KERN_INFO "0x%02X flash_data	 = 0x%08X\n",
+	       (uint8_t) offsetof(struct isp_reg, flash_data),
+	       readw(&ha->reg->flash_data));
+	printk(KERN_INFO "0x%02X ctrl_status	 = 0x%08X\n",
+	       (uint8_t) offsetof(struct isp_reg, ctrl_status),
+	       readw(&ha->reg->ctrl_status));
+	if (is_qla4010(ha)) {
+		printk(KERN_INFO "0x%02X nvram		 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
+		       readw(&ha->reg->u1.isp4010.nvram));
+	}
+
+	else if (is_qla4022(ha)) {
+		printk(KERN_INFO "0x%02X intr_mask	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u1.isp4022.intr_mask),
+		       readw(&ha->reg->u1.isp4022.intr_mask));
+		printk(KERN_INFO "0x%02X nvram		 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
+		       readw(&ha->reg->u1.isp4022.nvram));
+		printk(KERN_INFO "0x%02X semaphore	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u1.isp4022.semaphore),
+		       readw(&ha->reg->u1.isp4022.semaphore));
+	}
+	printk(KERN_INFO "0x%02X req_q_in	 = 0x%08X\n",
+	       (uint8_t) offsetof(struct isp_reg, req_q_in),
+	       readw(&ha->reg->req_q_in));
+	printk(KERN_INFO "0x%02X rsp_q_out	 = 0x%08X\n",
+	       (uint8_t) offsetof(struct isp_reg, rsp_q_out),
+	       readw(&ha->reg->rsp_q_out));
+	if (is_qla4010(ha)) {
+		printk(KERN_INFO "0x%02X ext_hw_conf	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4010.ext_hw_conf),
+		       readw(&ha->reg->u2.isp4010.ext_hw_conf));
+		printk(KERN_INFO "0x%02X port_ctrl	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4010.port_ctrl),
+		       readw(&ha->reg->u2.isp4010.port_ctrl));
+		printk(KERN_INFO "0x%02X port_status	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4010.port_status),
+		       readw(&ha->reg->u2.isp4010.port_status));
+		printk(KERN_INFO "0x%02X req_q_out	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4010.req_q_out),
+		       readw(&ha->reg->u2.isp4010.req_q_out));
+		printk(KERN_INFO "0x%02X gp_out		 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
+		       readw(&ha->reg->u2.isp4010.gp_out));
+		printk(KERN_INFO "0x%02X gp_in		 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
+		       readw(&ha->reg->u2.isp4010.gp_in));
+		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4010.port_err_status),
+		       readw(&ha->reg->u2.isp4010.port_err_status));
+	}
+
+	else if (is_qla4022(ha)) {
+		printk(KERN_INFO "Page 0 Registers:\n");
+		printk(KERN_INFO "0x%02X ext_hw_conf	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4022.p0.ext_hw_conf),
+		       readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
+		printk(KERN_INFO "0x%02X port_ctrl	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4022.p0.port_ctrl),
+		       readw(&ha->reg->u2.isp4022.p0.port_ctrl));
+		printk(KERN_INFO "0x%02X port_status	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4022.p0.port_status),
+		       readw(&ha->reg->u2.isp4022.p0.port_status));
+		printk(KERN_INFO "0x%02X gp_out		 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4022.p0.gp_out),
+		       readw(&ha->reg->u2.isp4022.p0.gp_out));
+		printk(KERN_INFO "0x%02X gp_in		 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
+		       readw(&ha->reg->u2.isp4022.p0.gp_in));
+		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4022.p0.port_err_status),
+		       readw(&ha->reg->u2.isp4022.p0.port_err_status));
+		printk(KERN_INFO "Page 1 Registers:\n");
+		writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+		       &ha->reg->ctrl_status);
+		printk(KERN_INFO "0x%02X req_q_out	 = 0x%08X\n",
+		       (uint8_t) offsetof(struct isp_reg,
+					  u2.isp4022.p1.req_q_out),
+		       readw(&ha->reg->u2.isp4022.p1.req_q_out));
+		writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+		       &ha->reg->ctrl_status);
+	}
+}
+
+void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
+{
+	unsigned long flags = 0;
+	int i = 0;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (i = 1; i < MBOX_REG_COUNT; i++)
+		printk(KERN_INFO "  Mailbox[%d] = %08x\n", i,
+		       readw(&ha->reg->mailbox[i]));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void qla4xxx_dump_registers(struct scsi_qla_host *ha)
+{
+	unsigned long flags = 0;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	__dump_registers(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void qla4xxx_dump_buffer(void *b, uint32_t size)
+{
+	uint32_t cnt;
+	uint8_t *c = b;
+
+	printk(" 0   1	 2   3	 4   5	 6   7	 8   9	Ah  Bh	Ch  Dh	Eh  "
+	       "Fh\n");
+	printk("------------------------------------------------------------"
+	       "--\n");
+	for (cnt = 0; cnt < size; cnt++, c++) {
+		printk(KERN_DEBUG "%02x", *c);
+		if (!(cnt % 16))
+			printk(KERN_DEBUG "\n");
+
+		else
+			printk(KERN_DEBUG "  ");
+	}
+	if (cnt % 16)
+		printk(KERN_DEBUG "\n");
+}
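The helpers above are ad-hoc debug aids rather than part of the I/O path. As a hedged illustration of how a caller elsewhere in the driver could use qla4xxx_dump_buffer(), the function below is hypothetical and not part of this patch:

/* Hypothetical call site: hex-dump the sense data that came back
 * with a failed command. */
static void example_dump_sense(struct scsi_cmnd *cmd)
{
	qla4xxx_dump_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
}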
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_dbg.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_dbg.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_dbg.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_dbg.h	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,55 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ * Driver debug definitions.
+ */
+/* #define QL_DEBUG  */			/* DEBUG messages */
+/* #define QL_DEBUG_LEVEL_3  */		/* Output function tracing */
+/* #define QL_DEBUG_LEVEL_4  */
+/* #define QL_DEBUG_LEVEL_5  */
+/* #define QL_DEBUG_LEVEL_9  */
+
+#define QL_DEBUG_LEVEL_2	/* Always enable error messages */
+#if defined(QL_DEBUG)
+#define DEBUG(x)   do {x;} while (0);
+#else
+#define DEBUG(x)	do {} while (0);
+#endif
+
+#if defined(QL_DEBUG_LEVEL_2)
+#define DEBUG2(x)      do {if(extended_error_logging == 2) x;} while (0);
+#define DEBUG2_3(x)   do {x;} while (0);
+#else				/*  */
+#define DEBUG2(x)	do {} while (0);
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_3)
+#define DEBUG3(x)      do {if(extended_error_logging == 3) x;} while (0);
+#else				/*  */
+#define DEBUG3(x)	do {} while (0);
+#if !defined(QL_DEBUG_LEVEL_2)
+#define DEBUG2_3(x)	do {} while (0);
+#endif				/*  */
+#endif				/*  */
+#if defined(QL_DEBUG_LEVEL_4)
+#define DEBUG4(x)	do {x;} while (0);
+#else				/*  */
+#define DEBUG4(x)	do {} while (0);
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_5)
+#define DEBUG5(x)	do {x;} while (0);
+#else				/*  */
+#define DEBUG5(x)	do {} while (0);
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_9)
+#define DEBUG9(x)	do {x;} while (0);
+#else				/*  */
+#define DEBUG9(x)	do {} while (0);
+#endif				/*  */
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_def.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_def.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_def.h	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,599 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL4_DEF_H
+#define __QL4_DEF_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
+#define PCI_DEVICE_ID_QLOGIC_ISP4010	0x4010
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
+#define PCI_DEVICE_ID_QLOGIC_ISP4022	0x4022
+#endif				/*  */
+
+#define QLA_SUCCESS			0
+#define QLA_ERROR			1
+
+/*
+ * Data bit definitions
+ */
+#define BIT_0	0x1
+#define BIT_1	0x2
+#define BIT_2	0x4
+#define BIT_3	0x8
+#define BIT_4	0x10
+#define BIT_5	0x20
+#define BIT_6	0x40
+#define BIT_7	0x80
+#define BIT_8	0x100
+#define BIT_9	0x200
+#define BIT_10	0x400
+#define BIT_11	0x800
+#define BIT_12	0x1000
+#define BIT_13	0x2000
+#define BIT_14	0x4000
+#define BIT_15	0x8000
+#define BIT_16	0x10000
+#define BIT_17	0x20000
+#define BIT_18	0x40000
+#define BIT_19	0x80000
+#define BIT_20	0x100000
+#define BIT_21	0x200000
+#define BIT_22	0x400000
+#define BIT_23	0x800000
+#define BIT_24	0x1000000
+#define BIT_25	0x2000000
+#define BIT_26	0x4000000
+#define BIT_27	0x8000000
+#define BIT_28	0x10000000
+#define BIT_29	0x20000000
+#define BIT_30	0x40000000
+#define BIT_31	0x80000000
+
+/*
+ * Host adapter default definitions
+ ***********************************/
+#define MAX_HBAS		16
+#define MAX_BUSES		1
+#define MAX_TARGETS		(MAX_PRST_DEV_DB_ENTRIES +  MAX_DEV_DB_ENTRIES)
+#define MAX_LUNS		0xffff
+#define MAX_AEN_ENTRIES		256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
+#define MAX_DDB_ENTRIES		(MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
+#define MAX_PDU_ENTRIES		32
+#define INVALID_ENTRY		0xFFFF
+#define MAX_CMDS_TO_RISC	1024
+#define MAX_SRBS		MAX_CMDS_TO_RISC
+#define MBOX_AEN_REG_COUNT	5
+#define MAX_INIT_RETRIES	5
+#define IOCB_HIWAT_CUSHION	16
+
+/*
+ * Buffer sizes
+ */
+#define REQUEST_QUEUE_DEPTH		MAX_CMDS_TO_RISC
+#define RESPONSE_QUEUE_DEPTH		64
+#define QUEUE_SIZE			64
+#define DMA_BUFFER_SIZE			512
+
+/*
+ * Misc
+ */
+#define MAC_ADDR_LEN			6	/* in bytes */
+#define IP_ADDR_LEN			4	/* in bytes */
+#define DRIVER_NAME			"qla4xxx"
+
+#define MAX_LINKED_CMDS_PER_LUN		3
+#define MAX_REQS_SERVICED_PER_INTR	16
+
+#define ISCSI_IPADDR_SIZE		4	/* IP address size */
+#define ISCSI_ALIAS_SIZE		32	/* ISCSI Alias name size */
+#define ISCSI_NAME_SIZE			255	/* ISCSI Name size -
+						 * usually a string */
+
+#define LSDW(x) ((u32)((u64)(x)))
+#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
+
+/*
+ * Retry & Timeout Values
+ */
+#define MBOX_TOV			60
+#define SOFT_RESET_TOV			30
+#define RESET_INTR_TOV			3
+#define SEMAPHORE_TOV			10
+#define ADAPTER_INIT_TOV		120
+#define ADAPTER_RESET_TOV		180
+#define EXTEND_CMD_TOV			60
+#define WAIT_CMD_TOV			30
+#define EH_WAIT_CMD_TOV			120
+#define FIRMWARE_UP_TOV			60
+#define RESET_FIRMWARE_TOV		30
+#define LOGOUT_TOV			10
+#define IOCB_TOV_MARGIN			10
+#define RELOGIN_TOV			18
+#define ISNS_DEREG_TOV			5
+
+#define MAX_RESET_HA_RETRIES		2
+
+/*
+ * SCSI Request Block structure	 (srb)	that is placed
+ * on cmd->SCp location of every I/O	 [We have 22 bytes available]
+ */
+struct srb {
+	struct list_head list;	/* (8)	 */
+	struct scsi_qla_host *ha;	/* HA the SP is queued on */
+	struct ddb_entry	*ddb;
+	uint16_t flags;		/* (1) Status flags. */
+
+#define SRB_DMA_VALID		BIT_3	/* DMA Buffer mapped. */
+#define SRB_GOT_SENSE		BIT_4	/* sense data received. */
+	uint8_t state;		/* (1) Status flags. */
+
+#define SRB_NO_QUEUE_STATE	 0	/* Request is in between states */
+#define SRB_FREE_STATE		 1
+#define SRB_ACTIVE_STATE	 3
+#define SRB_ACTIVE_TIMEOUT_STATE 4
+#define SRB_SUSPENDED_STATE	 7	/* Request in suspended state */
+
+	struct scsi_cmnd *cmd;	/* (4) SCSI command block */
+	dma_addr_t dma_handle;	/* (4) for unmap of single transfers */
+	atomic_t ref_count;	/* reference count for this srb */
+	uint32_t fw_ddb_index;
+	uint8_t err_id;		/* error id */
+#define SRB_ERR_PORT	   1	/* Request failed because "port down" */
+#define SRB_ERR_LOOP	   2	/* Request failed because "loop down" */
+#define SRB_ERR_DEVICE	   3	/* Request failed because "device error" */
+#define SRB_ERR_OTHER	   4
+
+	uint16_t reserved;
+	uint16_t iocb_tov;
+	uint16_t iocb_cnt;	/* Number of used iocbs */
+	uint16_t cc_stat;
+	u_long r_start;		/* Time we receive a cmd from OS */
+	u_long u_start;		/* Time when we handed the cmd to F/W */
+};
+
+	/*
+	 * Device Database (DDB) structure
+	 */
+struct ddb_entry {
+	struct list_head list;	/* ddb list */
+	struct scsi_qla_host *ha;
+	struct iscsi_cls_session *sess;
+	struct iscsi_cls_conn *conn;
+
+	atomic_t state;		/* DDB State */
+
+	unsigned long flags;	/* DDB Flags */
+
+	unsigned long dev_scan_wait_to_start_relogin;
+	unsigned long dev_scan_wait_to_complete_relogin;
+
+	uint16_t os_target_id;	/* Target ID */
+	uint16_t fw_ddb_index;	/* DDB firmware index */
+	uint8_t reserved[2];
+	uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+
+	uint32_t CmdSn;
+	uint16_t target_session_id;
+	uint16_t connection_id;
+	uint16_t exe_throttle;	/* Max number of cmds outstanding
+				 * simultaneously */
+	uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
+				     * complete */
+	uint16_t default_relogin_timeout; /*  Max time to wait for
+					   *  relogin to complete */
+	uint16_t tcp_source_port_num;
+	uint32_t default_time2wait; /* Default Min time between
+				     * relogins (+aens) */
+
+	atomic_t port_down_timer; /* Device connection timer */
+	atomic_t retry_relogin_timer; /* Min Time between relogins
+				       * (4000 only) */
+	atomic_t relogin_timer;	/* Max Time to wait for relogin to complete */
+	atomic_t relogin_retry_count; /* Num of times relogin has been
+				       * retried */
+
+	uint16_t port;
+	uint32_t tpgt;
+	uint8_t ip_addr[ISCSI_IPADDR_SIZE];
+	uint8_t iscsi_name[ISCSI_NAME_SIZE];	/* 72 x48 */
+	uint8_t iscsi_alias[0x20];
+};
+
+/*
+ * DDB states.
+ */
+#define DDB_STATE_DEAD		0	/* We can no longer talk to
+					 * this device */
+#define DDB_STATE_ONLINE	1	/* Device ready to accept
+					 * commands */
+#define DDB_STATE_MISSING	2	/* Device logged off, trying
+					 * to re-login */
+
+/*
+ * DDB flags.
+ */
+#define DF_RELOGIN		0	/* Relogin to device */
+#define DF_NO_RELOGIN		1	/* Do not relogin if IOCTL
+					 * logged it out */
+#define DF_ISNS_DISCOVERED	2	/* Device was discovered via iSNS */
+#define DF_FO_MASKED		3
+
+/*
+ * Asynchronous Event Queue structure
+ */
+struct aen {
+	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+};
+
+
+#include "ql4_fw.h"
+#include "ql4_nvram.h"
+
+/*
+ * Linux Host Adapter structure
+ */
+struct scsi_qla_host {
+	/* Linux adapter configuration data */
+	struct Scsi_Host *host; /* pointer to host data */
+	uint32_t tot_ddbs;
+	unsigned long flags;
+
+#define AF_ONLINE		      0 /* 0x00000001 */
+#define AF_INIT_DONE		      1 /* 0x00000002 */
+#define AF_MBOX_COMMAND		      2 /* 0x00000004 */
+#define AF_MBOX_COMMAND_DONE	      3 /* 0x00000008 */
+#define AF_INTERRUPTS_ON	      6 /* 0x00000040 Not Used */
+#define AF_GET_CRASH_RECORD	      7 /* 0x00000080 */
+#define AF_LINK_UP		      8 /* 0x00000100 */
+#define AF_TOPCAT_CHIP_PRESENT	      9 /* 0x00000200 */
+#define AF_IRQ_ATTACHED		     10 /* 0x00000400 */
+#define AF_ISNS_CMD_IN_PROCESS	     12 /* 0x00001000 */
+#define AF_ISNS_CMD_DONE	     13 /* 0x00002000 */
+
+	unsigned long dpc_flags;
+
+#define DPC_RESET_HA		      1 /* 0x00000002 */
+#define DPC_RETRY_RESET_HA	      2 /* 0x00000004 */
+#define DPC_RELOGIN_DEVICE	      3 /* 0x00000008 */
+#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
+#define DPC_RESET_HA_INTR	      5 /* 0x00000020 */
+#define DPC_ISNS_RESTART	      7 /* 0x00000080 */
+#define DPC_AEN			      9 /* 0x00000200 */
+#define DPC_GET_DHCP_IP_ADDR	     15 /* 0x00008000 */
+
+	uint16_t	iocb_cnt;
+	uint16_t	iocb_hiwat;
+
+	/* SRB cache. */
+#define SRB_MIN_REQ	128
+	mempool_t *srb_mempool;
+
+	/* pci information */
+	struct pci_dev *pdev;
+
+	struct isp_reg __iomem *reg; /* Base I/O address */
+	unsigned long pio_address;
+	unsigned long pio_length;
+#define MIN_IOBASE_LEN		0x100
+
+	uint16_t req_q_count;
+	uint8_t marker_needed;
+	uint8_t rsvd1;
+
+	unsigned long host_no;
+
+	/* NVRAM registers */
+	struct eeprom_data *nvram;
+	spinlock_t hardware_lock ____cacheline_aligned;
+	spinlock_t list_lock;
+	uint32_t   eeprom_cmd_data;
+
+	/* Counters for general statistics */
+	uint64_t adapter_error_count;
+	uint64_t device_error_count;
+	uint64_t total_io_count;
+	uint64_t total_mbytes_xferred;
+	uint64_t link_failure_count;
+	uint64_t invalid_crc_count;
+	uint32_t spurious_int_count;
+	uint32_t aborted_io_count;
+	uint32_t io_timeout_count;
+	uint32_t mailbox_timeout_count;
+	uint32_t seconds_since_last_intr;
+	uint32_t seconds_since_last_heartbeat;
+	uint32_t mac_index;
+
+	/* Info Needed for Management App */
+	/* --- From GetFwVersion --- */
+	uint32_t firmware_version[2];
+	uint32_t patch_number;
+	uint32_t build_number;
+
+	/* --- From Init_FW --- */
+	/* init_cb_t *init_cb; */
+	uint16_t firmware_options;
+	uint16_t tcp_options;
+	uint8_t ip_address[IP_ADDR_LEN];
+	uint8_t subnet_mask[IP_ADDR_LEN];
+	uint8_t gateway[IP_ADDR_LEN];
+	uint8_t alias[32];
+	uint8_t name_string[256];
+	uint8_t heartbeat_interval;
+	uint8_t rsvd;
+
+	/* --- From FlashSysInfo --- */
+	uint8_t my_mac[MAC_ADDR_LEN];
+	uint8_t serial_number[16];
+
+	/* --- From GetFwState --- */
+	uint32_t firmware_state;
+	uint32_t board_id;
+	uint32_t addl_fw_state;
+
+	/* Linux kernel thread */
+	struct workqueue_struct *dpc_thread;
+	struct work_struct dpc_work;
+
+	/* Linux timer thread */
+	struct timer_list timer;
+	uint32_t timer_active;
+
+	/* Recovery Timers */
+	uint32_t port_down_retry_count;
+	uint32_t discovery_wait;
+	atomic_t check_relogin_timeouts;
+	uint32_t retry_reset_ha_cnt;
+	uint32_t isp_reset_timer;	/* reset test timer */
+	uint32_t nic_reset_timer;	/* simulated nic reset test timer */
+	int eh_start;
+	struct list_head free_srb_q;
+	uint16_t free_srb_q_count;
+	uint16_t num_srbs_allocated;
+
+	/* Active array */
+	struct srb *active_srb_array[MAX_SRBS];
+	uint16_t current_active_index;
+
+	/* DMA Memory Block */
+	void *queues;
+	dma_addr_t queues_dma;
+	unsigned long queues_len;
+
+#define MEM_ALIGN_VALUE \
+	    ((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
+	     sizeof(struct queue_entry))
+	/* request and response queue variables */
+	dma_addr_t request_dma;
+	struct queue_entry *request_ring;
+	struct queue_entry *request_ptr;
+	dma_addr_t response_dma;
+	struct queue_entry *response_ring;
+	struct queue_entry *response_ptr;
+	dma_addr_t shadow_regs_dma;
+	struct shadow_regs *shadow_regs;
+	uint16_t request_in;	/* Current indexes. */
+	uint16_t request_out;
+	uint16_t response_in;
+	uint16_t response_out;
+
+	/* aen queue variables */
+	uint16_t aen_q_count;	/* Number of available aen_q entries */
+	uint16_t aen_in;	/* Current indexes */
+	uint16_t aen_out;
+	struct aen aen_q[MAX_AEN_ENTRIES];
+
+	/* pdu variables */
+	uint16_t pdu_count;	/* Number of available pdu_queue entries */
+	uint16_t pdu_in;	/* Current indexes */
+	uint16_t pdu_out;
+	uint16_t pdu_active;
+	struct pdu_entry *free_pdu_top;
+	struct pdu_entry *free_pdu_bottom;
+	struct pdu_entry pdu_queue[MAX_PDU_ENTRIES];
+
+	/* This mutex serializes mailbox commands so that several
+	 * threads cannot issue them concurrently.
+	 */
+	struct mutex  mbox_sem;
+	wait_queue_head_t mailbox_wait_queue;
+
+	/* temporary mailbox status registers */
+	volatile uint8_t mbox_status_count;
+	volatile uint32_t mbox_status[MBOX_REG_COUNT];
+
+	/* local device database list (contains internal ddb entries) */
+	struct list_head ddb_list;
+
+	/* Map ddb_list entry by FW ddb index */
+	struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
+
+};
+
+static inline int is_qla4010(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
+}
+
+static inline int is_qla4022(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
+}
+
+static inline int adapter_up(struct scsi_qla_host *ha)
+{
+	return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
+		(test_bit(AF_LINK_UP, &ha->flags) != 0);
+}
+
+static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
+{
+	return (struct scsi_qla_host *)shost->hostdata;
+}
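A short sketch of how these two inlines are meant to be used together: a callback that is handed a struct Scsi_Host recovers the driver's per-adapter structure from hostdata and gates its work on the adapter state. The function below is purely illustrative and not part of this patch.

static void example_report_state(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);

	if (adapter_up(ha))
		printk(KERN_INFO "qla4xxx: host %lu is online\n",
		       ha->host_no);
}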
+
+static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u1.isp4022.semaphore :
+		&ha->reg->u1.isp4010.nvram);
+}
+
+static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u1.isp4022.nvram :
+		&ha->reg->u1.isp4010.nvram);
+}
+
+static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u2.isp4022.p0.ext_hw_conf :
+		&ha->reg->u2.isp4010.ext_hw_conf);
+}
+
+static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u2.isp4022.p0.port_status :
+		&ha->reg->u2.isp4010.port_status);
+}
+
+static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u2.isp4022.p0.port_ctrl :
+		&ha->reg->u2.isp4010.port_ctrl);
+}
+
+static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u2.isp4022.p0.port_err_status :
+		&ha->reg->u2.isp4010.port_err_status);
+}
+
+static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		&ha->reg->u2.isp4022.p0.gp_out :
+		&ha->reg->u2.isp4010.gp_out);
+}
+
+static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
+{
+	return (is_qla4022(ha) ?
+		offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
+		offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
+}
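The accessors above hide the ISP4010/ISP4022 register-layout differences, so callers read or write one logical register without open-coding the union selection. A hypothetical caller (not part of this patch) might look like:

/* Illustrative only: read the chip-independent port status register. */
static void example_show_port_status(struct scsi_qla_host *ha)
{
	printk(KERN_INFO "qla4xxx: port_status = 0x%08x\n",
	       readw(isp_port_status(ha)));
}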
+
+int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
+void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask);
+int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
+
+static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
+{
+	if (is_qla4022(a))
+		return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
+					   (QL4022_RESOURCE_BITS_BASE_CODE |
+					    (a->mac_index)) << 13);
+	else
+		return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
+					   QL4010_FLASH_SEM_BITS);
+}
+
+static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
+{
+	if (is_qla4022(a))
+		ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
+}
+
+static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
+{
+	if (is_qla4022(a))
+		return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
+					   (QL4022_RESOURCE_BITS_BASE_CODE |
+					    (a->mac_index)) << 10);
+	else
+		return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
+					   QL4010_NVRAM_SEM_BITS);
+}
+
+static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
+{
+	if (is_qla4022(a))
+		ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
+}
+
+static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
+{
+	if (is_qla4022(a))
+		return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
+				       (QL4022_RESOURCE_BITS_BASE_CODE |
+					(a->mac_index)) << 1);
+	else
+		return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
+				       QL4010_DRVR_SEM_BITS);
+}
+
+static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
+{
+	if (is_qla4022(a))
+		ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+#define PRESERVE_DDB_LIST	0
+#define REBUILD_DDB_LIST	1
+
+/* Defines for process_aen() */
+#define PROCESS_ALL_AENS	 0
+#define FLUSH_DDB_CHANGED_AENS	 1
+#define RELOGIN_DDB_CHANGED_AENS 2
+
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+
+#endif	/*_QLA4XXX_H */
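The ql4xxx_lock_*/ql4xxx_unlock_* inlines near the end of the header bracket accesses to shared hardware resources (flash, NVRAM, driver semaphore). A minimal usage sketch with error handling elided; the wrapper function itself is hypothetical:

static int example_read_flash(struct scsi_qla_host *ha, dma_addr_t buf_dma,
			      uint32_t offset, uint32_t len)
{
	int status;

	ql4xxx_lock_flash(ha);		/* take the flash semaphore */
	status = qla4xxx_get_flash(ha, buf_dma, offset, len);
	ql4xxx_unlock_flash(ha);	/* always release it */

	return status;
}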
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_fw.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_fw.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_fw.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_fw.h	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,852 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef _QLA4X_FW_H
+#define _QLA4X_FW_H
+
+
+#define MAX_PRST_DEV_DB_ENTRIES		64
+#define MIN_DISC_DEV_DB_ENTRY		MAX_PRST_DEV_DB_ENTRIES
+#define MAX_DEV_DB_ENTRIES 512
+
+/*************************************************************************
+ *
+ *		ISP 4010 I/O Register Set Structure and Definitions
+ *
+ *************************************************************************/
+
+struct port_ctrl_stat_regs {
+	__le32 ext_hw_conf;	/*  80 x50  R/W */
+	__le32 intChipConfiguration; /*	 84 x54 */
+	__le32 port_ctrl;	/*  88 x58 */
+	__le32 port_status;	/*  92 x5c */
+	__le32 HostPrimMACHi;	/*  96 x60 */
+	__le32 HostPrimMACLow;	/* 100 x64 */
+	__le32 HostSecMACHi;	/* 104 x68 */
+	__le32 HostSecMACLow;	/* 108 x6c */
+	__le32 EPPrimMACHi;	/* 112 x70 */
+	__le32 EPPrimMACLow;	/* 116 x74 */
+	__le32 EPSecMACHi;	/* 120 x78 */
+	__le32 EPSecMACLow;	/* 124 x7c */
+	__le32 HostPrimIPHi;	/* 128 x80 */
+	__le32 HostPrimIPMidHi; /* 132 x84 */
+	__le32 HostPrimIPMidLow;	/* 136 x88 */
+	__le32 HostPrimIPLow;	/* 140 x8c */
+	__le32 HostSecIPHi;	/* 144 x90 */
+	__le32 HostSecIPMidHi;	/* 148 x94 */
+	__le32 HostSecIPMidLow; /* 152 x98 */
+	__le32 HostSecIPLow;	/* 156 x9c */
+	__le32 EPPrimIPHi;	/* 160 xa0 */
+	__le32 EPPrimIPMidHi;	/* 164 xa4 */
+	__le32 EPPrimIPMidLow;	/* 168 xa8 */
+	__le32 EPPrimIPLow;	/* 172 xac */
+	__le32 EPSecIPHi;	/* 176 xb0 */
+	__le32 EPSecIPMidHi;	/* 180 xb4 */
+	__le32 EPSecIPMidLow;	/* 184 xb8 */
+	__le32 EPSecIPLow;	/* 188 xbc */
+	__le32 IPReassemblyTimeout; /* 192 xc0 */
+	__le32 EthMaxFramePayload; /* 196 xc4 */
+	__le32 TCPMaxWindowSize; /* 200 xc8 */
+	__le32 TCPCurrentTimestampHi; /* 204 xcc */
+	__le32 TCPCurrentTimestampLow; /* 208 xd0 */
+	__le32 LocalRAMAddress; /* 212 xd4 */
+	__le32 LocalRAMData;	/* 216 xd8 */
+	__le32 PCSReserved1;	/* 220 xdc */
+	__le32 gp_out;		/* 224 xe0 */
+	__le32 gp_in;		/* 228 xe4 */
+	__le32 ProbeMuxAddr;	/* 232 xe8 */
+	__le32 ProbeMuxData;	/* 236 xec */
+	__le32 ERMQueueBaseAddr0; /* 240 xf0 */
+	__le32 ERMQueueBaseAddr1; /* 244 xf4 */
+	__le32 MACConfiguration; /* 248 xf8 */
+	__le32 port_err_status; /* 252 xfc  COR */
+};
+
+struct host_mem_cfg_regs {
+	__le32 NetRequestQueueOut; /*  80 x50 */
+	__le32 NetRequestQueueOutAddrHi; /*  84 x54 */
+	__le32 NetRequestQueueOutAddrLow; /*  88 x58 */
+	__le32 NetRequestQueueBaseAddrHi; /*  92 x5c */
+	__le32 NetRequestQueueBaseAddrLow; /*  96 x60 */
+	__le32 NetRequestQueueLength; /* 100 x64 */
+	__le32 NetResponseQueueIn; /* 104 x68 */
+	__le32 NetResponseQueueInAddrHi; /* 108 x6c */
+	__le32 NetResponseQueueInAddrLow; /* 112 x70 */
+	__le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
+	__le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
+	__le32 NetResponseQueueLength; /* 124 x7c */
+	__le32 req_q_out;	/* 128 x80 */
+	__le32 RequestQueueOutAddrHi; /* 132 x84 */
+	__le32 RequestQueueOutAddrLow; /* 136 x88 */
+	__le32 RequestQueueBaseAddrHi; /* 140 x8c */
+	__le32 RequestQueueBaseAddrLow; /* 144 x90 */
+	__le32 RequestQueueLength; /* 148 x94 */
+	__le32 ResponseQueueIn; /* 152 x98 */
+	__le32 ResponseQueueInAddrHi; /* 156 x9c */
+	__le32 ResponseQueueInAddrLow; /* 160 xa0 */
+	__le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
+	__le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
+	__le32 ResponseQueueLength; /* 172 xac */
+	__le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
+	__le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
+	__le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
+	__le32 NetRxLargeBufferQueueLength; /* 188 xbc */
+	__le32 NetRxLargeBufferLength; /* 192 xc0 */
+	__le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
+	__le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
+	__le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
+	__le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
+	__le32 NetRxSmallBufferLength; /* 212 xd4 */
+	__le32 HMCReserved0[10]; /* 216 xd8 */
+};
+
+struct local_ram_cfg_regs {
+	__le32 BufletSize;	/*  80 x50 */
+	__le32 BufletMaxCount;	/*  84 x54 */
+	__le32 BufletCurrCount; /*  88 x58 */
+	__le32 BufletPauseThresholdCount; /*  92 x5c */
+	__le32 BufletTCPWinThresholdHi; /*  96 x60 */
+	__le32 BufletTCPWinThresholdLow; /* 100 x64 */
+	__le32 IPHashTableBaseAddr; /* 104 x68 */
+	__le32 IPHashTableSize; /* 108 x6c */
+	__le32 TCPHashTableBaseAddr; /* 112 x70 */
+	__le32 TCPHashTableSize; /* 116 x74 */
+	__le32 NCBAreaBaseAddr; /* 120 x78 */
+	__le32 NCBMaxCount;	/* 124 x7c */
+	__le32 NCBCurrCount;	/* 128 x80 */
+	__le32 DRBAreaBaseAddr; /* 132 x84 */
+	__le32 DRBMaxCount;	/* 136 x88 */
+	__le32 DRBCurrCount;	/* 140 x8c */
+	__le32 LRCReserved[28]; /* 144 x90 */
+};
+
+struct prot_stat_regs {
+	__le32 MACTxFrameCount; /*  80 x50   R */
+	__le32 MACTxByteCount;	/*  84 x54   R */
+	__le32 MACRxFrameCount; /*  88 x58   R */
+	__le32 MACRxByteCount;	/*  92 x5c   R */
+	__le32 MACCRCErrCount;	/*  96 x60   R */
+	__le32 MACEncErrCount;	/* 100 x64   R */
+	__le32 MACRxLengthErrCount; /* 104 x68	 R */
+	__le32 IPTxPacketCount; /* 108 x6c   R */
+	__le32 IPTxByteCount;	/* 112 x70   R */
+	__le32 IPTxFragmentCount; /* 116 x74   R */
+	__le32 IPRxPacketCount; /* 120 x78   R */
+	__le32 IPRxByteCount;	/* 124 x7c   R */
+	__le32 IPRxFragmentCount; /* 128 x80   R */
+	__le32 IPDatagramReassemblyCount; /* 132 x84   R */
+	__le32 IPV6RxPacketCount; /* 136 x88   R */
+	__le32 IPErrPacketCount; /* 140 x8c   R */
+	__le32 IPReassemblyErrCount; /* 144 x90	  R */
+	__le32 TCPTxSegmentCount; /* 148 x94   R */
+	__le32 TCPTxByteCount;	/* 152 x98   R */
+	__le32 TCPRxSegmentCount; /* 156 x9c   R */
+	__le32 TCPRxByteCount;	/* 160 xa0   R */
+	__le32 TCPTimerExpCount; /* 164 xa4   R */
+	__le32 TCPRxAckCount;	/* 168 xa8   R */
+	__le32 TCPTxAckCount;	/* 172 xac   R */
+	__le32 TCPRxErrOOOCount; /* 176 xb0   R */
+	__le32 PSReserved0;	/* 180 xb4 */
+	__le32 TCPRxWindowProbeUpdateCount; /* 184 xb8	 R */
+	__le32 ECCErrCorrectionCount; /* 188 xbc   R */
+	__le32 PSReserved1[16]; /* 192 xc0 */
+};
+
+
+/*  remote register set (access via PCI memory read/write) */
+struct isp_reg {
+#define MBOX_REG_COUNT 8
+	__le32 mailbox[MBOX_REG_COUNT];
+
+	__le32 flash_address;	/* 0x20 */
+	__le32 flash_data;
+	__le32 ctrl_status;
+
+	union {
+		struct {
+			__le32 nvram;
+			__le32 reserved1[2]; /* 0x30 */
+		} __attribute__ ((packed)) isp4010;
+		struct {
+			__le32 intr_mask;
+			__le32 nvram; /* 0x30 */
+			__le32 semaphore;
+		} __attribute__ ((packed)) isp4022;
+	} u1;
+
+	__le32 req_q_in;    /* SCSI Request Queue Producer Index */
+	__le32 rsp_q_out;   /* SCSI Completion Queue Consumer Index */
+
+	__le32 reserved2[4];	/* 0x40 */
+
+	union {
+		struct {
+			__le32 ext_hw_conf; /* 0x50 */
+			__le32 flow_ctrl;
+			__le32 port_ctrl;
+			__le32 port_status;
+
+			__le32 reserved3[8]; /* 0x60 */
+
+			__le32 req_q_out; /* 0x80 */
+
+			__le32 reserved4[23]; /* 0x84 */
+
+			__le32 gp_out; /* 0xe0 */
+			__le32 gp_in;
+
+			__le32 reserved5[5];
+
+			__le32 port_err_status; /* 0xfc */
+		} __attribute__ ((packed)) isp4010;
+		struct {
+			union {
+				struct port_ctrl_stat_regs p0;
+				struct host_mem_cfg_regs p1;
+				struct local_ram_cfg_regs p2;
+				struct prot_stat_regs p3;
+				__le32 r_union[44];
+			};
+
+		} __attribute__ ((packed)) isp4022;
+	} u2;
+};				/* 256 x100 */
+
+
+/* Semaphore Defines for 4010 */
+#define QL4010_DRVR_SEM_BITS	0x00000030
+#define QL4010_GPIO_SEM_BITS	0x000000c0
+#define QL4010_SDRAM_SEM_BITS	0x00000300
+#define QL4010_PHY_SEM_BITS	0x00000c00
+#define QL4010_NVRAM_SEM_BITS	0x00003000
+#define QL4010_FLASH_SEM_BITS	0x0000c000
+
+#define QL4010_DRVR_SEM_MASK	0x00300000
+#define QL4010_GPIO_SEM_MASK	0x00c00000
+#define QL4010_SDRAM_SEM_MASK	0x03000000
+#define QL4010_PHY_SEM_MASK	0x0c000000
+#define QL4010_NVRAM_SEM_MASK	0x30000000
+#define QL4010_FLASH_SEM_MASK	0xc0000000
+
+/* Semaphore Defines for 4022 */
+#define QL4022_RESOURCE_MASK_BASE_CODE 0x7
+#define QL4022_RESOURCE_BITS_BASE_CODE 0x4
+
+
+#define QL4022_DRVR_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (1+16))
+#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16))
+#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16))
+#define QL4022_NVRAM_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
+#define QL4022_FLASH_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
+
+
+
+/* Page # defines for 4022 */
+#define PORT_CTRL_STAT_PAGE			0	/* 4022 */
+#define HOST_MEM_CFG_PAGE			1	/* 4022 */
+#define LOCAL_RAM_CFG_PAGE			2	/* 4022 */
+#define PROT_STAT_PAGE				3	/* 4022 */
+
+/* Register Mask - sets corresponding mask bits in the upper word */
+static inline uint32_t set_rmask(uint32_t val)
+{
+	return (val & 0xffff) | (val << 16);
+}
+
+
+static inline uint32_t clr_rmask(uint32_t val)
+{
+	return 0 | (val << 16);
+}
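set_rmask()/clr_rmask() encode the ISP register convention in which the low 16 bits carry data and the high 16 bits are a per-bit write-enable mask: set_rmask(bit) sets the bit, clr_rmask(bit) clears it, and unmasked bits keep their value. A hedged sketch of the idiom (the function is illustrative; ha->reg comes from ql4_def.h):

/* Enable, then disable, the ISP4010 SCSI interrupt-enable bit
 * without disturbing the other ctrl_status bits. */
static void example_toggle_intr(struct scsi_qla_host *ha)
{
	writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);	/* post the PCI write */

	writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);
}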
+
+/*  ctrl_status definitions */
+#define CSR_SCSI_PAGE_SELECT			0x00000003
+#define CSR_SCSI_INTR_ENABLE			0x00000004	/* 4010 */
+#define CSR_SCSI_RESET_INTR			0x00000008
+#define CSR_SCSI_COMPLETION_INTR		0x00000010
+#define CSR_SCSI_PROCESSOR_INTR			0x00000020
+#define CSR_INTR_RISC				0x00000040
+#define CSR_BOOT_ENABLE				0x00000080
+#define CSR_NET_PAGE_SELECT			0x00000300	/* 4010 */
+#define CSR_FUNC_NUM				0x00000700	/* 4022 */
+#define CSR_NET_RESET_INTR			0x00000800	/* 4010 */
+#define CSR_FORCE_SOFT_RESET			0x00002000	/* 4022 */
+#define CSR_FATAL_ERROR				0x00004000
+#define CSR_SOFT_RESET				0x00008000
+#define ISP_CONTROL_FN_MASK			CSR_FUNC_NUM
+#define ISP_CONTROL_FN0_SCSI			0x0500
+#define ISP_CONTROL_FN1_SCSI			0x0700
+
+#define INTR_PENDING				(CSR_SCSI_COMPLETION_INTR |\
+						 CSR_SCSI_PROCESSOR_INTR |\
+						 CSR_SCSI_RESET_INTR)
+
+/* ISP InterruptMask definitions */
+#define IMR_SCSI_INTR_ENABLE			0x00000004	/* 4022 */
+
+/* ISP 4022 nvram definitions */
+#define NVR_WRITE_ENABLE			0x00000010	/* 4022 */
+
+/*  ISP port_status definitions */
+
+/*  ISP Semaphore definitions */
+
+/*  ISP General Purpose Output definitions */
+#define GPOR_TOPCAT_RESET			0x00000004
+
+/*  shadow registers (DMA'd from HA to system memory.  read only) */
+struct shadow_regs {
+	/* SCSI Request Queue Consumer Index */
+	__le32 req_q_out;	/*  0 x0   R */
+
+	/* SCSI Completion Queue Producer Index */
+	__le32 rsp_q_in;	/*  4 x4   R */
+};		  /*  8 x8 */
+
+
+/*  External hardware configuration register */
+union external_hw_config_reg {
+	struct {
+		/* FIXME: Do we even need this?	 All values are
+		 * referred to by 16 bit quantities.  Platform and
+		 * endianness issues. */
+		__le32 bReserved0:1;
+		__le32 bSDRAMProtectionMethod:2;
+		__le32 bSDRAMBanks:1;
+		__le32 bSDRAMChipWidth:1;
+		__le32 bSDRAMChipSize:2;
+		__le32 bParityDisable:1;
+		__le32 bExternalMemoryType:1;
+		__le32 bFlashBIOSWriteEnable:1;
+		__le32 bFlashUpperBankSelect:1;
+		__le32 bWriteBurst:2;
+		__le32 bReserved1:3;
+		__le32 bMask:16;
+	};
+	uint32_t Asuint32_t;
+};
+
+/*************************************************************************
+ *
+ *		Mailbox Commands Structures and Definitions
+ *
+ *************************************************************************/
+
+/*  Mailbox command definitions */
+#define MBOX_CMD_ABOUT_FW			0x0009
+#define MBOX_CMD_LUN_RESET			0x0016
+#define MBOX_CMD_GET_FW_STATUS			0x001F
+#define MBOX_CMD_SET_ISNS_SERVICE		0x0021
+#define ISNS_DISABLE				0
+#define ISNS_ENABLE				1
+#define MBOX_CMD_READ_FLASH			0x0026
+#define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031
+#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056
+#define LOGOUT_OPTION_CLOSE_SESSION		0x01
+#define LOGOUT_OPTION_RELOGIN			0x02
+#define MBOX_CMD_EXECUTE_IOCB_A64		0x005A
+#define MBOX_CMD_INITIALIZE_FIRMWARE		0x0060
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK		0x0061
+#define MBOX_CMD_REQUEST_DATABASE_ENTRY		0x0062
+#define MBOX_CMD_SET_DATABASE_ENTRY		0x0063
+#define MBOX_CMD_GET_DATABASE_ENTRY		0x0064
+#define DDB_DS_UNASSIGNED			0x00
+#define DDB_DS_NO_CONNECTION_ACTIVE		0x01
+#define DDB_DS_SESSION_ACTIVE			0x04
+#define DDB_DS_SESSION_FAILED			0x06
+#define DDB_DS_LOGIN_IN_PROCESS			0x07
+#define MBOX_CMD_GET_FW_STATE			0x0069
+
+/* Mailbox 1 */
+#define FW_STATE_READY				0x0000
+#define FW_STATE_CONFIG_WAIT			0x0001
+#define FW_STATE_ERROR				0x0004
+#define FW_STATE_DHCP_IN_PROGRESS		0x0008
+
+/* Mailbox 3 */
+#define FW_ADDSTATE_OPTICAL_MEDIA		0x0001
+#define FW_ADDSTATE_DHCP_ENABLED		0x0002
+#define FW_ADDSTATE_LINK_UP			0x0010
+#define FW_ADDSTATE_ISNS_SVC_ENABLED		0x0020
+#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS	0x006B
+#define MBOX_CMD_CONN_OPEN_SESS_LOGIN		0x0074
+#define MBOX_CMD_GET_CRASH_RECORD		0x0076	/* 4010 only */
+#define MBOX_CMD_GET_CONN_EVENT_LOG		0x0077
+
+/*  Mailbox status definitions */
+#define MBOX_COMPLETION_STATUS			4
+#define MBOX_STS_BUSY				0x0007
+#define MBOX_STS_INTERMEDIATE_COMPLETION	0x1000
+#define MBOX_STS_COMMAND_COMPLETE		0x4000
+#define MBOX_STS_COMMAND_ERROR			0x4005
+
+#define MBOX_ASYNC_EVENT_STATUS			8
+#define MBOX_ASTS_SYSTEM_ERROR			0x8002
+#define MBOX_ASTS_REQUEST_TRANSFER_ERROR	0x8003
+#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR	0x8004
+#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM	0x8005
+#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED	0x8006
+#define MBOX_ASTS_LINK_UP			0x8010
+#define MBOX_ASTS_LINK_DOWN			0x8011
+#define MBOX_ASTS_DATABASE_CHANGED		0x8014
+#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED	0x8015
+#define MBOX_ASTS_SELF_TEST_FAILED		0x8016
+#define MBOX_ASTS_LOGIN_FAILED			0x8017
+#define MBOX_ASTS_DNS				0x8018
+#define MBOX_ASTS_HEARTBEAT			0x8019
+#define MBOX_ASTS_NVRAM_INVALID			0x801A
+#define MBOX_ASTS_MAC_ADDRESS_CHANGED		0x801B
+#define MBOX_ASTS_IP_ADDRESS_CHANGED		0x801C
+#define MBOX_ASTS_DHCP_LEASE_EXPIRED		0x801D
+#define MBOX_ASTS_DHCP_LEASE_ACQUIRED		0x801F
+#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
+#define ISNS_EVENT_DATA_RECEIVED		0x0000
+#define ISNS_EVENT_CONNECTION_OPENED		0x0001
+#define ISNS_EVENT_CONNECTION_FAILED		0x0002
+#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR	0x8022
+#define MBOX_ASTS_SUBNET_STATE_CHANGE		0x8027
+
+/*************************************************************************/
+
+/* Host Adapter Initialization Control Block (from host) */
+struct init_fw_ctrl_blk {
+	uint8_t Version;	/* 00 */
+	uint8_t Control;	/* 01 */
+
+	uint16_t FwOptions;	/* 02-03 */
+#define	 FWOPT_HEARTBEAT_ENABLE		  0x1000
+#define	 FWOPT_SESSION_MODE		  0x0040
+#define	 FWOPT_INITIATOR_MODE		  0x0020
+#define	 FWOPT_TARGET_MODE		  0x0010
+
+	uint16_t ExecThrottle;	/* 04-05 */
+	uint8_t RetryCount;	/* 06 */
+	uint8_t RetryDelay;	/* 07 */
+	uint16_t MaxEthFrPayloadSize;	/* 08-09 */
+	uint16_t AddFwOptions;	/* 0A-0B */
+
+	uint8_t HeartbeatInterval;	/* 0C */
+	uint8_t InstanceNumber; /* 0D */
+	uint16_t RES2;		/* 0E-0F */
+	uint16_t ReqQConsumerIndex;	/* 10-11 */
+	uint16_t ComplQProducerIndex;	/* 12-13 */
+	uint16_t ReqQLen;	/* 14-15 */
+	uint16_t ComplQLen;	/* 16-17 */
+	uint32_t ReqQAddrLo;	/* 18-1B */
+	uint32_t ReqQAddrHi;	/* 1C-1F */
+	uint32_t ComplQAddrLo;	/* 20-23 */
+	uint32_t ComplQAddrHi;	/* 24-27 */
+	uint32_t ShadowRegBufAddrLo;	/* 28-2B */
+	uint32_t ShadowRegBufAddrHi;	/* 2C-2F */
+
+	uint16_t iSCSIOptions;	/* 30-31 */
+
+	uint16_t TCPOptions;	/* 32-33 */
+
+	uint16_t IPOptions;	/* 34-35 */
+
+	uint16_t MaxPDUSize;	/* 36-37 */
+	uint16_t RcvMarkerInt;	/* 38-39 */
+	uint16_t SndMarkerInt;	/* 3A-3B */
+	uint16_t InitMarkerlessInt;	/* 3C-3D */
+	uint16_t FirstBurstSize;	/* 3E-3F */
+	uint16_t DefaultTime2Wait;	/* 40-41 */
+	uint16_t DefaultTime2Retain;	/* 42-43 */
+	uint16_t MaxOutStndngR2T;	/* 44-45 */
+	uint16_t KeepAliveTimeout;	/* 46-47 */
+	uint16_t PortNumber;	/* 48-49 */
+	uint16_t MaxBurstSize;	/* 4A-4B */
+	uint32_t RES4;		/* 4C-4F */
+	uint8_t IPAddr[4];	/* 50-53 */
+	uint8_t RES5[12];	/* 54-5F */
+	uint8_t SubnetMask[4];	/* 60-63 */
+	uint8_t RES6[12];	/* 64-6F */
+	uint8_t GatewayIPAddr[4];	/* 70-73 */
+	uint8_t RES7[12];	/* 74-7F */
+	uint8_t PriDNSIPAddr[4];	/* 80-83 */
+	uint8_t SecDNSIPAddr[4];	/* 84-87 */
+	uint8_t RES8[8];	/* 88-8F */
+	uint8_t Alias[32];	/* 90-AF */
+	uint8_t TargAddr[8];	/* B0-B7 */ /* FIXME: Remove?? */
+	uint8_t CHAPNameSecretsTable[8];	/* B8-BF */
+	uint8_t EthernetMACAddr[6];	/* C0-C5 */
+	uint16_t TargetPortalGroup;	/* C6-C7 */
+	uint8_t SendScale;	/* C8	 */
+	uint8_t RecvScale;	/* C9	 */
+	uint8_t TypeOfService;	/* CA	 */
+	uint8_t Time2Live;	/* CB	 */
+	uint16_t VLANPriority;	/* CC-CD */
+	uint16_t Reserved8;	/* CE-CF */
+	uint8_t SecIPAddr[4];	/* D0-D3 */
+	uint8_t Reserved9[12];	/* D4-DF */
+	uint8_t iSNSIPAddr[4];	/* E0-E3 */
+	uint16_t iSNSServerPortNumber;	/* E4-E5 */
+	uint8_t Reserved10[10]; /* E6-EF */
+	uint8_t SLPDAIPAddr[4]; /* F0-F3 */
+	uint8_t Reserved11[12]; /* F4-FF */
+	uint8_t iSCSINameString[256];	/* 100-1FF */
+};
+
+/*************************************************************************/
+
+struct dev_db_entry {
+	uint8_t options;	/* 00 */
+#define DDB_OPT_DISC_SESSION  0x10
+#define DDB_OPT_TARGET	      0x02 /* device is a target */
+
+	uint8_t control;	/* 01 */
+
+	uint16_t exeThrottle;	/* 02-03 */
+	uint16_t exeCount;	/* 04-05 */
+	uint8_t retryCount;	/* 06	 */
+	uint8_t retryDelay;	/* 07	 */
+	uint16_t iSCSIOptions;	/* 08-09 */
+
+	uint16_t TCPOptions;	/* 0A-0B */
+
+	uint16_t IPOptions;	/* 0C-0D */
+
+	uint16_t maxPDUSize;	/* 0E-0F */
+	uint16_t rcvMarkerInt;	/* 10-11 */
+	uint16_t sndMarkerInt;	/* 12-13 */
+	uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
+	uint16_t firstBurstSize;	/* 16-17 */
+	uint16_t minTime2Wait;	/* 18-19 : RA :default_time2wait */
+	uint16_t maxTime2Retain;	/* 1A-1B */
+	uint16_t maxOutstndngR2T;	/* 1C-1D */
+	uint16_t keepAliveTimeout;	/* 1E-1F */
+	uint8_t ISID[6];	/* 20-25 big-endian, must be converted
+				 * to little-endian */
+	uint16_t TSID;		/* 26-27 */
+	uint16_t portNumber;	/* 28-29 */
+	uint16_t maxBurstSize;	/* 2A-2B */
+	uint16_t taskMngmntTimeout;	/* 2C-2D */
+	uint16_t reserved1;	/* 2E-2F */
+	uint8_t ipAddr[0x10];	/* 30-3F */
+	uint8_t iSCSIAlias[0x20];	/* 40-5F */
+	uint8_t targetAddr[0x20];	/* 60-7F */
+	uint8_t userID[0x20];	/* 80-9F */
+	uint8_t password[0x20]; /* A0-BF */
+	uint8_t iscsiName[0x100];	/* C0-1BF : FIXME: make this a
+					 * pointer to a string so we
+					 * don't have to reserve so
+					 * much RAM */
+	uint16_t ddbLink;	/* 1C0-1C1 */
+	uint16_t CHAPTableIndex; /* 1C2-1C3 */
+	uint16_t TargetPortalGroup; /* 1C4-1C5 */
+	uint16_t reserved2[2];	/* 1C6-1C7 */
+	uint32_t statSN;	/* 1C8-1CB */
+	uint32_t expStatSN;	/* 1CC-1CF */
+	uint16_t reserved3[0x2C]; /* 1D0-1FB */
+	uint16_t ddbValidCookie; /* 1FC-1FD */
+	uint16_t ddbValidSize;	/* 1FE-1FF */
+};
+
+/*************************************************************************/
+
+/* Flash definitions */
+
+#define FLASH_OFFSET_SYS_INFO	0x02000000
+#define FLASH_DEFAULTBLOCKSIZE	0x20000
+#define FLASH_EOF_OFFSET	(FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
+							    * for EOF
+							    * signature */
+
+struct sys_info_phys_addr {
+	uint8_t address[6];	/* 00-05 */
+	uint8_t filler[2];	/* 06-07 */
+};
+
+struct flash_sys_info {
+	uint32_t cookie;	/* 00-03 */
+	uint32_t physAddrCount; /* 04-07 */
+	struct sys_info_phys_addr physAddr[4]; /* 08-27 */
+	uint8_t vendorId[128];	/* 28-A7 */
+	uint8_t productId[128]; /* A8-127 */
+	uint32_t serialNumber;	/* 128-12B */
+
+	/*  PCI Configuration values */
+	uint32_t pciDeviceVendor;	/* 12C-12F */
+	uint32_t pciDeviceId;	/* 130-133 */
+	uint32_t pciSubsysVendor;	/* 134-137 */
+	uint32_t pciSubsysId;	/* 138-13B */
+
+	/*  This validates version 1. */
+	uint32_t crumbs;	/* 13C-13F */
+
+	uint32_t enterpriseNumber;	/* 140-143 */
+
+	uint32_t mtu;		/* 144-147 */
+	uint32_t reserved0;	/* 148-14b */
+	uint32_t crumbs2;	/* 14c-14f */
+	uint8_t acSerialNumber[16];	/* 150-15f */
+	uint32_t crumbs3;	/* 160-16f */
+
+	/* Leave this last in the struct so it is declared invalid if
+	 * any new items are added.
+	 */
+	uint32_t reserved1[39]; /* 170-1ff */
+};	/* 200 */
+
+struct crash_record {
+	uint16_t fw_major_version;	/* 00 - 01 */
+	uint16_t fw_minor_version;	/* 02 - 03 */
+	uint16_t fw_patch_version;	/* 04 - 05 */
+	uint16_t fw_build_version;	/* 06 - 07 */
+
+	uint8_t build_date[16]; /* 08 - 17 */
+	uint8_t build_time[16]; /* 18 - 27 */
+	uint8_t build_user[16]; /* 28 - 37 */
+	uint8_t card_serial_num[16];	/* 38 - 47 */
+
+	uint32_t time_of_crash_in_secs; /* 48 - 4B */
+	uint32_t time_of_crash_in_ms;	/* 4C - 4F */
+
+	uint16_t out_RISC_sd_num_frames;	/* 50 - 51 */
+	uint16_t OAP_sd_num_words;	/* 52 - 53 */
+	uint16_t IAP_sd_num_frames;	/* 54 - 55 */
+	uint16_t in_RISC_sd_num_words;	/* 56 - 57 */
+
+	uint8_t reserved1[28];	/* 58 - 7F */
+
+	uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
+	uint8_t in_RISC_reg_dump[256];	/*180 -27F */
+	uint8_t in_out_RISC_stack_dump[0];	/*280 - ??? */
+};
+
+struct conn_event_log_entry {
+#define MAX_CONN_EVENT_LOG_ENTRIES	100
+	uint32_t timestamp_sec; /* 00 - 03 seconds since boot */
+	uint32_t timestamp_ms;	/* 04 - 07 milliseconds since boot */
+	uint16_t device_index;	/* 08 - 09  */
+	uint16_t fw_conn_state; /* 0A - 0B  */
+	uint8_t event_type;	/* 0C - 0C  */
+	uint8_t error_code;	/* 0D - 0D  */
+	uint16_t error_code_detail;	/* 0E - 0F  */
+	uint8_t num_consecutive_events; /* 10 - 10  */
+	uint8_t rsvd[3];	/* 11 - 13  */
+};
+
+/*************************************************************************
+ *
+ *				IOCB Commands Structures and Definitions
+ *
+ *************************************************************************/
+#define IOCB_MAX_CDB_LEN	    16	/* Bytes in a CDB */
+#define IOCB_MAX_SENSEDATA_LEN	    32	/* Bytes of sense data */
+
+/* IOCB header structure */
+struct qla4_header {
+	uint8_t entryType;
+#define ET_STATUS		 0x03
+#define ET_MARKER		 0x04
+#define ET_CONT_T1		 0x0A
+#define ET_STATUS_CONTINUATION	 0x10
+#define ET_CMND_T3		 0x19
+#define ET_PASSTHRU0		 0x3A
+#define ET_PASSTHRU_STATUS	 0x3C
+
+	uint8_t entryStatus;
+	uint8_t systemDefined;
+	uint8_t entryCount;
+
+	/* systemDefined definition */
+};
+
+/* Generic queue entry structure*/
+struct queue_entry {
+	uint8_t data[60];
+	uint32_t signature;
+
+};
+
+/* 64 bit addressing segment counts*/
+
+#define COMMAND_SEG_A64	  1
+#define CONTINUE_SEG_A64  5
+
+/* 64 bit addressing segment definition*/
+
+struct data_seg_a64 {
+	struct {
+		uint32_t addrLow;
+		uint32_t addrHigh;
+
+	} base;
+
+	uint32_t count;
+
+};
+
+/* Command Type 3 entry structure*/
+
+struct command_t3_entry {
+	struct qla4_header hdr;	/* 00-03 */
+
+	uint32_t handle;	/* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t connection_id; /* 0A-0B */
+
+	uint8_t control_flags;	/* 0C */
+
+	/* data direction  (bits 5-6) */
+#define CF_WRITE		0x20
+#define CF_READ			0x40
+#define CF_NO_DATA		0x00
+
+	/* task attributes (bits 2-0) */
+#define CF_HEAD_TAG		0x03
+#define CF_ORDERED_TAG		0x02
+#define CF_SIMPLE_TAG		0x01
+
+	/* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS
+	 * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS
+	 * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET
+	 * PROPERLY.
+	 */
+	uint8_t state_flags;	/* 0D */
+	uint8_t cmdRefNum;	/* 0E */
+	uint8_t reserved1;	/* 0F */
+	uint8_t cdb[IOCB_MAX_CDB_LEN];	/* 10-1F */
+	struct scsi_lun lun;	/* FCP LUN (BE). */
+	uint32_t cmdSeqNum;	/* 28-2B */
+	uint16_t timeout;	/* 2C-2D */
+	uint16_t dataSegCnt;	/* 2E-2F */
+	uint32_t ttlByteCnt;	/* 30-33 */
+	struct data_seg_a64 dataseg[COMMAND_SEG_A64];	/* 34-3F */
+
+};
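Each data_seg_a64 holds a 64-bit DMA address split into two 32-bit halves plus a byte count, and the firmware expects the values in little-endian order. A hedged sketch of filling one segment from a DMA handle, assuming the LSDW()/MSDW() helpers from ql4_def.h (the function itself is illustrative):

static void example_fill_dseg(struct data_seg_a64 *dseg,
			      dma_addr_t addr, uint32_t len)
{
	dseg->base.addrLow  = cpu_to_le32(LSDW(addr));	/* bits 31:0 */
	dseg->base.addrHigh = cpu_to_le32(MSDW(addr));	/* bits 63:32 */
	dseg->count	    = cpu_to_le32(len);
}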
+
+
+/* Continuation Type 1 entry structure*/
+struct continuation_t1_entry {
+	struct qla4_header hdr;
+
+	struct data_seg_a64 dataseg[CONTINUE_SEG_A64];
+
+};
+
+/* Parameterize for 64 or 32 bits */
+#define COMMAND_SEG	COMMAND_SEG_A64
+#define CONTINUE_SEG	CONTINUE_SEG_A64
+
+#define ET_COMMAND	ET_CMND_T3
+#define ET_CONTINUE	ET_CONT_T1
+
+/* Marker entry structure*/
+struct marker_entry {
+	struct qla4_header hdr;	/* 00-03 */
+
+	uint32_t system_defined; /* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t modifier;	/* 0A-0B */
+#define MM_LUN_RESET	     0
+
+	uint16_t flags;		/* 0C-0D */
+	uint16_t reserved1;	/* 0E-0F */
+	struct scsi_lun lun;	/* FCP LUN (BE). */
+	uint64_t reserved2;	/* 18-1F */
+	uint64_t reserved3;	/* 20-27 */
+	uint64_t reserved4;	/* 28-2F */
+	uint64_t reserved5;	/* 30-37 */
+	uint64_t reserved6;	/* 38-3F */
+};
+
+/* Status entry structure*/
+struct status_entry {
+	struct qla4_header hdr;	/* 00-03 */
+
+	uint32_t handle;	/* 04-07 */
+
+	uint8_t scsiStatus;	/* 08 */
+#define SCSI_CHECK_CONDITION		  0x02
+
+	uint8_t iscsiFlags;	/* 09 */
+#define ISCSI_FLAG_RESIDUAL_UNDER	  0x02
+#define ISCSI_FLAG_RESIDUAL_OVER	  0x04
+
+	uint8_t iscsiResponse;	/* 0A */
+
+	uint8_t completionStatus;	/* 0B */
+#define SCS_COMPLETE			  0x00
+#define SCS_INCOMPLETE			  0x01
+#define SCS_RESET_OCCURRED		  0x04
+#define SCS_ABORTED			  0x05
+#define SCS_TIMEOUT			  0x06
+#define SCS_DATA_OVERRUN		  0x07
+#define SCS_DATA_UNDERRUN		  0x15
+#define SCS_QUEUE_FULL			  0x1C
+#define SCS_DEVICE_UNAVAILABLE		  0x28
+#define SCS_DEVICE_LOGGED_OUT		  0x29
+
+	uint8_t reserved1;	/* 0C */
+
+	/* state_flags MUST be at the same location as state_flags in
+	 * the Command_T3/4_Entry */
+	uint8_t state_flags;	/* 0D */
+
+	uint16_t senseDataByteCnt;	/* 0E-0F */
+	uint32_t residualByteCnt;	/* 10-13 */
+	uint32_t bidiResidualByteCnt;	/* 14-17 */
+	uint32_t expSeqNum;	/* 18-1B */
+	uint32_t maxCmdSeqNum;	/* 1C-1F */
+	uint8_t senseData[IOCB_MAX_SENSEDATA_LEN];	/* 20-3F */
+
+};
+
+struct pdu_entry {
+	uint8_t *Buff;
+	uint32_t BuffLen;
+	uint32_t SendBuffLen;
+	uint32_t RecvBuffLen;
+	struct pdu_entry *Next;
+	dma_addr_t DmaBuff;
+};
+
+struct passthru0 {
+	struct qla4_header hdr;		       /* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t connectionID;	/* 0A-0B */
+#define ISNS_DEFAULT_SERVER_CONN_ID	((uint16_t)0x8000)
+
+	uint16_t controlFlags;	/* 0C-0D */
+#define PT_FLAG_ETHERNET_FRAME		0x8000
+#define PT_FLAG_ISNS_PDU		0x8000
+#define PT_FLAG_SEND_BUFFER		0x0200
+#define PT_FLAG_WAIT_4_RESPONSE		0x0100
+
+	uint16_t timeout;	/* 0E-0F */
+#define PT_DEFAULT_TIMEOUT		30 /* seconds */
+
+	struct data_seg_a64 outDataSeg64;	/* 10-1B */
+	uint32_t res1;		/* 1C-1F */
+	struct data_seg_a64 inDataSeg64;	/* 20-2B */
+	uint8_t res2[20];	/* 2C-3F */
+};
+
+struct passthru_status {
+	struct qla4_header hdr;		       /* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t connectionID;	/* 0A-0B */
+
+	uint8_t completionStatus;	/* 0C */
+#define PASSTHRU_STATUS_COMPLETE		0x01
+
+	uint8_t residualFlags;	/* 0D */
+
+	uint16_t timeout;	/* 0E-0F */
+	uint16_t portNumber;	/* 10-11 */
+	uint8_t res1[10];	/* 12-1B */
+	uint32_t outResidual;	/* 1C-1F */
+	uint8_t res2[12];	/* 20-2B */
+	uint32_t inResidual;	/* 2C-2F */
+	uint8_t res4[16];	/* 30-3F */
+};
+
+#endif /*  _QLA4X_FW_H */
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_glbl.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_glbl.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_glbl.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_glbl.h	2006-08-17 13:00:03.000000000 -0400
@@ -0,0 +1,80 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA4x_GBL_H
+#define	__QLA4x_GBL_H
+
+int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
+int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
+int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
+			       uint8_t renew_ddb_list);
+int qla4xxx_soft_reset(struct scsi_qla_host *ha);
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs);
+
+void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
+void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
+
+int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
+int qla4xxx_relogin_device(struct scsi_qla_host * ha,
+			   struct ddb_entry * ddb_entry);
+int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
+		      int lun);
+int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t len);
+int qla4xxx_get_firmware_status(struct scsi_qla_host * ha);
+int qla4xxx_get_firmware_state(struct scsi_qla_host * ha);
+int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha);
+
+/* FIXME: Goodness!  this really wants a small struct to hold the
+ * parameters. On x86 the args will get passed on the stack! */
+int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
+			    uint16_t fw_ddb_index,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma,
+			    uint32_t *num_valid_ddb_entries,
+			    uint32_t *next_ddb_index,
+			    uint32_t *fw_ddb_device_state,
+			    uint32_t *conn_err_detail,
+			    uint16_t *tcp_source_port_num,
+			    uint16_t *connection_id);
+
+struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
+				     uint32_t fw_ddb_index);
+int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
+			  dma_addr_t fw_ddb_entry_dma);
+
+void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
+				 struct ddb_entry *ddb_entry);
+u16 rd_nvram_word(struct scsi_qla_host * ha, int offset);
+void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
+struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
+int qla4xxx_add_sess(struct ddb_entry *, int);
+void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
+				   uint16_t fw_ddb_index,
+				   uint16_t connection_id,
+				   uint16_t option);
+int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
+				 uint16_t fw_ddb_index);
+int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
+int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
+void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
+				       uint32_t intr_status);
+int qla4xxx_init_rings(struct scsi_qla_host * ha);
+void qla4xxx_dump_buffer(void *b, uint32_t size);
+struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
+void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
+int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
+int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
+				uint32_t fw_ddb_index, uint32_t state);
+
+void qla4xxx_free_pdu(struct scsi_qla_host * ha, struct pdu_entry * pdu);
+
+extern int extended_error_logging;
+extern int ql4xdiscoverywait;
+extern int ql4xdontresethba;
+#endif				/* _QLA4x_GBL_H */
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_init.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_init.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_init.c	2006-08-17 13:00:14.000000000 -0400
@@ -0,0 +1,1355 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+
+/*
+ * QLogic ISP4xxx Hardware Support Function Prototypes.
+ */
+
+static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
+{
+	uint32_t value;
+	uint8_t func_number;
+	unsigned long flags;
+
+	/* Get the function number */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	value = readw(&ha->reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	func_number = (uint8_t) ((value >> 4) & 0x30);
+	switch (value & ISP_CONTROL_FN_MASK) {
+	case ISP_CONTROL_FN0_SCSI:
+		ha->mac_index = 1;
+		break;
+	case ISP_CONTROL_FN1_SCSI:
+		ha->mac_index = 3;
+		break;
+	default:
+		DEBUG2(printk("scsi%ld: %s: Invalid function number, "
+			      "ispControlStatus = 0x%x\n", ha->host_no,
+			      __func__, value));
+		break;
+	}
+	DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
+		      ha->mac_index));
+}
+
+/**
+ * qla4xxx_free_ddb - deallocate ddb	
+ * @ha: pointer to host adapter structure.
+ * @ddb_entry: pointer to device database entry
+ *
+ * This routine deallocates and unlinks the specified ddb_entry from the
+ * adapter's ddb list.
+ **/
+void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry)
+{
+	/* Remove device entry from list */
+	list_del_init(&ddb_entry->list);
+
+	/* Remove device pointer from index mapping arrays */
+	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
+		(struct ddb_entry *) INVALID_ENTRY;
+	ha->tot_ddbs--;
+
+	/* Free memory and scsi-ml struct for device entry */
+	qla4xxx_destroy_sess(ddb_entry);
+}
+
+/**
+ * qla4xxx_free_ddb_list - deallocate all ddbs
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine deallocates and removes all devices on the specified adapter.
+ **/
+void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
+{
+	struct list_head *ptr;
+	struct ddb_entry *ddb_entry;
+
+	while (!list_empty(&ha->ddb_list)) {
+		ptr = ha->ddb_list.next;
+		/* Free memory for device entry and remove */
+		ddb_entry = list_entry(ptr, struct ddb_entry, list);
+		qla4xxx_free_ddb(ha, ddb_entry);
+	}
+}
+
+/**
+ * qla4xxx_init_rings - initialize hw queues
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine initializes the internal queues for the specified adapter.
+ * The QLA4010 requires us to restart the queues at index 0.
+ * The QLA4000 doesn't care, so just default to QLA4010's requirement.
+ **/
+int qla4xxx_init_rings(struct scsi_qla_host *ha)
+{
+	uint16_t i;
+	unsigned long flags = 0;
+
+	/* Initialize request queue. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->request_out = 0;
+	ha->request_in = 0;
+	ha->request_ptr = &ha->request_ring[ha->request_in];
+	ha->req_q_count = REQUEST_QUEUE_DEPTH;
+
+	/* Initialize response queue. */
+	ha->response_in = 0;
+	ha->response_out = 0;
+	ha->response_ptr = &ha->response_ring[ha->response_out];
+
+	/*
+	 * Initialize DMA Shadow registers.  The firmware is really supposed to
+	 * take care of this, but on some uniprocessor systems, the shadow
+	 * registers aren't cleared-- causing the interrupt_handler to think
+	 * there are responses to be processed when there aren't.
+	 */
+	ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
+	ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+	wmb();
+
+	writel(0, &ha->reg->req_q_in);
+	writel(0, &ha->reg->rsp_q_out);
+	readl(&ha->reg->rsp_q_out);
+
+	/* Initialize active array */
+	for (i = 0; i < MAX_SRBS; i++)
+		ha->active_srb_array[i] = NULL;
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_validate_mac_address - validate adapter MAC address(es)
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha)
+{
+	struct flash_sys_info *sys_info;
+	dma_addr_t sys_info_dma;
+	int status = QLA_ERROR;
+
+	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+				      &sys_info_dma, GFP_KERNEL);
+	if (sys_info == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+			      ha->host_no, __func__));
+
+		goto exit_validate_mac_no_free;
+	}
+	memset(sys_info, 0, sizeof(*sys_info));
+
+	/* Get flash sys info */
+	if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
+			      sizeof(*sys_info)) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
+			      "failed\n", ha->host_no, __func__));
+
+		goto exit_validate_mac;
+	}
+
+	/* Save M.A.C. address & serial_number */
+	memcpy(ha->my_mac, &sys_info->physAddr[0].address[0],
+	       min(sizeof(ha->my_mac),
+		   sizeof(sys_info->physAddr[0].address)));
+	memcpy(ha->serial_number, &sys_info->acSerialNumber,
+	       min(sizeof(ha->serial_number),
+		   sizeof(sys_info->acSerialNumber)));
+
+	status = QLA_SUCCESS;
+
+ exit_validate_mac:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
+			  sys_info_dma);
+
+ exit_validate_mac_no_free:
+	return status;
+}
+
+/**
+ * qla4xxx_init_local_data - initialize adapter specific local data
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
+{
+	int i;
+
+	/* Initialize passthru PDU list */
+	for (i = 0; i < (MAX_PDU_ENTRIES - 1); i++)
+		ha->pdu_queue[i].Next = &ha->pdu_queue[i + 1];
+	ha->free_pdu_top = &ha->pdu_queue[0];
+	ha->free_pdu_bottom = &ha->pdu_queue[MAX_PDU_ENTRIES - 1];
+	ha->free_pdu_bottom->Next = NULL;
+	ha->pdu_active = 0;
+
+	/* Initialize aen queue */
+	ha->aen_q_count = MAX_AEN_ENTRIES;
+
+	return qla4xxx_get_firmware_status(ha);
+}
+
+static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
+{
+	uint32_t timeout_count;
+	int ready = 0;
+
+	DEBUG2(dev_info(&ha->pdev->dev, "Waiting for Firmware Ready..\n"));
+	for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
+	     timeout_count--) {
+		if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
+			qla4xxx_get_dhcp_ip_address(ha);
+
+		/* Get firmware state. */
+		if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) {
+			DEBUG2(printk("scsi%ld: %s: unable to get firmware "
+				      "state\n", ha->host_no, __func__));
+			break;
+
+		}
+
+		if (ha->firmware_state & FW_STATE_ERROR) {
+			DEBUG2(printk("scsi%ld: %s: an unrecoverable error has"
+				      " occurred\n", ha->host_no, __func__));
+			break;
+
+		}
+		if (ha->firmware_state & FW_STATE_CONFIG_WAIT) {
+			/*
+			 * The firmware has not yet been issued an Initialize
+			 * Firmware command, so issue it now.
+			 */
+			if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR)
+				break;
+
+			/* Go back and test for ready state - no wait. */
+			continue;
+		}
+
+		if (ha->firmware_state == FW_STATE_READY) {
+			DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n"));
+			/* The firmware is ready to process SCSI commands. */
+			DEBUG2(dev_info(&ha->pdev->dev,
+					  "scsi%ld: %s: MEDIA TYPE - %s\n",
+					  ha->host_no,
+					  __func__, (ha->addl_fw_state &
+						     FW_ADDSTATE_OPTICAL_MEDIA)
+					  != 0 ? "OPTICAL" : "COPPER"));
+			DEBUG2(dev_info(&ha->pdev->dev,
+					  "scsi%ld: %s: DHCP STATE Enabled "
+					  "%s\n",
+					  ha->host_no, __func__,
+					  (ha->addl_fw_state &
+					   FW_ADDSTATE_DHCP_ENABLED) != 0 ?
+					  "YES" : "NO"));
+			DEBUG2(dev_info(&ha->pdev->dev,
+					  "scsi%ld: %s: LINK %s\n",
+					  ha->host_no, __func__,
+					  (ha->addl_fw_state &
+					   FW_ADDSTATE_LINK_UP) != 0 ?
+					  "UP" : "DOWN"));
+			DEBUG2(dev_info(&ha->pdev->dev,
+					  "scsi%ld: %s: iSNS Service "
+					  "Started %s\n",
+					  ha->host_no, __func__,
+					  (ha->addl_fw_state &
+					   FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
+					  "YES" : "NO"));
+
+			ready = 1;
+			break;
+		}
+		DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
+			      "seconds expired= %d\n", ha->host_no, __func__,
+			      ha->firmware_state, ha->addl_fw_state,
+			      timeout_count));
+		msleep(1000);
+	}			/* end of for */
+
+	if (timeout_count <= 0)
+		DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
+			      ha->host_no, __func__));
+
+	if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) {
+		DEBUG2(printk("scsi%ld: %s: FW is reporting it is waiting to"
+			      " grab an IP address from DHCP server\n",
+			      ha->host_no, __func__));
+		ready = 1;
+	}
+
+	return ready;
+}
+
+/**
+ * qla4xxx_init_firmware - initializes the firmware.
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
+{
+	int status = QLA_ERROR;
+
+	dev_info(&ha->pdev->dev, "Initializing firmware..\n");
+	if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
+			      "control block\n", ha->host_no, __func__));
+		return status;
+	}
+	if (!qla4xxx_fw_ready(ha))
+		return status;
+
+	set_bit(AF_ONLINE, &ha->flags);
+	return qla4xxx_get_firmware_status(ha);
+}
+
+static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
+					       uint32_t fw_ddb_index)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	struct ddb_entry *ddb_entry = NULL;
+	int found = 0;
+	uint32_t device_state;
+
+	/* Make sure the dma buffer is valid */
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+					  sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+			      ha->host_no, __func__));
+		return NULL;
+	}
+
+	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
+				    fw_ddb_entry_dma, NULL, NULL,
+				    &device_state, NULL, NULL, NULL) ==
+	    QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
+			      "fw_ddb_index %d\n", ha->host_no, __func__,
+			      fw_ddb_index));
+		return NULL;
+	}
+
+	/* Allocate DDB if not already allocated. */
+	DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
+		      __func__, fw_ddb_index));
+	list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
+		if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName,
+			   ISCSI_NAME_SIZE) == 0) {
+			found++;
+			break;
+		}
+	}
+
+	if (!found) {
+		DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
+			      "new ddb\n", ha->host_no, __func__,
+			      fw_ddb_index));
+		ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
+	}
+
+	/* free the dma buffer used for the fw ddb entry lookup */
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
+			  fw_ddb_entry_dma);
+
+	return ddb_entry;
+}
+
+/**
+ * qla4xxx_update_ddb_entry - update driver's internal ddb
+ * @ha: pointer to host adapter structure.
+ * @ddb_entry: pointer to device database structure to be filled
+ * @fw_ddb_index: index of the ddb entry in fw ddb table
+ *
+ * This routine updates the driver's internal device database entry
+ * with information retrieved from the firmware's device database
+ * entry for the specified device. The ddb_entry->fw_ddb_index field
+ * must be initialized prior to calling this routine.
+ *
+ **/
+int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
+			     struct ddb_entry *ddb_entry,
+			     uint32_t fw_ddb_index)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	int status = QLA_ERROR;
+
+	if (ddb_entry == NULL) {
+		DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
+			      __func__));
+		goto exit_update_ddb;
+	}
+
+	/* Make sure the dma buffer is valid */
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+					  sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+			      ha->host_no, __func__));
+
+		goto exit_update_ddb;
+	}
+
+	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
+				    fw_ddb_entry_dma, NULL, NULL,
+				    &ddb_entry->fw_ddb_device_state, NULL,
+				    &ddb_entry->tcp_source_port_num,
+				    &ddb_entry->connection_id) ==
+	    QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
+			      "fw_ddb_index %d\n", ha->host_no, __func__,
+			      fw_ddb_index));
+
+		goto exit_update_ddb;
+	}
+
+	status = QLA_SUCCESS;
+	ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID);
+	ddb_entry->task_mgmt_timeout =
+		le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
+	ddb_entry->CmdSn = 0;
+	ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle);
+	ddb_entry->default_relogin_timeout =
+		le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
+	ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait);
+
+	/* Update index in case it changed */
+	ddb_entry->fw_ddb_index = fw_ddb_index;
+	ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
+
+	ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber);
+	ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup);
+	memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0],
+	       min(sizeof(ddb_entry->iscsi_name),
+		   sizeof(fw_ddb_entry->iscsiName)));
+	memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0],
+	       min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr)));
+
+	DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
+		      ha->host_no, __func__, fw_ddb_index,
+		      ddb_entry->fw_ddb_device_state, status));
+
+ exit_update_ddb:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+
+	return status;
+}
+
+/**
+ * qla4xxx_alloc_ddb - allocate device database entry
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ *
+ * This routine allocates a ddb_entry, initializes some values, and
+ * inserts it into the ddb list.
+ **/
+struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
+				     uint32_t fw_ddb_index)
+{
+	struct ddb_entry *ddb_entry;
+
+	DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no,
+		      __func__, fw_ddb_index));
+
+	ddb_entry = qla4xxx_alloc_sess(ha);
+	if (ddb_entry == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
+			      "to add fw_ddb_index [%d]\n",
+			      ha->host_no, __func__, fw_ddb_index));
+		return ddb_entry;
+	}
+
+	ddb_entry->fw_ddb_index = fw_ddb_index;
+	atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
+	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+	atomic_set(&ddb_entry->relogin_timer, 0);
+	atomic_set(&ddb_entry->relogin_retry_count, 0);
+	atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+	list_add_tail(&ddb_entry->list, &ha->ddb_list);
+	ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
+	ha->tot_ddbs++;
+
+	return ddb_entry;
+}
+
+/**
+ * qla4xxx_build_ddb_list - builds driver ddb list
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine searches for all valid firmware ddb entries and builds
+ * an internal ddb list. Ddbs that are considered valid are those with 
+ * a device state of SESSION_ACTIVE.
+ **/
+static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
+{
+	int status = QLA_SUCCESS;
+	uint32_t fw_ddb_index = 0;
+	uint32_t next_fw_ddb_index = 0;
+	uint32_t ddb_state;
+	uint32_t conn_err, err_code;
+	struct ddb_entry *ddb_entry;
+
+	dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
+	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
+	     fw_ddb_index = next_fw_ddb_index) {
+		/* First, let's see if a device exists here */
+		if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL,
+					    &next_fw_ddb_index, &ddb_state,
+					    &conn_err, NULL, NULL) ==
+		    QLA_ERROR) {
+			DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
+				      "fw_ddb_index %d failed", ha->host_no,
+				      __func__, fw_ddb_index));
+			return QLA_ERROR;
+		}
+
+		DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
+			      "next_fw_ddb_index=%d.\n", ha->host_no, __func__,
+			      fw_ddb_index, ddb_state, next_fw_ddb_index));
+
+		/* Issue relogin, if necessary. */
+		if (ddb_state == DDB_DS_SESSION_FAILED ||
+		    ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) {
+			/* Try and login to device */
+			DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
+				      ha->host_no, __func__, fw_ddb_index));
+			err_code = ((conn_err & 0x00ff0000) >> 16);
+			if (err_code == 0x1c || err_code == 0x06) {
+				DEBUG2(printk("scsi%ld: %s send target "
+					      "completed "
+					      "or access denied failure\n",
+					      ha->host_no, __func__));
+			} else
+				qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
+		}
+
+		if (ddb_state != DDB_DS_SESSION_ACTIVE) 
+			goto next_one;
+		/*
+		 * if fw_ddb with session active state found,
+		 * add to ddb_list
+		 */
+		DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n",
+			      ha->host_no, __func__, fw_ddb_index));
+
+		/* Add DDB to our internal ddb list. */
+		ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
+		if (ddb_entry == NULL) {
+			DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
+				      "for device at fw_ddb_index %d\n",
+				      ha->host_no, __func__, fw_ddb_index));
+			return QLA_ERROR;
+		}
+		/* Fill in the device structure */
+		if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
+		    QLA_ERROR) {
+			ha->fw_ddb_index_map[fw_ddb_index] =
+				(struct ddb_entry *)INVALID_ENTRY;
+
+
+			DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
+				      "for fw_ddb_index %d.\n",
+				      ha->host_no, __func__, fw_ddb_index));
+			return QLA_ERROR;
+		}
+
+next_one:
+		/* We know we've reached the last device when
+		 * next_fw_ddb_index is 0 */
+		if (next_fw_ddb_index == 0)
+			break;
+	}
+
+	dev_info(&ha->pdev->dev, "DDB list done..\n");
+
+	return status;
+}
+
+struct qla4_relog_scan {
+	int halt_wait;
+	uint32_t conn_err;
+	uint32_t err_code;
+	uint32_t fw_ddb_index;
+	uint32_t next_fw_ddb_index;
+	uint32_t fw_ddb_device_state;
+};
+
+static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
+{
+	struct ddb_entry *ddb_entry;
+
+	/*
+	 * Don't want to do a relogin if the connection error is
+	 * 0x1c (send targets completed) or 0x06 (access denied).
+	 */
+	rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
+	if (rs->err_code == 0x1c || rs->err_code == 0x06) {
+		DEBUG2(printk(
+			       "scsi%ld: %s send target"
+			       " completed or "
+			       "access denied failure\n",
+			       ha->host_no, __func__));
+	} else {
+		/* We either have a device that is in
+		 * the process of relogging in or a
+		 * device that is waiting to be
+		 * relogged in */
+		rs->halt_wait = 0;
+
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+							   rs->fw_ddb_index);
+		if (ddb_entry == NULL)
+			return QLA_ERROR;
+
+		if (ddb_entry->dev_scan_wait_to_start_relogin != 0
+		    && time_after_eq(jiffies,
+				     ddb_entry->
+				     dev_scan_wait_to_start_relogin))
+		{
+			ddb_entry->dev_scan_wait_to_start_relogin = 0;
+			qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0);
+		}
+	}
+	return QLA_SUCCESS;
+}
+
+static int qla4_scan_for_relogin(struct scsi_qla_host *ha,
+				 struct qla4_relog_scan *rs)
+{
+	int error;
+
+	/* scan for relogins
+	 * ----------------- */
+	for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES;
+	     rs->fw_ddb_index = rs->next_fw_ddb_index) {
+		if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0,
+					    NULL, &rs->next_fw_ddb_index,
+					    &rs->fw_ddb_device_state,
+					    &rs->conn_err, NULL, NULL)
+		    == QLA_ERROR)
+			return QLA_ERROR;
+
+		if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS)
+			rs->halt_wait = 0;
+
+		if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED ||
+		    rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) {
+			error = qla4_test_rdy(ha, rs);
+			if (error)
+				return error;
+		}
+
+		/* We know we've reached the last device when
+		 * next_fw_ddb_index is 0 */
+		if (rs->next_fw_ddb_index == 0)
+			break;
+	}
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_devices_ready - wait for target devices to be logged in
+ * @ha: pointer to adapter structure
+ *
+ * This routine waits up to ql4xdiscoverywait seconds for target devices
+ * to be logged into the F/W database during driver load time.
+ **/
+static int qla4xxx_devices_ready(struct scsi_qla_host *ha)
+{
+	int error;
+	unsigned long discovery_wtime;
+	struct qla4_relog_scan rs;
+
+	discovery_wtime = jiffies + (ql4xdiscoverywait * HZ);
+
+	DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait));
+	do {
+		/* poll for AEN. */
+		qla4xxx_get_firmware_state(ha);
+		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) {
+			/* Set time-between-relogin timer */
+			qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS);
+		}
+
+		/* if no relogins are active or needed, halt discovery wait */
+		rs.halt_wait = 1;
+
+		error = qla4_scan_for_relogin(ha, &rs);
+
+		if (rs.halt_wait) {
+			DEBUG2(printk("scsi%ld: %s: Delay halted.  Devices "
+				      "Ready.\n", ha->host_no, __func__));
+			return QLA_SUCCESS;
+		}
+
+		msleep(2000);
+	} while (!time_after_eq(jiffies, discovery_wtime));
+
+	DEBUG3(qla4xxx_get_conn_event_log(ha));
+
+	return QLA_SUCCESS;
+}
+
+static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
+{
+	unsigned long wtime;
+
+	/* Flush the 0x8014 AEN from the firmware as a result of
+	 * Auto connect. We are basically doing get_firmware_ddb()
+	 * to determine whether we need to log back in or not.
+	 *  Trying to do a set ddb before we have processed 0x8014
+	 *  will result in another set_ddb() for the same ddb. In other
+	 *  words there will be stale entries in the aen_q.
+	 */
+	wtime = jiffies + (2 * HZ);
+	do {
+		if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
+			if (ha->firmware_state & (BIT_2 | BIT_0))
+				return;
+
+		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+		msleep(1000);
+	} while (!time_after_eq(jiffies, wtime));
+
+}
+
+static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
+{
+	uint16_t fw_ddb_index;
+	int status = QLA_SUCCESS;
+
+	/* free the ddb list if is not empty */
+	if (!list_empty(&ha->ddb_list))
+		qla4xxx_free_ddb_list(ha);
+
+	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
+		ha->fw_ddb_index_map[fw_ddb_index] =
+			(struct ddb_entry *)INVALID_ENTRY;
+
+	ha->tot_ddbs = 0;
+
+	qla4xxx_flush_AENS(ha);
+
+	/*
+	 * First perform device discovery for active
+	 * fw ddb indexes and build
+	 * ddb list.
+	 */
+	if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR)
+		return status;
+
+	/* Wait for an AEN */
+	qla4xxx_devices_ready(ha);
+
+	/*
+	 * Targets can come online after the initial discovery, so processing
+	 * the aens here will catch them.
+	 */
+	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
+
+	return status;
+}
+
+/**
+ * qla4xxx_update_ddb_list - update the driver ddb list
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine obtains device information from the F/W database after
+ * firmware or adapter resets.  The device table is preserved.
+ **/
+int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
+{
+	int status = QLA_SUCCESS;
+	struct ddb_entry *ddb_entry, *detemp;
+
+	/* Update the device information for all devices. */
+	list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) {
+		qla4xxx_update_ddb_entry(ha, ddb_entry,
+					 ddb_entry->fw_ddb_index);
+		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+			atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+			DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked "
+				       "ONLINE\n", ha->host_no, __func__,
+				       ddb_entry->fw_ddb_index));
+		} else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+			qla4xxx_mark_device_missing(ha, ddb_entry);
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_relogin_device - re-establish session
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ *
+ * This routine does a session relogin with the specified device.
+ * The ddb entry must be assigned prior to making this call.
+ **/
+int qla4xxx_relogin_device(struct scsi_qla_host *ha,
+			   struct ddb_entry * ddb_entry)
+{
+	uint16_t relogin_timer;
+
+	relogin_timer = max(ddb_entry->default_relogin_timeout,
+			    (uint16_t)RELOGIN_TOV);
+	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+	DEBUG2(printk("scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+		      ddb_entry->fw_ddb_index, relogin_timer));
+
+	qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4010_get_topcat_presence - check for the presence of the QLA4040 TopCat chip
+ * @ha: Pointer to host adapter structure.
+ *
+ **/
+static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+	uint16_t topcat;
+
+	if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
+		return QLA_ERROR;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
+					    isp4010.topcat));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
+		set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
+	else
+		clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
+	ql4xxx_unlock_nvram(ha);
+	return QLA_SUCCESS;
+}
+
+
+static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+	union external_hw_config_reg extHwConfig;
+
+	DEBUG2(printk("scsi%ld: %s: Get EEPROM parameters\n", ha->host_no,
+		      __func__));
+	if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
+		return (QLA_ERROR);
+	if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
+		ql4xxx_unlock_flash(ha);
+		return (QLA_ERROR);
+	}
+
+	/* Get EEPROM parameters from NVRAM and validate */
+	dev_info(&ha->pdev->dev, "Configuring NVRAM ...\n");
+	if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		extHwConfig.Asuint32_t =
+			rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	} else {
+		/*
+		 * QLogic adapters should always have a valid NVRAM.
+		 * If not valid, do not load.
+		 */
+		dev_warn(&ha->pdev->dev,
+			   "scsi%ld: %s: EEPROM checksum invalid.  "
+			   "Please update your EEPROM\n", ha->host_no,
+			   __func__);
+
+		/* set defaults */
+		if (is_qla4010(ha))
+			extHwConfig.Asuint32_t = 0x1912;
+		else if (is_qla4022(ha))
+			extHwConfig.Asuint32_t = 0x0023;
+	}
+	DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
+		     ha->host_no, __func__, extHwConfig.Asuint32_t));
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha));
+	readl(isp_ext_hw_conf(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ql4xxx_unlock_nvram(ha);
+	ql4xxx_unlock_flash(ha);
+
+	return (QLA_SUCCESS);
+}
+
+static void qla4x00_pci_config(struct scsi_qla_host *ha)
+{
+	uint16_t w, mwi;
+
+	dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
+
+	pci_set_master(ha->pdev);
+	mwi = 0;
+	if (pci_set_mwi(ha->pdev))
+		mwi = PCI_COMMAND_INVALIDATE;
+	/*
+	 * We want to respect framework's setting of PCI configuration space
+	 * command register and also want to make sure that all bits of
+	 * interest to us are properly set in command register.
+	 */
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+}
+
+static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
+{
+	int status = QLA_ERROR;
+	uint32_t max_wait_time;
+	unsigned long flags;
+	uint32_t mbox_status;
+
+	dev_info(&ha->pdev->dev, "Starting firmware ...\n");
+
+	/*
+	 * Start firmware from flash ROM
+	 *
+	 * WORKAROUND: Stuff a non-constant value that the firmware can
+	 * use as a seed for a random number generator in MB7 prior to
+	 * setting BOOT_ENABLE.	 Fixes problem where the TCP
+	 * connections use the same TCP ports after each reboot,
+	 * causing some connections to not get re-established.
+	 */
+	DEBUG(printk("scsi%ld: %s: Start firmware from flash ROM\n",
+		     ha->host_no, __func__));
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel(jiffies, &ha->reg->mailbox[7]);
+	if (is_qla4022(ha))
+		writel(set_rmask(NVR_WRITE_ENABLE),
+		       &ha->reg->u1.isp4022.nvram);
+
+	writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
+	readl(&ha->reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Wait for firmware to come UP. */
+	max_wait_time = FIRMWARE_UP_TOV * 4;
+	do {
+		uint32_t ctrl_status;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		ctrl_status = readw(&ha->reg->ctrl_status);
+		mbox_status = readw(&ha->reg->mailbox[0]);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR))
+			break;
+		if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
+			break;
+
+		DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to "
+			      "complete... ctrl_sts=0x%x, remaining=%d\n",
+			      ha->host_no, __func__, ctrl_status,
+			      max_wait_time));
+
+		msleep(250);
+	} while ((max_wait_time--));
+
+	if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
+		DEBUG(printk("scsi%ld: %s: Firmware has started\n",
+			     ha->host_no, __func__));
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+		       &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		status = QLA_SUCCESS;
+	} else {
+		printk(KERN_INFO "scsi%ld: %s: Boot firmware failed "
+		       "-  mbox status 0x%x\n", ha->host_no, __func__,
+		       mbox_status);
+		status = QLA_ERROR;
+	}
+	return status;
+}
+
+static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
+{
+#define QL4_LOCK_DRVR_WAIT	300
+#define QL4_LOCK_DRVR_SLEEP	100
+
+	int drvr_wait = QL4_LOCK_DRVR_WAIT;
+	while (drvr_wait) {
+		if (ql4xxx_lock_drvr(a) == 0) {
+			msleep(QL4_LOCK_DRVR_SLEEP);
+			if (drvr_wait) {
+				DEBUG2(printk("scsi%ld: %s: Waiting for "
+					      "Global Init Semaphore...\n",
+					      a->host_no,
+					      __func__));
+			}
+			drvr_wait -= QL4_LOCK_DRVR_SLEEP;
+		} else {
+			DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
+				      "acquired.\n", a->host_no, __func__));
+			return QLA_SUCCESS;
+		}
+	}
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_start_firmware - starts qla4xxx firmware
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine performs the necessary steps to start the firmware for
+ * the adapter.
+ **/
+static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
+{
+	unsigned long flags = 0;
+	uint32_t mbox_status;
+	int status = QLA_ERROR;
+	int soft_reset = 1;
+	int config_chip = 0;
+
+	if (is_qla4010(ha)) {
+		if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
+			return QLA_ERROR;
+	}
+
+	if (is_qla4022(ha))
+		ql4xxx_set_mac_number(ha);
+
+	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+		return QLA_ERROR;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	DEBUG2(printk("scsi%ld: %s: port_ctrl	= 0x%08X\n", ha->host_no,
+		      __func__, readw(isp_port_ctrl(ha))));
+	DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no,
+		     __func__, readw(isp_port_status(ha))));
+
+	/* Is Hardware already initialized? */
+	if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) {
+		DEBUG(printk("scsi%ld: %s: Hardware has already been "
+			     "initialized\n", ha->host_no, __func__));
+
+		/* Receive firmware boot acknowledgement */
+		mbox_status = readw(&ha->reg->mailbox[0]);
+
+		DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= "
+			      "0x%x\n", ha->host_no, __func__, mbox_status));
+
+		/* Is firmware already booted? */
+		if (mbox_status == 0) {
+			/* F/W not running, must be config by net driver */
+			config_chip = 1;
+			soft_reset = 0;
+		} else {
+			writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
+				DEBUG2(printk("scsi%ld: %s: Get firmware "
+					      "state -- state = 0x%x\n",
+					      ha->host_no,
+					      __func__, ha->firmware_state));
+				/* F/W is running */
+				if (ha->firmware_state &
+				    FW_STATE_CONFIG_WAIT) {
+					DEBUG2(printk("scsi%ld: %s: Firmware "
+						      "in known state -- "
+						      "config and "
+						      "boot, state = 0x%x\n",
+						      ha->host_no, __func__,
+						      ha->firmware_state));
+					config_chip = 1;
+					soft_reset = 0;
+				}
+			} else {
+				DEBUG2(printk("scsi%ld: %s: Firmware in "
+					      "unknown state -- resetting,"
+					      " state = "
+					      "0x%x\n", ha->host_no, __func__,
+					      ha->firmware_state));
+			}
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+		}
+	} else {
+		DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been "
+			     "started - resetting\n", ha->host_no, __func__));
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	DEBUG(printk("scsi%ld: %s: Flags soft_reset=%d, config=%d\n",
+		     ha->host_no, __func__, soft_reset, config_chip));
+	if (soft_reset) {
+		DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
+			     __func__));
+		status = qla4xxx_soft_reset(ha);
+		if (status == QLA_ERROR) {
+			DEBUG(printk("scsi%ld: %s: Soft Reset failed!\n",
+				     ha->host_no, __func__));
+			ql4xxx_unlock_drvr(ha);
+			return QLA_ERROR;
+		}
+		config_chip = 1;
+
+		/* Reset clears the semaphore, so acquire it again */
+		if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+			return QLA_ERROR;
+	}
+
+	if (config_chip) {
+		if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS)
+			status = qla4xxx_start_firmware_from_flash(ha);
+	}
+
+	ql4xxx_unlock_drvr(ha);
+	if (status == QLA_SUCCESS) {
+		qla4xxx_get_fw_version(ha);
+		if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
+			qla4xxx_get_crash_record(ha);
+	} else {
+		DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
+			     ha->host_no, __func__));
+	}
+	return status;
+}
+
+
+/**
+ * qla4xxx_initialize_adapter - initializes hba
+ * @ha: Pointer to host adapter structure.
+ * @renew_ddb_list: Indicates what to do with the adapter's ddb list
+ *	after adapter recovery has completed.
+ *	0=preserve ddb list, 1=destroy and rebuild ddb list
+ * 
+ * This routine performs all of the steps necessary to initialize the adapter.
+ *
+ **/
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
+			       uint8_t renew_ddb_list)
+{
+	int status = QLA_ERROR;
+	int8_t ip_address[IP_ADDR_LEN] = {0};
+
+	ha->eeprom_cmd_data = 0;
+
+	qla4x00_pci_config(ha);
+
+	qla4xxx_disable_intrs(ha);
+
+	/* Initialize the Host adapter request/response queues and firmware */
+	if (qla4xxx_start_firmware(ha) == QLA_ERROR)
+		return status;
+
+	if (qla4xxx_validate_mac_address(ha) == QLA_ERROR)
+		return status;
+
+	if (qla4xxx_init_local_data(ha) == QLA_ERROR)
+		return status;
+
+	status = qla4xxx_init_firmware(ha);
+	if (status == QLA_ERROR)
+		return status;
+
+	/*
+	 * FW is waiting to get an IP address from DHCP server: Skip building
+	 * the ddb_list and wait for the DHCP lease acquired aen to come in,
+	 * followed by the 0x8014 aen, to trigger the tgt discovery process.
+	 */
+	if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
+		return status;
+
+	/* Skip device discovery if ip and subnet is zero */
+	if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
+	    memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
+		return status;
+
+	if (renew_ddb_list == PRESERVE_DDB_LIST) {
+		/*
+		 * We want to preserve lun states (i.e. suspended, etc.)
+		 * for recovery initiated by the driver.  So just update
+		 * the device states for the existing ddb_list.
+		 */
+		qla4xxx_reinitialize_ddb_list(ha);
+	} else if (renew_ddb_list == REBUILD_DDB_LIST) {
+		/*
+		 * We want to build the ddb_list from scratch during
+		 * driver initialization and recovery initiated by the
+		 * INT_HBA_RESET IOCTL.
+		 */
+		status = qla4xxx_initialize_ddb_list(ha);
+		if (status == QLA_ERROR) {
+			DEBUG2(printk("%s(%ld) Error occurred during building "
+				      "ddb list\n", __func__, ha->host_no));
+			goto exit_init_hba;
+		}
+
+	}
+	if (!ha->tot_ddbs) {
+		DEBUG2(printk("scsi%ld: Failed to initialize devices or none "
+			      "present in Firmware device database\n",
+			      ha->host_no));
+	}
+
+ exit_init_hba:
+	return status;
+
+}
+
+/**
+ * qla4xxx_add_device_dynamically - ddb addition due to an AEN
+ * @ha:  Pointer to host adapter structure.
+ * @fw_ddb_index:  Firmware's device database index
+ *
+ * This routine adds a device as a result of an 8014h AEN.
+ **/
+static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
+					   uint32_t fw_ddb_index)
+{
+	struct ddb_entry * ddb_entry;
+
+	/* First allocate a device structure */
+	ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
+	if (ddb_entry == NULL) {
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: Unable to allocate memory to add "
+			      "fw_ddb_index %d\n", ha->host_no, fw_ddb_index));
+		return;
+	}
+
+	if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
+				    QLA_ERROR) {
+		ha->fw_ddb_index_map[fw_ddb_index] =
+					(struct ddb_entry *)INVALID_ENTRY;
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: failed to add new device at index "
+			      "[%d]. Unable to retrieve fw ddb entry\n",
+			      ha->host_no, fw_ddb_index));
+		qla4xxx_free_ddb(ha, ddb_entry);
+		return;
+	}
+
+	if (qla4xxx_add_sess(ddb_entry, 0)) {
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: failed to add new device at index "
+			      "[%d]. Unable to add connection and session\n",
+			      ha->host_no, fw_ddb_index));
+		qla4xxx_free_ddb(ha, ddb_entry);
+	}
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @state: Device state
+ *
+ * This routine processes a Device Database Changed AEN event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+				uint32_t fw_ddb_index, uint32_t state)
+{
+	struct ddb_entry * ddb_entry;
+	uint32_t old_fw_ddb_device_state;
+
+	/* check for out of range index */
+	if (fw_ddb_index >= MAX_DDB_ENTRIES)
+		return QLA_ERROR;
+
+	/* Get the corresponding ddb entry */
+	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+	/* Device does not currently exist in our database. */
+	if (ddb_entry == NULL) {
+		if (state == DDB_DS_SESSION_ACTIVE)
+			qla4xxx_add_device_dynamically(ha, fw_ddb_index);
+		return QLA_SUCCESS;
+	}
+
+	/* Device already exists in our database. */
+	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+	DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
+		      "index [%d]\n", ha->host_no, __func__,
+		      ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+	if (old_fw_ddb_device_state == state &&
+	    state == DDB_DS_SESSION_ACTIVE) {
+		/* Do nothing, state not changed. */
+		return QLA_SUCCESS;
+	}
+
+	ddb_entry->fw_ddb_device_state = state;
+	/* Device is back online. */
+	if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+		atomic_set(&ddb_entry->port_down_timer,
+			   ha->port_down_retry_count);
+		atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
+		atomic_set(&ddb_entry->relogin_retry_count, 0);
+		atomic_set(&ddb_entry->relogin_timer, 0);
+		clear_bit(DF_RELOGIN, &ddb_entry->flags);
+		clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
+		iscsi_if_create_session_done(ddb_entry->conn);
+		/*
+		 * Change the lun state to READY in case the lun timed out
+		 * before the device came back.
+		 */
+	} else {
+		/* Device went away, try to relogin. */
+		/* Mark device missing */
+		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+			qla4xxx_mark_device_missing(ha, ddb_entry);
+		/*
+		 * Relogin if device state changed to a not active state.
+		 * However, do not relogin if this aen is a result of an IOCTL
+		 * logout (DF_NO_RELOGIN) or if this is a discovered device.
+		 */
+		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
+		    !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+		    !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
+		    !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) {
+			/*
+			 * This triggers a relogin.  After the relogin_timer
+			 * expires, the relogin gets scheduled.	 We must wait a
+			 * minimum amount of time since receiving an 0x8014 AEN
+			 * with failed device_state or a logout response before
+			 * we can issue another relogin.
+			 */
+			/* Firmware pads this timeout: (time2wait + 1).
+			 * The driver's relogin retry must be longer than
+			 * the F/W's; otherwise the F/W will fail the
+			 * set_ddb() mbx cmd with 0x4005 since it is still
+			 * counting down its time2wait.
+			 */
+			atomic_set(&ddb_entry->relogin_timer, 0);
+			atomic_set(&ddb_entry->retry_relogin_timer,
+				   ddb_entry->default_time2wait + 4);
+		}
+	}
+
+	return QLA_SUCCESS;
+}
+
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_inline.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_inline.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_inline.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_inline.h	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,84 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ *
+ * qla4xxx_lookup_ddb_by_fw_index
+ *	This routine locates a device handle given the firmware device
+ *	database index.	 If device doesn't exist, returns NULL.
+ *
+ * Input:
+ *	ha - Pointer to host adapter structure.
+ *	fw_ddb_index - Firmware's device database index
+ *
+ * Returns:
+ *	Pointer to the corresponding internal device database structure
+ */
+static inline struct ddb_entry *
+qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
+{
+	struct ddb_entry *ddb_entry = NULL;
+
+	if ((fw_ddb_index < MAX_DDB_ENTRIES) &&
+	    (ha->fw_ddb_index_map[fw_ddb_index] !=
+		(struct ddb_entry *) INVALID_ENTRY)) {
+		ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
+	}
+
+	DEBUG3(printk("scsi%ld: %s: index [%d], ddb_entry = %p\n",
+	    ha->host_no, __func__, fw_ddb_index, ddb_entry));
+
+	return ddb_entry;
+}
+
+static inline void
+__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
+{
+	if (is_qla4022(ha)) {
+		writel(set_rmask(IMR_SCSI_INTR_ENABLE),
+		       &ha->reg->u1.isp4022.intr_mask);
+		readl(&ha->reg->u1.isp4022.intr_mask);
+	} else {
+		writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+	set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+static inline void
+__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
+{
+	if (is_qla4022(ha)) {
+		writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
+		       &ha->reg->u1.isp4022.intr_mask);
+		readl(&ha->reg->u1.isp4022.intr_mask);
+	} else {
+		writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+	clear_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+static inline void
+qla4xxx_enable_intrs(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	__qla4xxx_enable_intrs(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline void
+qla4xxx_disable_intrs(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	__qla4xxx_disable_intrs(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_iocb.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_iocb.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_iocb.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_iocb.c	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,516 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+
+#include <scsi/scsi_tcq.h>
+
+/**
+ * qla4xxx_get_req_pkt - returns a valid entry in the request queue
+ * @ha: Pointer to host adapter structure.
+ * @queue_entry: Pointer to pointer to queue entry structure
+ *
+ * This routine performs the following tasks:
+ *	- returns the current request_in pointer (if queue not full)
+ *	- advances the request_in pointer
+ *	- checks for queue full
+ **/
+int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
+			struct queue_entry **queue_entry)
+{
+	uint16_t request_in;
+	uint8_t status = QLA_SUCCESS;
+
+	*queue_entry = ha->request_ptr;
+
+	/* get the latest request_in and request_out index */
+	request_in = ha->request_in;
+	ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
+
+	/* Advance request queue pointer and check for queue full */
+	if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
+		request_in = 0;
+		ha->request_ptr = ha->request_ring;
+	} else {
+		request_in++;
+		ha->request_ptr++;
+	}
+
+	/* request queue is full, try again later */
+	if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
+		/* restore request pointer */
+		ha->request_ptr = *queue_entry;
+		status = QLA_ERROR;
+	} else {
+		ha->request_in = request_in;
+		memset(*queue_entry, 0, sizeof(**queue_entry));
+	}
+
+	return status;
+}
+
+/**
+ * qla4xxx_send_marker_iocb - issues marker iocb to HBA
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: SCSI LUN
+ *
+ * This routine issues a marker IOCB with the MM_LUN_RESET modifier
+ * for the specified device and LUN.
+ **/
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+			     struct ddb_entry *ddb_entry, int lun)
+{
+	struct marker_entry *marker_entry;
+	unsigned long flags = 0;
+	uint8_t status = QLA_SUCCESS;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Get pointer to the queue entry for the marker */
+	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
+	    QLA_SUCCESS) {
+		status = QLA_ERROR;
+		goto exit_send_marker;
+	}
+
+	/* Put the marker in the request queue */
+	marker_entry->hdr.entryType = ET_MARKER;
+	marker_entry->hdr.entryCount = 1;
+	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+	marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
+	int_to_scsilun(lun, &marker_entry->lun);
+	wmb();
+
+	/* Tell ISP it's got a new I/O request */
+	writel(ha->request_in, &ha->reg->req_q_in);
+	readl(&ha->reg->req_q_in);
+
+exit_send_marker:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return status;
+}
+
+struct pdu_entry * qla4xxx_get_pdu(struct scsi_qla_host * ha, uint32_t length)
+{
+	struct pdu_entry *pdu;
+	struct pdu_entry *free_pdu_top;
+	struct pdu_entry *free_pdu_bottom;
+	uint16_t pdu_active;
+
+	if (ha->free_pdu_top == NULL)
+		return NULL;
+
+	/* Save current state */
+	free_pdu_top = ha->free_pdu_top;
+	free_pdu_bottom = ha->free_pdu_bottom;
+	pdu_active = ha->pdu_active + 1;
+
+	/* get next available pdu */
+	pdu = free_pdu_top;
+	free_pdu_top = pdu->Next;
+	if (free_pdu_top == NULL)
+		free_pdu_bottom = NULL;
+
+	/* round up to nearest page */
+	length = (length + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+
+	/* Allocate PDU buffer */
+	pdu->Buff = dma_alloc_coherent(&ha->pdev->dev, length, &pdu->DmaBuff,
+				       GFP_KERNEL);
+	if (pdu->Buff == NULL)
+		return NULL;
+
+	memset(pdu->Buff, 0, length);
+
+	/* Fill in remainder of PDU */
+	pdu->BuffLen = length;
+	pdu->SendBuffLen = 0;
+	pdu->RecvBuffLen = 0;
+	pdu->Next = NULL;
+	ha->free_pdu_top = free_pdu_top;
+	ha->free_pdu_bottom = free_pdu_bottom;
+	ha->pdu_active = pdu_active;
+	return pdu;
+}
+
+void qla4xxx_free_pdu(struct scsi_qla_host * ha, struct pdu_entry * pdu)
+{
+	if (ha->free_pdu_bottom == NULL) {
+		ha->free_pdu_top = pdu;
+		ha->free_pdu_bottom = pdu;
+	} else {
+		ha->free_pdu_bottom->Next = pdu;
+		ha->free_pdu_bottom = pdu;
+	}
+	dma_free_coherent(&ha->pdev->dev, pdu->BuffLen, pdu->Buff,
+			  pdu->DmaBuff);
+	ha->pdu_active--;
+
+	/* Clear PDU */
+	pdu->Buff = NULL;
+	pdu->BuffLen = 0;
+	pdu->SendBuffLen = 0;
+	pdu->RecvBuffLen = 0;
+	pdu->Next = NULL;
+	pdu->DmaBuff = 0;
+}
+
+/**
+ * qla4_spt0_iocb - issues passthru0 iocb to HBA
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: firmware ddb index
+ * @connection_id: firmware connection id
+ * @pdu_dma_data: dma base address of pdu
+ * @send_len: send length
+ * @recv_len: receive length
+ * @control_flags: iocb control flags
+ * @handle: iocb handle
+ *
+ * This routine issues a passthru0 IOCB.
+ * hardware_lock acquired upon entry, interrupt context
+ **/
+int qla4_spt0_iocb(struct scsi_qla_host * ha,
+		   uint16_t fw_ddb_index,
+		   uint16_t connection_id,
+		   dma_addr_t pdu_dma_data, uint32_t send_len,
+		   uint32_t recv_len, uint16_t control_flags,
+		   uint32_t handle)
+{
+	struct passthru0 *passthru_entry;
+	uint8_t status = QLA_SUCCESS;
+
+	/* Get pointer to the queue entry for the passthru0 iocb */
+	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &passthru_entry) !=
+	    QLA_SUCCESS) {
+		status = QLA_ERROR;
+		goto exit_send_pt0;
+	}
+
+	/* Fill in the request queue */
+	passthru_entry->hdr.entryType = ET_PASSTHRU0;
+	passthru_entry->hdr.entryCount = 1;
+	passthru_entry->handle = cpu_to_le32(handle);
+	passthru_entry->target = cpu_to_le16(fw_ddb_index);
+	passthru_entry->connectionID = cpu_to_le16(connection_id);
+	passthru_entry->timeout = __constant_cpu_to_le16(PT_DEFAULT_TIMEOUT);
+	if (send_len) {
+		control_flags |= PT_FLAG_SEND_BUFFER;
+		passthru_entry->outDataSeg64.base.addrHigh =
+			cpu_to_le32(MSDW(pdu_dma_data));
+		passthru_entry->outDataSeg64.base.addrLow =
+			cpu_to_le32(LSDW(pdu_dma_data));
+		passthru_entry->outDataSeg64.count = cpu_to_le32(send_len);
+	}
+	if (recv_len) {
+		passthru_entry->inDataSeg64.base.addrHigh =
+			cpu_to_le32(MSDW(pdu_dma_data));
+		passthru_entry->inDataSeg64.base.addrLow =
+			cpu_to_le32(LSDW(pdu_dma_data));
+		passthru_entry->inDataSeg64.count = cpu_to_le32(recv_len);
+	}
+	passthru_entry->controlFlags = cpu_to_le16(control_flags);
+	wmb();
+
+	/* Tell ISP it's got a new I/O request */
+	writel(ha->request_in, &ha->reg->req_q_in);
+	readl(&ha->reg->req_q_in);
+
+exit_send_pt0:
+	return status;
+}
+
+struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
+	struct scsi_qla_host *ha)
+{
+	struct continuation_t1_entry *cont_entry;
+
+	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
+
+	/* Advance request queue pointer */
+	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
+		ha->request_in = 0;
+		ha->request_ptr = ha->request_ring;
+	} else {
+		ha->request_in++;
+		ha->request_ptr++;
+	}
+
+	/* Load packet defaults */
+	cont_entry->hdr.entryType = ET_CONTINUE;
+	cont_entry->hdr.entryCount = 1;
+	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
+
+	return cont_entry;
+}
+
+uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > COMMAND_SEG) {
+		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
+		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
+			iocbs++;
+	}
+	return iocbs;
+}
+
+void qla4xxx_build_scsi_iocbs(struct srb *srb,
+			      struct command_t3_entry *cmd_entry,
+			      uint16_t tot_dsds)
+{
+	struct scsi_qla_host *ha;
+	uint16_t avail_dsds;
+	struct data_seg_a64 *cur_dsd;
+	struct scsi_cmnd *cmd;
+
+	cmd = srb->cmd;
+	ha = srb->ha;
+
+	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+		/* No data being transferred */
+		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+		return;
+	}
+
+	avail_dsds = COMMAND_SEG;
+	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
+
+	/* Load data segments */
+	if (cmd->use_sg) {
+		struct scatterlist *cur_seg;
+		struct scatterlist *end_seg;
+
+		cur_seg = (struct scatterlist *)cmd->request_buffer;
+		end_seg = cur_seg + tot_dsds;
+		while (cur_seg < end_seg) {
+			dma_addr_t sle_dma;
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				struct continuation_t1_entry *cont_entry;
+
+				cont_entry = qla4xxx_alloc_cont_entry(ha);
+				cur_dsd =
+					(struct data_seg_a64 *)
+					&cont_entry->dataseg[0];
+				avail_dsds = CONTINUE_SEG;
+			}
+
+			sle_dma = sg_dma_address(cur_seg);
+			cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+			cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+			cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
+			avail_dsds--;
+
+			cur_dsd++;
+			cur_seg++;
+		}
+	} else {
+		cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
+		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
+		cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
+	}
+}
+
+/**
+ * qla4xxx_send_command_to_isp - issues command to HBA
+ * @ha: pointer to host adapter structure.
+ * @srb: pointer to SCSI Request Block to be sent to ISP
+ *
+ * This routine is called by qla4xxx_queuecommand to build an ISP
+ * command and pass it to the ISP for execution.
+ **/
+int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
+{
+	struct scsi_cmnd *cmd = srb->cmd;
+	struct ddb_entry *ddb_entry;
+	struct command_t3_entry *cmd_entry;
+	struct scatterlist *sg = NULL;
+
+	uint16_t tot_dsds;
+	uint16_t req_cnt;
+
+	unsigned long flags;
+	uint16_t cnt;
+	uint16_t i;
+	uint32_t index;
+	char tag[2];
+
+	/* Get real lun and adapter */
+	ddb_entry = srb->ddb;
+
+	/* Send marker(s) if needed. */
+	if (ha->marker_needed == 1) {
+		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
+					     cmd->device->lun) != QLA_SUCCESS)
+			return QLA_ERROR;
+
+		ha->marker_needed = 0;
+	}
+	tot_dsds = 0;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in active srb array */
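+	/* Handle 0 is reserved: the search below never selects slot 0 and
+	 * wraps from MAX_SRBS - 1 back to slot 1, so a zero entry always
+	 * marks a free slot. */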
+	index = ha->current_active_index;
+	for (i = 0; i < MAX_SRBS; i++) {
+		index++;
+		if (index == MAX_SRBS)
+			index = 1;
+		if (ha->active_srb_array[index] == 0) {
+			ha->current_active_index = index;
+			break;
+		}
+	}
+	if (i >= MAX_SRBS) {
+		printk(KERN_INFO "scsi%ld: %s: no more SRB entries available, "
+		       "iocbs=%d, reqs remaining=%d\n", ha->host_no,
+		       __func__, ha->iocb_cnt, ha->req_q_count);
+		goto queuing_error;
+	}
+
+	/* Calculate the number of request entries needed. */
+	if (cmd->use_sg) {
+		sg = (struct scatterlist *)cmd->request_buffer;
+		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
+				      cmd->sc_data_direction);
+		if (tot_dsds == 0)
+			goto queuing_error;
+	} else if (cmd->request_bufflen) {
+		dma_addr_t	req_dma;
+
+		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
+					 cmd->request_bufflen,
+					 cmd->sc_data_direction);
+		if (dma_mapping_error(req_dma))
+			goto queuing_error;
+
+		srb->dma_handle = req_dma;
+		tot_dsds = 1;
+	}
+	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
+
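+	/* If we appear to be short of room, refresh the free-entry count from
+	 * the firmware's shadow copy of the request-queue out pointer. */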
+	if (ha->req_q_count < (req_cnt + 2)) {
+		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
+		if (ha->request_in < cnt)
+			ha->req_q_count = cnt - ha->request_in;
+		else
+			ha->req_q_count = REQUEST_QUEUE_DEPTH -
+				(ha->request_in - cnt);
+	}
+
+	if (ha->req_q_count < (req_cnt + 2))
+		goto queuing_error;
+
+	/* total iocbs active */
+	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
+		goto queuing_error;
+
+	/* Build command packet */
+	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
+	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
+	cmd_entry->hdr.entryType = ET_COMMAND;
+	cmd_entry->handle = cpu_to_le32(index);
+	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);
+
+	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
+	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
+	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
+	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
+	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
+	cmd_entry->hdr.entryCount = req_cnt;
+
+	/* Set data transfer direction control flags
+	 * NOTE: Look at data_direction bits iff there is data to be
+	 *	 transferred, as the data direction bit is sometimes filled
+	 *	 in when there is no data to be transferred */
+	cmd_entry->control_flags = CF_NO_DATA;
+	if (cmd->request_bufflen) {
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			cmd_entry->control_flags = CF_WRITE;
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			cmd_entry->control_flags = CF_READ;
+	}
+
+	/* Set tagged queueing control flags */
+	cmd_entry->control_flags |= CF_SIMPLE_TAG;
+	if (scsi_populate_tag_msg(cmd, tag))
+		switch (tag[0]) {
+		case MSG_HEAD_TAG:
+			cmd_entry->control_flags |= CF_HEAD_TAG;
+			break;
+		case MSG_ORDERED_TAG:
+			cmd_entry->control_flags |= CF_ORDERED_TAG;
+			break;
+		}
+
+
+	/* Advance request queue pointer */
+	ha->request_in++;
+	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
+		ha->request_in = 0;
+		ha->request_ptr = ha->request_ring;
+	} else
+		ha->request_ptr++;
+
+
+	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
+	wmb();
+
+	/*
+	 * Check to see if adapter is online before placing request on
+	 * request queue.  If a reset occurs and a request is in the queue,
+	 * the firmware will still attempt to process the request, retrieving
+	 * garbage for pointers.
+	 */
+	if (!test_bit(AF_ONLINE, &ha->flags)) {
+		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
+			      "Do not issue command.\n",
+			      ha->host_no, __func__));
+		goto queuing_error;
+	}
+
+	/* put command in active array */
+	ha->active_srb_array[index] = srb;
+	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
+
+	/* update counters */
+	srb->state = SRB_ACTIVE_STATE;
+	srb->flags |= SRB_DMA_VALID;
+
+	/* Track IOCB used */
+	ha->iocb_cnt += req_cnt;
+	srb->iocb_cnt = req_cnt;
+	ha->req_q_count -= req_cnt;
+
+	/* Tell ISP it's got a new I/O request */
+	writel(ha->request_in, &ha->reg->req_q_in);
+	readl(&ha->reg->req_q_in);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (cmd->use_sg && tot_dsds) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
+			     cmd->sc_data_direction);
+	} else if (tot_dsds)
+		pci_unmap_single(ha->pdev, srb->dma_handle,
+				 cmd->request_bufflen, cmd->sc_data_direction);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_ERROR;
+}
+
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_isr.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_isr.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_isr.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_isr.c	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,796 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+
+/**
+ * qla4xxx_process_completed_request() - Process a Fast Post response.
+ * @ha: SCSI driver HA context
+ * @index: SRB index
+ **/
+static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
+					      uint32_t index)
+{
+	struct srb *srb;
+
+	srb = qla4xxx_del_from_active_array(ha, index);
+	if (srb) {
+		/* Save ISP completion status */
+		srb->cmd->result = DID_OK << 16;
+		qla4xxx_srb_compl(ha, srb);
+	} else {
+		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
+			      "%d\n", ha->host_no, index));
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+	}
+}
+
+/**
+ * qla4xxx_status_entry - processes status IOCBs
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_status_entry(struct scsi_qla_host *ha,
+				 struct status_entry *sts_entry)
+{
+	uint8_t scsi_status;
+	struct scsi_cmnd *cmd;
+	struct srb *srb;
+	struct ddb_entry *ddb_entry;
+	uint32_t residual;
+	uint16_t sensebytecnt;
+
+	if (sts_entry->completionStatus == SCS_COMPLETE &&
+	    sts_entry->scsiStatus == 0) {
+		qla4xxx_process_completed_request(ha,
+						  le32_to_cpu(sts_entry->
+							      handle));
+		return;
+	}
+
+	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
+	if (!srb) {
+		/* FIXMEdg: Don't we need to reset ISP in this case??? */
+		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
+			      "handle 0x%x, sp=%p. This cmd may have already "
+			      "been completed.\n", ha->host_no, __func__,
+			      le32_to_cpu(sts_entry->handle), srb));
+		return;
+	}
+
+	cmd = srb->cmd;
+	if (cmd == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
+			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
+			      ha->host_no, __func__, sts_entry->handle,
+			      srb, srb->state));
+		dev_warn(&ha->pdev->dev, "Command is NULL:"
+			" already returned to OS (srb=%p)\n", srb);
+		return;
+	}
+
+	ddb_entry = srb->ddb;
+	if (ddb_entry == NULL) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto status_entry_exit;
+	}
+
+	residual = le32_to_cpu(sts_entry->residualByteCnt);
+
+	/* Translate ISP error to a Linux SCSI error. */
+	scsi_status = sts_entry->scsiStatus;
+	switch (sts_entry->completionStatus) {
+	case SCS_COMPLETE:
+		if (scsi_status == 0) {
+			cmd->result = DID_OK << 16;
+			break;
+		}
+
+		if (sts_entry->iscsiFlags &
+		    (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
+			cmd->resid = residual;
+
+		cmd->result = DID_OK << 16 | scsi_status;
+
+		if (scsi_status != SCSI_CHECK_CONDITION)
+			break;
+
+		/* Copy Sense Data into sense buffer. */
+		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
+		if (sensebytecnt == 0)
+			break;
+
+		memcpy(cmd->sense_buffer, sts_entry->senseData,
+		       min(sensebytecnt,
+			   (uint16_t) sizeof(cmd->sense_buffer)));
+
+		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
+			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
+			      cmd->device->channel, cmd->device->id,
+			      cmd->device->lun, __func__,
+			      sts_entry->senseData[2] & 0x0f,
+			      sts_entry->senseData[12],
+			      sts_entry->senseData[13]));
+
+		srb->flags |= SRB_GOT_SENSE;
+		break;
+
+	case SCS_INCOMPLETE:
+		/* Always set the status to DID_ERROR, since
+		 * all conditions result in that status anyway */
+		cmd->result = DID_ERROR << 16;
+		break;
+
+	case SCS_RESET_OCCURRED:
+		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
+			      ha->host_no, cmd->device->channel,
+			      cmd->device->id, cmd->device->lun, __func__));
+
+		cmd->result = DID_RESET << 16;
+		break;
+
+	case SCS_ABORTED:
+		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
+			      ha->host_no, cmd->device->channel,
+			      cmd->device->id, cmd->device->lun, __func__));
+
+		cmd->result = DID_RESET << 16;
+		break;
+
+	case SCS_TIMEOUT:
+		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
+			      ha->host_no, cmd->device->channel,
+			      cmd->device->id, cmd->device->lun));
+
+		cmd->result = DID_BUS_BUSY << 16;
+
+		/*
+		 * Mark device missing so that we won't continue to send
+		 * I/O to this device.	We should get a ddb state change
+		 * AEN soon.
+		 */
+		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+			qla4xxx_mark_device_missing(ha, ddb_entry);
+		break;
+
+	case SCS_DATA_UNDERRUN:
+	case SCS_DATA_OVERRUN:
+		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
+				      "residual = 0x%x\n", ha->host_no,
+				      cmd->device->channel, cmd->device->id,
+				      cmd->device->lun, __func__, residual));
+
+			cmd->result = DID_ERROR << 16;
+			break;
+		}
+
+		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
+			/*
+			 * Firmware detected a SCSI transport underrun
+			 * condition
+			 */
+			cmd->resid = residual;
+			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
+				      "detected, xferlen = 0x%x, residual = "
+				      "0x%x\n",
+				      ha->host_no, cmd->device->channel,
+				      cmd->device->id,
+				      cmd->device->lun, __func__,
+				      cmd->request_bufflen,
+				      residual));
+		}
+
+		/*
+		 * If there is scsi_status, it takes precedence over
+		 * underflow condition.
+		 */
+		if (scsi_status != 0) {
+			cmd->result = DID_OK << 16 | scsi_status;
+
+			if (scsi_status != SCSI_CHECK_CONDITION)
+				break;
+
+			/* Copy Sense Data into sense buffer. */
+			memset(cmd->sense_buffer, 0,
+			       sizeof(cmd->sense_buffer));
+
+			sensebytecnt =
+				le16_to_cpu(sts_entry->senseDataByteCnt);
+			if (sensebytecnt == 0)
+				break;
+
+			memcpy(cmd->sense_buffer, sts_entry->senseData,
+			       min(sensebytecnt,
+				   (uint16_t) sizeof(cmd->sense_buffer)));
+
+			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
+				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
+				      cmd->device->channel, cmd->device->id,
+				      cmd->device->lun, __func__,
+				      sts_entry->senseData[2] & 0x0f,
+				      sts_entry->senseData[12],
+				      sts_entry->senseData[13]));
+		} else {
+			/*
+			 * If RISC reports underrun and target does not
+			 * report it then we must have a lost frame, so
+			 * tell upper layer to retry it by reporting a
+			 * bus busy.
+			 */
+			if ((sts_entry->iscsiFlags &
+			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
+				cmd->result = DID_BUS_BUSY << 16;
+			} else if ((cmd->request_bufflen - residual) <
+				   cmd->underflow) {
+				/*
+				 * Handle mid-layer underflow???
+				 *
+				 * For kernels less than 2.4, the driver must
+				 * return an error if an underflow is detected.
+				 * For kernels equal-to and above 2.4, the
+				 * mid-layer will apparently handle the
+				 * underflow by detecting the residual count --
+				 * unfortunately, we do not see where this is
+				 * actually being done.	 In the interim, we
+				 * will return DID_ERROR.
+				 */
+				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
+					      "Mid-layer Data underrun, "
+					      "xferlen = 0x%x, "
+					      "residual = 0x%x\n", ha->host_no,
+					      cmd->device->channel,
+					      cmd->device->id,
+					      cmd->device->lun, __func__,
+					      cmd->request_bufflen, residual));
+
+				cmd->result = DID_ERROR << 16;
+			} else {
+				cmd->result = DID_OK << 16;
+			}
+		}
+		break;
+
+	case SCS_DEVICE_LOGGED_OUT:
+	case SCS_DEVICE_UNAVAILABLE:
+		/*
+		 * Mark device missing so that we won't continue to
+		 * send I/O to this device.  We should get a ddb
+		 * state change AEN soon.
+		 */
+		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+			qla4xxx_mark_device_missing(ha, ddb_entry);
+
+		cmd->result = DID_BUS_BUSY << 16;
+		break;
+
+	case SCS_QUEUE_FULL:
+		/*
+		 * SCSI Mid-Layer handles device queue full
+		 */
+		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
+		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
+			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
+			      " iResp=%02x\n", ha->host_no, cmd->device->id,
+			      cmd->device->lun, __func__,
+			      sts_entry->completionStatus,
+			      sts_entry->scsiStatus, sts_entry->state_flags,
+			      sts_entry->iscsiFlags,
+			      sts_entry->iscsiResponse));
+		break;
+
+	default:
+		cmd->result = DID_ERROR << 16;
+		break;
+	}
+
+status_entry_exit:
+
+	/* complete the request */
+	srb->cc_stat = sts_entry->completionStatus;
+	qla4xxx_srb_compl(ha, srb);
+}
+
+/**
+ * qla4xxx_process_response_queue - process response queue completions
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine processes response queue completions in interrupt context.
+ * hardware_lock locked upon entry.
+ **/
+static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
+{
+	uint32_t count = 0;
+	struct srb *srb = NULL;
+	struct status_entry *sts_entry;
+
+	/* Process all responses from response queue */
+	while ((ha->response_in =
+		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
+	       ha->response_out) {
+		sts_entry = (struct status_entry *) ha->response_ptr;
+		count++;
+
+		/* Advance pointers for next entry */
+		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
+			ha->response_out = 0;
+			ha->response_ptr = ha->response_ring;
+		} else {
+			ha->response_out++;
+			ha->response_ptr++;
+		}
+
+		/* process entry */
+		switch (sts_entry->hdr.entryType) {
+		case ET_STATUS:
+			/*
+			 * Common status - Single completion posted in single
+			 * IOSB.
+			 */
+			qla4xxx_status_entry(ha, sts_entry);
+			break;
+
+		case ET_PASSTHRU_STATUS:
+			break;
+
+		case ET_STATUS_CONTINUATION:
+			/* Just throw away the status continuation entries */
+			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
+				      "- ignoring\n", ha->host_no, __func__));
+			break;
+
+		case ET_COMMAND:
+			/* ISP device queue is full. Command not
+			 * accepted by ISP.  Queue command for
+			 * later */
+
+			srb = qla4xxx_del_from_active_array(ha,
+						    le32_to_cpu(sts_entry->
+								handle));
+			if (srb == NULL)
+				goto exit_prq_invalid_handle;
+
+			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
+				      "srb %p\n", ha->host_no, __func__, srb));
+
+			/* Retry normally by sending it back with
+			 * DID_BUS_BUSY */
+			srb->cmd->result = DID_BUS_BUSY << 16;
+			qla4xxx_srb_compl(ha, srb);
+			break;
+
+		case ET_CONTINUE:
+			/* Just throw away the continuation entries */
+			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
+				      "ignoring\n", ha->host_no, __func__));
+			break;
+
+		default:
+			/*
+			 * Invalid entry in response queue, reset RISC
+			 * firmware.
+			 */
+			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
+				      "response queue \n", ha->host_no,
+				      __func__,
+				      sts_entry->hdr.entryType));
+			goto exit_prq_error;
+		}
+	}
+
+	/*
+	 * Done with responses, update the ISP.  For QLA4010, this also
+	 * clears the interrupt.
+	 */
+	writel(ha->response_out, &ha->reg->rsp_q_out);
+	readl(&ha->reg->rsp_q_out);
+
+	return;
+
+exit_prq_invalid_handle:
+	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
+		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
+		      sts_entry->completionStatus));
+
+exit_prq_error:
+	writel(ha->response_out, &ha->reg->rsp_q_out);
+	readl(&ha->reg->rsp_q_out);
+
+	set_bit(DPC_RESET_HA, &ha->dpc_flags);
+}
+
+/**
+ * qla4xxx_isr_decode_mailbox - decodes mailbox status
+ * @ha: Pointer to host adapter structure.
+ * @mailbox_status: Mailbox status.
+ *
+ * This routine decodes the mailbox status during the ISR.
+ * Hardware_lock locked upon entry. runs in interrupt context.
+ **/
+static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
+				       uint32_t mbox_status)
+{
+	int i;
+
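+	/* Mailbox status codes are grouped by their high nibble: completion
+	 * statuses are handed to the mailbox waiter, async event statuses
+	 * (AENs) are decoded below. */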
+	if ((mbox_status == MBOX_STS_BUSY) ||
+	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
+	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
+		ha->mbox_status[0] = mbox_status;
+
+		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+			/*
+			 * Copy all mailbox registers to a temporary
+			 * location and set mailbox command done flag
+			 */
+			for (i = 1; i < ha->mbox_status_count; i++)
+				ha->mbox_status[i] =
+					readl(&ha->reg->mailbox[i]);
+
+			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+			wake_up(&ha->mailbox_wait_queue);
+		}
+	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
+		/* Immediately process the AENs that don't require much work.
+		 * Only queue the database_changed AENs */
+		switch (mbox_status) {
+		case MBOX_ASTS_SYSTEM_ERROR:
+			/* Log Mailbox registers */
+			if (ql4xdontresethba) {
+				DEBUG2(printk("%s:Dont Reset HBA\n",
+					      __func__));
+			} else {
+				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			}
+			break;
+
+		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
+		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
+		case MBOX_ASTS_NVRAM_INVALID:
+		case MBOX_ASTS_IP_ADDRESS_CHANGED:
+		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
+			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
+				      "Reset HA\n", ha->host_no, mbox_status));
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			break;
+
+		case MBOX_ASTS_LINK_UP:
+			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
+				      ha->host_no, mbox_status));
+			set_bit(AF_LINK_UP, &ha->flags);
+			break;
+
+		case MBOX_ASTS_LINK_DOWN:
+			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
+				      ha->host_no, mbox_status));
+			clear_bit(AF_LINK_UP, &ha->flags);
+			break;
+
+		case MBOX_ASTS_HEARTBEAT:
+			ha->seconds_since_last_heartbeat = 0;
+			break;
+
+		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
+			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
+				      "ACQUIRED\n", ha->host_no, mbox_status));
+			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+			break;
+
+		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
+		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
+							   * mode
+							   * only */
+		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
+		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
+		case MBOX_ASTS_SUBNET_STATE_CHANGE:
+			/* No action */
+			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
+				      mbox_status));
+			break;
+
+		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
+		case MBOX_ASTS_DNS:
+			/* No action */
+			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
+				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
+				      ha->host_no, mbox_status,
+				      readl(&ha->reg->mailbox[1]),
+				      readl(&ha->reg->mailbox[2])));
+			break;
+
+		case MBOX_ASTS_SELF_TEST_FAILED:
+		case MBOX_ASTS_LOGIN_FAILED:
+			/* No action */
+			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
+				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
+				      ha->host_no, mbox_status,
+				      readl(&ha->reg->mailbox[1]),
+				      readl(&ha->reg->mailbox[2]),
+				      readl(&ha->reg->mailbox[3])));
+			break;
+
+		case MBOX_ASTS_DATABASE_CHANGED:
+			/* Queue AEN information and process it in the DPC
+			 * routine */
+			if (ha->aen_q_count > 0) {
+				/* advance pointer */
+				if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
+					ha->aen_in = 0;
+				else
+					ha->aen_in++;
+
+				/* decrement available counter */
+				ha->aen_q_count--;
+
+				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
+					ha->aen_q[ha->aen_in].mbox_sts[i] =
+						readl(&ha->reg->mailbox[i]);
+
+				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
+
+				/* print debug message */
+				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
+					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
+					      ha->host_no, ha->aen_in,
+					      mbox_status,
+					      ha->aen_q[ha->aen_in].mbox_sts[1],
+					      ha->aen_q[ha->aen_in].mbox_sts[2],
+					      ha->aen_q[ha->aen_in].mbox_sts[3],
+					      ha->aen_q[ha->aen_in].mbox_sts[4]));
+
+				/* The DPC routine will process the aen */
+				set_bit(DPC_AEN, &ha->dpc_flags);
+			} else {
+				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
+					      "overflowed!  AEN LOST!!\n",
+					      ha->host_no, __func__,
+					      mbox_status));
+
+				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
+					      ha->host_no));
+
+				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
+					DEBUG2(printk("AEN[%d] %04x %04x %04x "
+						      "%04x\n", i,
+						      ha->aen_q[i].mbox_sts[0],
+						      ha->aen_q[i].mbox_sts[1],
+						      ha->aen_q[i].mbox_sts[2],
+						      ha->aen_q[i].mbox_sts[3]));
+				}
+			}
+			break;
+
+		default:
+			DEBUG2(printk(KERN_WARNING
+				      "scsi%ld: AEN %04x UNKNOWN\n",
+				      ha->host_no, mbox_status));
+			break;
+		}
+	} else {
+		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
+			      ha->host_no, mbox_status));
+
+		ha->mbox_status[0] = mbox_status;
+	}
+}
+
+/**
+ * qla4xxx_interrupt_service_routine - isr
+ * @ha: pointer to host adapter structure.
+ * @intr_status: interrupt status bits to service.
+ *
+ * This is the main interrupt service routine.
+ * hardware_lock locked upon entry. runs in interrupt context.
+ **/
+void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
+				       uint32_t intr_status)
+{
+	/* Process response queue interrupt. */
+	if (intr_status & CSR_SCSI_COMPLETION_INTR)
+		qla4xxx_process_response_queue(ha);
+
+	/* Process mailbox/asynch event interrupt. */
+	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
+		qla4xxx_isr_decode_mailbox(ha,
+					   readl(&ha->reg->mailbox[0]));
+
+		/* Clear Mailbox Interrupt */
+		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+		       &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+}
+
+/**
+ * qla4xxx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ * @regs: Unused
+ **/
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct scsi_qla_host *ha;
+	uint32_t intr_status;
+	unsigned long flags = 0;
+	uint8_t reqs_count = 0;
+
+	ha = (struct scsi_qla_host *) dev_id;
+	if (!ha) {
+		DEBUG2(printk(KERN_INFO
+			      "qla4xxx: Interrupt with NULL host ptr\n"));
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/*
+	 * Repeatedly service interrupts up to a maximum of
+	 * MAX_REQS_SERVICED_PER_INTR
+	 */
+	while (1) {
+		/*
+		 * Read interrupt status
+		 */
+		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
+		    ha->response_out)
+			intr_status = CSR_SCSI_COMPLETION_INTR;
+		else
+			intr_status = readl(&ha->reg->ctrl_status);
+
+		if ((intr_status &
+		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
+		    0) {
+			if (reqs_count == 0)
+				ha->spurious_int_count++;
+			break;
+		}
+
+		if (intr_status & CSR_FATAL_ERROR) {
+			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
+				      "Status 0x%04x\n", ha->host_no,
+				      readl(isp_port_error_status (ha))));
+
+			/* Issue Soft Reset to clear this error condition.
+			 * This will prevent the RISC from repeatedly
+			 * interrupting the driver; thus, allowing the DPC to
+			 * get scheduled to continue error recovery.
+			 * NOTE: Disabling RISC interrupts does not work in
+			 * this case, as CSR_FATAL_ERROR overrides
+			 * CSR_SCSI_INTR_ENABLE */
+			if ((readl(&ha->reg->ctrl_status) &
+			     CSR_SCSI_RESET_INTR) == 0) {
+				writel(set_rmask(CSR_SOFT_RESET),
+				       &ha->reg->ctrl_status);
+				readl(&ha->reg->ctrl_status);
+			}
+
+			writel(set_rmask(CSR_FATAL_ERROR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+
+			__qla4xxx_disable_intrs(ha);
+
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+
+			break;
+		} else if (intr_status & CSR_SCSI_RESET_INTR) {
+			clear_bit(AF_ONLINE, &ha->flags);
+			__qla4xxx_disable_intrs(ha);
+
+			writel(set_rmask(CSR_SCSI_RESET_INTR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+
+			set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+
+			break;
+		} else if (intr_status & INTR_PENDING) {
+			qla4xxx_interrupt_service_routine(ha, intr_status);
+			ha->total_io_count++;
+			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+				break;
+
+			intr_status = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * qla4xxx_process_aen - processes AENs generated by firmware
+ * @ha: pointer to host adapter structure.
+ * @process_aen: type of AENs to process
+ *
+ * Processes specific types of Asynchronous Events generated by firmware. 
+ * The type of AENs to process is specified by process_aen and can be
+ *	PROCESS_ALL_AENS	 0
+ *	FLUSH_DDB_CHANGED_AENS	 1
+ *	RELOGIN_DDB_CHANGED_AENS 2
+ **/
+void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
+{
+	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+	struct aen *aen;
+	int i;
+	unsigned long flags;
+
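+	/* Drain the AEN queue.  The hardware lock is dropped while each entry
+	 * is handled and re-acquired before advancing to the next one. */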
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	while (ha->aen_out != ha->aen_in) {
+		/* Advance pointers for next entry */
+		if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
+			ha->aen_out = 0;
+		else
+			ha->aen_out++;
+
+		ha->aen_q_count++;
+		aen = &ha->aen_q[ha->aen_out];
+
+		/* copy aen information to local structure */
+		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+			mbox_sts[i] = aen->mbox_sts[i];
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
+			     "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
+			     mbox_sts[0], mbox_sts[2], mbox_sts[3],
+			     mbox_sts[1], mbox_sts[4]));
+
+		switch (mbox_sts[0]) {
+		case MBOX_ASTS_DATABASE_CHANGED:
+			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
+				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
+					      "[%d] state=%04x FLUSHED!\n",
+					      ha->host_no, ha->aen_out,
+					      mbox_sts[0], mbox_sts[2],
+					      mbox_sts[3]));
+				break;
+			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
+				/* for use during init time, we only want to
+				 * relogin non-active ddbs */
+				struct ddb_entry *ddb_entry;
+
+				ddb_entry =
+					/* FIXME: name length? */
+					qla4xxx_lookup_ddb_by_fw_index(ha,
+								       mbox_sts[2]);
+				if (!ddb_entry)
+					break;
+
+				ddb_entry->dev_scan_wait_to_complete_relogin =
+					0;
+				ddb_entry->dev_scan_wait_to_start_relogin =
+					jiffies +
+					((ddb_entry->default_time2wait +
+					  4) * HZ);
+
+				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
+					      " RELOGIN after %d seconds\n",
+					      ha->host_no,
+					      ddb_entry->fw_ddb_index,
+					      ddb_entry->default_time2wait +
+					      4));
+				break;
+			}
+
+			if (mbox_sts[1] == 0) {	/* Global DB change. */
+				qla4xxx_reinitialize_ddb_list(ha);
+			} else if (mbox_sts[1] == 1) {	/* Specific device. */
+				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
+							    mbox_sts[3]);
+			}
+			break;
+		}
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+}
+
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_mbx.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_mbx.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_mbx.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_mbx.c	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,930 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+
+
+/**
+ * qla4xxx_mailbox_command - issues mailbox commands
+ * @ha: Pointer to host adapter structure.
+ * @inCount: number of mailbox registers to load.
+ * @outCount: number of mailbox registers to return.
+ * @mbx_cmd: data pointer for mailbox in registers.
+ * @mbx_sts: data pointer for mailbox out registers.
+ *
+ * This routine issues mailbox commands and waits for completion.
+ * If outCount is 0, this routine completes successfully WITHOUT waiting
+ * for the mailbox command to complete.
+ **/
+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
+			    uint8_t outCount, uint32_t *mbx_cmd,
+			    uint32_t *mbx_sts)
+{
+	int status = QLA_ERROR;
+	uint8_t i;
+	u_long wait_count;
+	uint32_t intr_status;
+	unsigned long flags = 0;
+	DECLARE_WAITQUEUE(wait, current);
+
+	mutex_lock(&ha->mbox_sem);
+
+	/* Mailbox code active */
+	set_bit(AF_MBOX_COMMAND, &ha->flags);
+
+	/* Make sure that pointers are valid */
+	if (!mbx_cmd || !mbx_sts) {
+		DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
+			      "pointer\n", ha->host_no, __func__));
+		goto mbox_exit;
+	}
+
+	/* To prevent overwriting mailbox registers for a command that has
+	 * not yet been serviced, check to see if a previously issued
+	 * mailbox command is interrupting.
+	 * -----------------------------------------------------------------
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	intr_status = readl(&ha->reg->ctrl_status);
+	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
+		/* Service existing interrupt */
+		qla4xxx_interrupt_service_routine(ha, intr_status);
+		clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+	}
+
+	/* Send the mailbox command to the firmware */
+	ha->mbox_status_count = outCount;
+	for (i = 0; i < outCount; i++)
+		ha->mbox_status[i] = 0;
+
+	/* Load all mailbox registers, except mailbox 0. */
+	for (i = 1; i < inCount; i++)
+		writel(mbx_cmd[i], &ha->reg->mailbox[i]);
+
+	/* Wakeup firmware  */
+	writel(mbx_cmd[0], &ha->reg->mailbox[0]);
+	readl(&ha->reg->mailbox[0]);
+	writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
+	readl(&ha->reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Wait for completion */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	add_wait_queue(&ha->mailbox_wait_queue, &wait);
+
+	/*
+	 * If we don't want status, don't wait for the mailbox command to
+	 * complete.  For example, MBOX_CMD_RESET_FW doesn't return status,
+	 * you must poll the inbound Interrupt Mask for completion.
+	 */
+	if (outCount == 0) {
+		status = QLA_SUCCESS;
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&ha->mailbox_wait_queue, &wait);
+		goto mbox_exit;
+	}
+	/* Wait for command to complete */
+	wait_count = jiffies + MBOX_TOV * HZ;
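+	/* Poll for up to MBOX_TOV seconds; any pending interrupt is serviced
+	 * here as well, so the mailbox status registers are captured even if
+	 * the ISR has not run yet. */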
+	while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
+		if (time_after_eq(jiffies, wait_count))
+			break;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		intr_status = readl(&ha->reg->ctrl_status);
+		if (intr_status & INTR_PENDING) {
+			/*
+			 * Service the interrupt.
+			 * The ISR will save the mailbox status registers
+			 * to a temporary storage location in the adapter
+			 * structure.
+			 */
+			ha->mbox_status_count = outCount;
+			qla4xxx_interrupt_service_routine(ha, intr_status);
+		}
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		msleep(10);
+	}
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ha->mailbox_wait_queue, &wait);
+
+	/* Check for mailbox timeout. */
+	if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
+		DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
+			      " Scheduling Adapter Reset\n", ha->host_no,
+			      mbx_cmd[0]));
+		ha->mailbox_timeout_count++;
+		mbx_sts[0] = (-1);
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		goto mbox_exit;
+	}
+
+	/*
+	 * Copy the mailbox out registers to the caller's mailbox in/out
+	 * structure.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (i = 0; i < outCount; i++)
+		mbx_sts[i] = ha->mbox_status[i];
+
+	/* Set return status and error flags (if applicable). */
+	switch (ha->mbox_status[0]) {
+	case MBOX_STS_COMMAND_COMPLETE:
+		status = QLA_SUCCESS;
+		break;
+
+	case MBOX_STS_INTERMEDIATE_COMPLETION:
+		status = QLA_SUCCESS;
+		break;
+
+	case MBOX_STS_BUSY:
+		DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
+			       ha->host_no, __func__, mbx_cmd[0]));
+		ha->mailbox_timeout_count++;
+		break;
+
+	default:
+		DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
+			      "sts = %08X ****\n", ha->host_no, __func__,
+			      mbx_cmd[0], mbx_sts[0]));
+		break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+mbox_exit:
+	clear_bit(AF_MBOX_COMMAND, &ha->flags);
+	clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+	mutex_unlock(&ha->mbox_sem);
+
+	return status;
+}
+
+
+/**
+ * qla4xxx_issue_iocb - issue mailbox iocb command
+ * @ha: adapter state pointer.
+ * @buffer: buffer pointer.
+ * @phys_addr: physical address of buffer.
+ * @size: size of buffer.
+ *
+ * Issues iocbs via mailbox commands. 
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ **/
+int
+qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
+		   dma_addr_t phys_addr, size_t size)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
+	mbox_cmd[1] = 0;
+	mbox_cmd[2] = LSDW(phys_addr);
+	mbox_cmd[3] = MSDW(phys_addr);
+	status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
+	return status;
+}
+
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
+				   uint16_t fw_ddb_index,
+				   uint16_t connection_id,
+				   uint16_t option)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+	mbox_cmd[1] = fw_ddb_index;
+	mbox_cmd[2] = connection_id;
+	mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
+	if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
+			      "option %04x failed sts %04X %04X",
+			      ha->host_no, __func__,
+			      option, mbox_sts[0], mbox_sts[1]));
+		if (mbox_sts[0] == 0x4005)
+			DEBUG2(printk("%s reason %04X\n", __func__,
+				      mbox_sts[1]));
+	}
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
+				 uint16_t fw_ddb_index)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
+	mbox_cmd[1] = fw_ddb_index;
+	if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS)
+		return QLA_ERROR;
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_initialize_fw_cb - initializes firmware control block.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
+{
+	struct init_fw_ctrl_blk *init_fw_cb;
+	dma_addr_t init_fw_cb_dma;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_ERROR;
+
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct init_fw_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
+	if (init_fw_cb == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
+			      ha->host_no, __func__));
+		return 10;
+	}
+	memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
+
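+	/* Read the current init FW control block, patch in our queue
+	 * addresses and options, then write it back with
+	 * MBOX_CMD_INITIALIZE_FIRMWARE below. */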
+	/* Get Initialize Firmware Control Block. */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+	mbox_cmd[2] = LSDW(init_fw_cb_dma);
+	mbox_cmd[3] = MSDW(init_fw_cb_dma);
+	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		dma_free_coherent(&ha->pdev->dev,
+				  sizeof(struct init_fw_ctrl_blk),
+				  init_fw_cb, init_fw_cb_dma);
+		return status;
+	}
+
+	/* Initialize request and response queues. */
+	qla4xxx_init_rings(ha);
+
+	/* Fill in the request and response queue information. */
+	init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out);
+	init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in);
+	init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+	init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+	init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma));
+	init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma));
+	init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma));
+	init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma));
+	init_fw_cb->ShadowRegBufAddrLo =
+		cpu_to_le32(LSDW(ha->shadow_regs_dma));
+	init_fw_cb->ShadowRegBufAddrHi =
+		cpu_to_le32(MSDW(ha->shadow_regs_dma));
+
+	/* Set up required options. */
+	init_fw_cb->FwOptions |=
+		__constant_cpu_to_le16(FWOPT_SESSION_MODE |
+				       FWOPT_INITIATOR_MODE);
+	init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+
+	/* Save some info in adapter structure. */
+	ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions);
+	ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions);
+	ha->heartbeat_interval = init_fw_cb->HeartbeatInterval;
+	memcpy(ha->ip_address, init_fw_cb->IPAddr,
+	       min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
+	memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
+	       min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
+	memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
+	       min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
+	memcpy(ha->name_string, init_fw_cb->iSCSINameString,
+	       min(sizeof(ha->name_string),
+		   sizeof(init_fw_cb->iSCSINameString)));
+	memcpy(ha->alias, init_fw_cb->Alias,
+	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));
+
+	/* Save Command Line Parameter info */
+	ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout);
+	ha->discovery_wait = ql4xdiscoverywait;
+
+	/* Send Initialize Firmware Control Block. */
+	mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+	mbox_cmd[1] = 0;
+	mbox_cmd[2] = LSDW(init_fw_cb_dma);
+	mbox_cmd[3] = MSDW(init_fw_cb_dma);
+	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) ==
+	    QLA_SUCCESS) {
+		status = QLA_SUCCESS;
+	} else {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
+			      "failed w/ status %04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+	}
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
+			  init_fw_cb, init_fw_cb_dma);
+
+	return status;
+}
+
+/**
+ * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
+{
+	struct init_fw_ctrl_blk *init_fw_cb;
+	dma_addr_t init_fw_cb_dma;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct init_fw_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
+	if (init_fw_cb == NULL) {
+		printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
+		       __func__);
+		return 10;
+	}
+
+	/* Get Initialize Firmware Control Block. */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
+	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+	mbox_cmd[2] = LSDW(init_fw_cb_dma);
+	mbox_cmd[3] = MSDW(init_fw_cb_dma);
+
+	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+			      ha->host_no, __func__));
+		dma_free_coherent(&ha->pdev->dev,
+				  sizeof(struct init_fw_ctrl_blk),
+				  init_fw_cb, init_fw_cb_dma);
+		return QLA_ERROR;
+	}
+
+	/* Save IP Address. */
+	memcpy(ha->ip_address, init_fw_cb->IPAddr,
+	       min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
+	memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
+	       min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
+	memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
+	       min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
+
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
+			  init_fw_cb, init_fw_cb_dma);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_firmware_state - gets firmware state of HBA
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Get firmware version */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
+	if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
+			      "status %04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	ha->firmware_state = mbox_sts[1];
+	ha->board_id = mbox_sts[2];
+	ha->addl_fw_state = mbox_sts[3];
+	DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
+		      ha->host_no, __func__, ha->firmware_state));
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_firmware_status - retrieves firmware status
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Get firmware version */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
+	if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
+			      "status %04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+		return QLA_ERROR;
+	}
+
+	/* High-water mark of IOCBs */
+	ha->iocb_hiwat = mbox_sts[2];
+	if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+		ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+	else
+		dev_info(&ha->pdev->dev, "WARNING!!!  You have less than %d "
+			   "firmware IOCBs available (%d).\n",
+			   IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @fw_ddb_entry: Pointer to firmware's device database entry structure
+ * @num_valid_ddb_entries: Pointer to number of valid ddb entries
+ * @next_ddb_index: Pointer to next valid device database index
+ * @fw_ddb_device_state: Pointer to device state
+ **/
+int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
+			    uint16_t fw_ddb_index,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma,
+			    uint32_t *num_valid_ddb_entries,
+			    uint32_t *next_ddb_index,
+			    uint32_t *fw_ddb_device_state,
+			    uint32_t *conn_err_detail,
+			    uint16_t *tcp_source_port_num,
+			    uint16_t *connection_id)
+{
+	int status = QLA_ERROR;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Make sure the device index is valid */
+	if (fw_ddb_index >= MAX_DDB_ENTRIES) {
+		DEBUG2(printk("scsi%ld: %s: index [%d] out of range.\n",
+			      ha->host_no, __func__, fw_ddb_index));
+		goto exit_get_fwddb;
+	}
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
+	mbox_cmd[1] = (uint32_t) fw_ddb_index;
+	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
+	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
+	if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) ==
+	    QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
+			      " with status 0x%04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+		goto exit_get_fwddb;
+	}
+	if (fw_ddb_index != mbox_sts[1]) {
+		DEBUG2(printk("scsi%ld: %s: index mismatch [%d] != [%d].\n",
+			      ha->host_no, __func__, fw_ddb_index,
+			      mbox_sts[1]));
+		goto exit_get_fwddb;
+	}
+	if (fw_ddb_entry) {
+		dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
+			   "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
+			   fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
+			   mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0],
+			   fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2],
+			   fw_ddb_entry->ipAddr[3],
+			   le16_to_cpu(fw_ddb_entry->portNumber),
+			   fw_ddb_entry->iscsiName);
+	}
+	if (num_valid_ddb_entries)
+		*num_valid_ddb_entries = mbox_sts[2];
+	if (next_ddb_index)
+		*next_ddb_index = mbox_sts[3];
+	if (fw_ddb_device_state)
+		*fw_ddb_device_state = mbox_sts[4];
+
+	/*
+	 * RA: This mailbox has been changed to pass connection error details.
+	 * This is true for ISP4010 as of Version E; it is not clear exactly
+	 * when it was changed.  The time2wait comes from the default_time2wait
+	 * field (also called minTime2Wait) of the DEV_DB_ENTRY struct.
+	 */
+	if (conn_err_detail)
+		*conn_err_detail = mbox_sts[5];
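+	/* As used below, mbox_sts[6] carries the TCP source port in its upper
+	 * 16 bits and the connection id in its low bits. */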
+	if (tcp_source_port_num)
+		*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
+	if (connection_id)
+		*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
+	status = QLA_SUCCESS;
+
+exit_get_fwddb:
+	return status;
+}
+
+/**
+ * qla4xxx_set_ddb_entry - sets a ddb entry.
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @fw_ddb_entry_dma: DMA address of the firmware's ddb entry structure.
+ *
+ * This routine initializes or updates the adapter's device database
+ * entry for the specified device. It also triggers a login for the
+ * specified device. Therefore, it may also be used as a secondary
+ * login routine when a NULL pointer is specified for the fw_ddb_entry.
+ **/
+int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
+			  dma_addr_t fw_ddb_entry_dma)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Do not wait for completion. The firmware will send us an
+	 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
+	mbox_cmd[1] = (uint32_t) fw_ddb_index;
+	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
+	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
+	return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
+}
+
+int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
+				    uint16_t fw_ddb_index)
+{
+	int status = QLA_ERROR;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Do not wait for completion. The firmware will send us an
+	 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
+	mbox_cmd[1] = (uint32_t) fw_ddb_index;
+	mbox_cmd[2] = 0;
+	mbox_cmd[3] = 0;
+	mbox_cmd[4] = 0;
+	status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
+	DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
+		      __func__, fw_ddb_index, status, mbox_sts[0],
+		      mbox_sts[1]));
+
+	return status;
+}
+
+/**
+ * qla4xxx_get_crash_record - retrieves crash record.
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
+ **/
+void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct crash_record *crash_record = NULL;
+	dma_addr_t crash_record_dma = 0;
+	uint32_t crash_record_size = 0;
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	/* Get size of crash record. */
+	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
+	if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
+			      ha->host_no, __func__));
+		goto exit_get_crash_record;
+	}
+	crash_record_size = mbox_sts[4];
+	if (crash_record_size == 0) {
+		DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
+			      ha->host_no, __func__));
+		goto exit_get_crash_record;
+	}
+
+	/* Alloc Memory for Crash Record. */
+	crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
+					  &crash_record_dma, GFP_KERNEL);
+	if (crash_record == NULL)
+		goto exit_get_crash_record;
+
+	/* Get Crash Record. */
+	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
+	mbox_cmd[2] = LSDW(crash_record_dma);
+	mbox_cmd[3] = MSDW(crash_record_dma);
+	mbox_cmd[4] = crash_record_size;
+	if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS)
+		goto exit_get_crash_record;
+
+	/* Dump Crash Record. */
+
+exit_get_crash_record:
+	if (crash_record)
+		dma_free_coherent(&ha->pdev->dev, crash_record_size,
+				  crash_record, crash_record_dma);
+}
+
+/**
+ * qla4xxx_get_conn_event_log - retrieves connection event log
+ * @ha: Pointer to host adapter structure.
+ **/
+void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct conn_event_log_entry *event_log = NULL;
+	dma_addr_t event_log_dma = 0;
+	uint32_t event_log_size = 0;
+	uint32_t num_valid_entries;
+	uint32_t oldest_entry = 0;
+	uint32_t max_event_log_entries;
+	uint8_t i;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	/* Get size of connection event log. */
+	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
+	if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS)
+		goto exit_get_event_log;
+
+	event_log_size = mbox_sts[4];
+	if (event_log_size == 0)
+		goto exit_get_event_log;
+
+	/* Alloc Memory for Connection Event Log. */
+	event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
+				       &event_log_dma, GFP_KERNEL);
+	if (event_log == NULL)
+		goto exit_get_event_log;
+
+	/* Get Connection Event Log. */
+	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
+	mbox_cmd[2] = LSDW(event_log_dma);
+	mbox_cmd[3] = MSDW(event_log_dma);
+	if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
+			      "log!\n", ha->host_no, __func__));
+		goto exit_get_event_log;
+	}
+
+	/* Dump Event Log. */
+	num_valid_entries = mbox_sts[1];
+
+	max_event_log_entries = event_log_size /
+		sizeof(struct conn_event_log_entry);
+
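+	/* The event log is a circular buffer; once it has wrapped, the oldest
+	 * entry sits just past the most recently written one. */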
+	if (num_valid_entries > max_event_log_entries)
+		oldest_entry = num_valid_entries % max_event_log_entries;
+
+	DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
+		      ha->host_no, num_valid_entries));
+
+	if (extended_error_logging == 3) {
+		if (oldest_entry == 0) {
+			/* Circular Buffer has not wrapped around */
+			for (i=0; i < num_valid_entries; i++) {
+				qla4xxx_dump_buffer((uint8_t *)event_log+
+						    (i*sizeof(*event_log)),
+						    sizeof(*event_log));
+			}
+		} else {
+			/* Circular Buffer has wrapped around -
+			 * display accordingly*/
+			for (i=oldest_entry; i < max_event_log_entries; i++) {
+				qla4xxx_dump_buffer((uint8_t *)event_log+
+						    (i*sizeof(*event_log)),
+						    sizeof(*event_log));
+			}
+			for (i=0; i < oldest_entry; i++) {
+				qla4xxx_dump_buffer((uint8_t *)event_log+
+						    (i*sizeof(*event_log)),
+						    sizeof(*event_log));
+			}
+		}
+	}
+
+exit_get_event_log:
+	if (event_log)
+		dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
+				  event_log_dma);
+}
+
+/**
+ * qla4xxx_reset_lun - issues LUN Reset
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: lun number to reset
+ *
+ * This routine performs a LUN RESET on the specified target/lun.
+ * The caller must ensure that the ddb_entry and lun_entry pointers
+ * are valid before calling this routine.
+ **/
+int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
+		      int lun)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
+		      ddb_entry->os_target_id, lun));
+
+	/*
+	 * Send lun reset command to ISP, so that the ISP will return all
+	 * outstanding requests with RESET status
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_LUN_RESET;
+	mbox_cmd[1] = ddb_entry->fw_ddb_index;
+	mbox_cmd[2] = lun << 8;
+	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */
+	qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]);
+	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
+	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
+		status = QLA_ERROR;
+
+	return status;
+}
+
+
+int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t len)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_READ_FLASH;
+	mbox_cmd[1] = LSDW(dma_addr);
+	mbox_cmd[2] = MSDW(dma_addr);
+	mbox_cmd[3] = offset;
+	mbox_cmd[4] = len;
+	if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
+		    "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
+		    __func__, mbox_sts[0], mbox_sts[1], offset, len));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_fw_version - gets firmware version
+ * @ha: Pointer to host adapter structure.
+ *
+ * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may 
+ * hold an address for data.  Make sure that we write 0 to those mailboxes, 
+ * if unused.
+ **/
+int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Get firmware version. */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
+	if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
+		    "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+
+	/* Save firmware version information. */
+	ha->firmware_version[0] = mbox_sts[1];
+	ha->firmware_version[1] = mbox_sts[2];
+	ha->patch_number = mbox_sts[3];
+	ha->build_number = mbox_sts[4];
+
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
+	mbox_cmd[2] = LSDW(dma_addr);
+	mbox_cmd[3] = MSDW(dma_addr);
+
+	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
+		     ha->host_no, __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
+	mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
+
+	if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
+			*ddb_index = mbox_sts[2];
+		} else {
+			DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
+			     ha->host_no, __func__, mbox_sts[0]));
+			return QLA_ERROR;
+		}
+	} else {
+		*ddb_index = MAX_PRST_DEV_DB_ENTRIES;
+	}
+
+	return QLA_SUCCESS;
+}
+
+
+int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
+{
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t ddb_index;
+	int ret_val = QLA_SUCCESS;
+
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+					  sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+			      ha->host_no, __func__));
+		ret_val = QLA_ERROR;
+		goto qla4xxx_send_tgts_exit;
+	}
+
+	ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma);
+	if (ret_val != QLA_SUCCESS)
+		goto qla4xxx_send_tgts_exit;
+
+	ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index);
+	if (ret_val != QLA_SUCCESS)
+		goto qla4xxx_send_tgts_exit;
+
+	memset((void *)fw_ddb_entry->iSCSIAlias, 0,
+	       sizeof(fw_ddb_entry->iSCSIAlias));
+
+	memset((void *)fw_ddb_entry->iscsiName, 0,
+	       sizeof(fw_ddb_entry->iscsiName));
+
+	memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr));
+	memset((void *)fw_ddb_entry->targetAddr, 0,
+	       sizeof(fw_ddb_entry->targetAddr));
+
+	fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
+	fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port));
+
+	fw_ddb_entry->ipAddr[0] = *ip;
+	fw_ddb_entry->ipAddr[1] = *(ip + 1);
+	fw_ddb_entry->ipAddr[2] = *(ip + 2);
+	fw_ddb_entry->ipAddr[3] = *(ip + 3);
+
+	ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
+
+qla4xxx_send_tgts_exit:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+			  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret_val;
+}
+
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_nvram.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_nvram.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_nvram.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_nvram.c	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,224 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+
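+/*
+ * EEPROM geometry helpers: the ISP4022 uses the larger FM93C86A part;
+ * otherwise the FM93C56A/66A parameters are used.
+ */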
+static inline int eeprom_size(struct scsi_qla_host *ha)
+{
+	return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
+}
+
+static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
+{
+	return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
+		FM93C56A_NO_ADDR_BITS_16;
+}
+
+static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
+{
+	return FM93C56A_DATA_BITS_16;
+}
+
+static int fm93c56a_select(struct scsi_qla_host * ha)
+{
+	DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
+
+	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
+	writel(ha->eeprom_cmd_data, isp_nvram(ha));
+	readl(isp_nvram(ha));
+	return 1;
+}
+
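+/*
+ * Clock the start bit, the command bits, and then the address bits out
+ * to the serial EEPROM, MSB first, toggling the clock line for each bit.
+ */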
+static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
+{
+	int i;
+	int mask;
+	int dataBit;
+	int previousBit;
+
+	/* Clock in a zero, then do the start bit. */
+	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
+	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+	       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
+	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+	       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
+	readl(isp_nvram(ha));
+	mask = 1 << (FM93C56A_CMD_BITS - 1);
+
+	/* Force the previous data bit to be different. */
+	previousBit = 0xffff;
+	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+		dataBit =
+			(cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+		if (previousBit != dataBit) {
+
+			/*
+			 * If the bit changed, then change the DO state to
+			 * match.
+			 */
+			writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
+			previousBit = dataBit;
+		}
+		writel(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
+		writel(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
+		readl(isp_nvram(ha));
+		cmd = cmd << 1;
+	}
+	mask = 1 << (eeprom_no_addr_bits(ha) - 1);
+
+	/* Force the previous data bit to be different. */
+	previousBit = 0xffff;
+	for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
+		dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
+			AUBURN_EEPROM_DO_0;
+		if (previousBit != dataBit) {
+			/*
+			 * If the bit changed, then change the DO state to
+			 * match.
+			 */
+			writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
+			previousBit = dataBit;
+		}
+		writel(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
+		writel(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
+		readl(isp_nvram(ha));
+		addr = addr << 1;
+	}
+	return 1;
+}
+
+static int fm93c56a_deselect(struct scsi_qla_host * ha)
+{
+	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
+	writel(ha->eeprom_cmd_data, isp_nvram(ha));
+	readl(isp_nvram(ha));
+	return 1;
+}
+
+static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
+{
+	int i;
+	int data = 0;
+	int dataBit;
+
+	/* Read the data bits
+	 * The first bit is a dummy.  Clock right over it. */
+	for (i = 0; i < eeprom_no_data_bits(ha); i++) {
+		writel(ha->eeprom_cmd_data |
+		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
+		writel(ha->eeprom_cmd_data |
+		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
+		dataBit =
+			(readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+		data = (data << 1) | dataBit;
+	}
+
+	*value = data;
+	return 1;
+}
+
+static int eeprom_readword(int eepromAddr, u16 * value,
+			   struct scsi_qla_host * ha)
+{
+	fm93c56a_select(ha);
+	fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
+	fm93c56a_datain(ha, value);
+	fm93c56a_deselect(ha);
+	return 1;
+}
+
+/* Hardware_lock must be set before calling */
+u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
+{
+	u16 val;
+
+	/* NOTE: NVRAM uses half-word addresses */
+	eeprom_readword(offset, &val, ha);
+	return val;
+}
+
+int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
+{
+	int status = QLA_ERROR;
+	uint16_t checksum = 0;
+	uint32_t index;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (index = 0; index < eeprom_size(ha); index++)
+		checksum += rd_nvram_word(ha, index);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (checksum == 0)
+		status = QLA_SUCCESS;
+
+	return status;
+}
+
+/*************************************************************************
+ *
+ *			Hardware Semaphore routines
+ *
+ *************************************************************************/
+int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
+{
+	uint32_t value;
+	unsigned long flags;
+	unsigned int seconds = 30;
+
+	DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
+		      "0x%x\n", ha->host_no, sem_mask, sem_bits));
+	do {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel((sem_mask | sem_bits), isp_semaphore(ha));
+		value = readw(isp_semaphore(ha));
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		if ((value & (sem_mask >> 16)) == sem_bits) {
+			DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
+				      "code = 0x%x\n", ha->host_no,
+				      sem_mask, sem_bits));
+			return QLA_SUCCESS;
+		}
+		ssleep(1);
+	} while (--seconds);
+	return QLA_ERROR;
+}
+
+void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel(sem_mask, isp_semaphore(ha));
+	readl(isp_semaphore(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
+		      sem_mask));
+}
+
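+/*
+ * Single, non-blocking attempt to acquire the hardware semaphore;
+ * ql4xxx_sem_spinlock() above retries for up to 30 seconds.
+ */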
+int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
+{
+	uint32_t value;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel((sem_mask | sem_bits), isp_semaphore(ha));
+	value = readw(isp_semaphore(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if ((value & (sem_mask >> 16)) == sem_bits) {
+		DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
+			      "0x%x, sema code=0x%x\n", ha->host_no,
+			      sem_mask, sem_bits, value));
+		return 1;
+	}
+	return 0;
+}
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_nvram.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_nvram.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_nvram.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_nvram.h	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,256 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef _QL4XNVRM_H_
+#define _QL4XNVRM_H_
+
+/*
+ * FM93Cxx serial EEPROM definitions
+ */
+#define FM93C56A_SIZE_8	 0x100
+#define FM93C56A_SIZE_16 0x80
+#define FM93C66A_SIZE_8	 0x200
+#define FM93C66A_SIZE_16 0x100	/* 4010 */
+#define FM93C86A_SIZE_16 0x400	/* 4022 */
+
+#define	 FM93C56A_START	      0x1
+
+/* Commands */
+#define	 FM93C56A_READ	      0x2
+#define	 FM93C56A_WEN	      0x0
+#define	 FM93C56A_WRITE	      0x1
+#define	 FM93C56A_WRITE_ALL   0x0
+#define	 FM93C56A_WDS	      0x0
+#define	 FM93C56A_ERASE	      0x3
+#define	 FM93C56A_ERASE_ALL   0x0
+
+/* Command Extensions */
+#define	 FM93C56A_WEN_EXT	 0x3
+#define	 FM93C56A_WRITE_ALL_EXT	 0x1
+#define	 FM93C56A_WDS_EXT	 0x0
+#define	 FM93C56A_ERASE_ALL_EXT	 0x2
+
+/* Address Bits */
+#define	 FM93C56A_NO_ADDR_BITS_16   8	/* 4010 */
+#define	 FM93C56A_NO_ADDR_BITS_8    9	/* 4010 */
+#define	 FM93C86A_NO_ADDR_BITS_16   10	/* 4022 */
+
+/* Data Bits */
+#define	 FM93C56A_DATA_BITS_16	 16
+#define	 FM93C56A_DATA_BITS_8	 8
+
+/* Special Bits */
+#define	 FM93C56A_READ_DUMMY_BITS   1
+#define	 FM93C56A_READY		    0
+#define	 FM93C56A_BUSY		    1
+#define	 FM93C56A_CMD_BITS	    2
+
+/* Auburn Bits */
+#define	 AUBURN_EEPROM_DI	    0x8
+#define	 AUBURN_EEPROM_DI_0	    0x0
+#define	 AUBURN_EEPROM_DI_1	    0x8
+#define	 AUBURN_EEPROM_DO	    0x4
+#define	 AUBURN_EEPROM_DO_0	    0x0
+#define	 AUBURN_EEPROM_DO_1	    0x4
+#define	 AUBURN_EEPROM_CS	    0x2
+#define	 AUBURN_EEPROM_CS_0	    0x0
+#define	 AUBURN_EEPROM_CS_1	    0x2
+#define	 AUBURN_EEPROM_CLK_RISE	    0x1
+#define	 AUBURN_EEPROM_CLK_FALL	    0x0
+
+/* */
+/* EEPROM format */
+/* */
+struct bios_params {
+	uint16_t SpinUpDelay:1;
+	uint16_t BIOSDisable:1;
+	uint16_t MMAPEnable:1;
+	uint16_t BootEnable:1;
+	uint16_t Reserved0:12;
+	uint8_t bootID0:7;
+	uint8_t bootID0Valid:1;
+	uint8_t bootLUN0[8];
+	uint8_t bootID1:7;
+	uint8_t bootID1Valid:1;
+	uint8_t bootLUN1[8];
+	uint16_t MaxLunsPerTarget;
+	uint8_t Reserved1[10];
+};
+
+struct eeprom_port_cfg {
+
+	/* MTU MAC 0 */
+	u16 etherMtu_mac;
+
+	/* Flow Control MAC 0 */
+	u16 pauseThreshold_mac;
+	u16 resumeThreshold_mac;
+	u16 reserved[13];
+};
+
+struct eeprom_function_cfg {
+	u8 reserved[30];
+
+	/* MAC ADDR */
+	u8 macAddress[6];
+	u8 macAddressSecondary[6];
+	u16 subsysVendorId;
+	u16 subsysDeviceId;
+};
+
+struct eeprom_data {
+	union {
+		struct {	/* isp4010 */
+			u8 asic_id[4]; /* x00 */
+			u8 version;	/* x04 */
+			u8 reserved;	/* x05 */
+			u16 board_id;	/* x06 */
+#define	  EEPROM_BOARDID_ELDORADO    1
+#define	  EEPROM_BOARDID_PLACER	     2
+
+#define EEPROM_SERIAL_NUM_SIZE	     16
+			u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */
+
+			/* ExtHwConfig: */
+			/* Offset = 24bytes
+			 *
+			 * | SSRAM Size|     |ST|PD|SDRAM SZ| W| B| SP	|  |
+			 * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+			 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+			 */
+			u16 ext_hw_conf; /* x18 */
+			u8 mac0[6];	/* x1A */
+			u8 mac1[6];	/* x20 */
+			u8 mac2[6];	/* x26 */
+			u8 mac3[6];	/* x2C */
+			u16 etherMtu;	/* x32 */
+			u16 macConfig;	/* x34 */
+#define	 MAC_CONFIG_ENABLE_ANEG	    0x0001
+#define	 MAC_CONFIG_ENABLE_PAUSE    0x0002
+			u16 phyConfig;	/* x36 */
+#define	 PHY_CONFIG_PHY_ADDR_MASK	      0x1f
+#define	 PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
+			u16 topcat;	/* x38 */
+#define TOPCAT_PRESENT		0x0100
+#define TOPCAT_MASK		0xFF00
+
+#define EEPROM_UNUSED_1_SIZE   2
+			u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
+			u16 bufletSize;	/* x3C */
+			u16 bufletCount;	/* x3E */
+			u16 bufletPauseThreshold; /* x40 */
+			u16 tcpWindowThreshold50; /* x42 */
+			u16 tcpWindowThreshold25; /* x44 */
+			u16 tcpWindowThreshold0; /* x46 */
+			u16 ipHashTableBaseHi;	/* x48 */
+			u16 ipHashTableBaseLo;	/* x4A */
+			u16 ipHashTableSize;	/* x4C */
+			u16 tcpHashTableBaseHi;	/* x4E */
+			u16 tcpHashTableBaseLo;	/* x50 */
+			u16 tcpHashTableSize;	/* x52 */
+			u16 ncbTableBaseHi;	/* x54 */
+			u16 ncbTableBaseLo;	/* x56 */
+			u16 ncbTableSize;	/* x58 */
+			u16 drbTableBaseHi;	/* x5A */
+			u16 drbTableBaseLo;	/* x5C */
+			u16 drbTableSize;	/* x5E */
+
+#define EEPROM_UNUSED_2_SIZE   4
+			u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */
+			u16 ipReassemblyTimeout; /* x64 */
+			u16 tcpMaxWindowSizeHi;	/* x66 */
+			u16 tcpMaxWindowSizeLo;	/* x68 */
+			u32 net_ip_addr0;	/* x6A Added for TOE
+						 * functionality. */
+			u32 net_ip_addr1;	/* x6E */
+			u32 scsi_ip_addr0;	/* x72 */
+			u32 scsi_ip_addr1;	/* x76 */
+#define EEPROM_UNUSED_3_SIZE   128	/* changed from 144 to account
+					 * for ip addresses */
+			u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */
+			u16 subsysVendorId_f0;	/* xFA */
+			u16 subsysDeviceId_f0;	/* xFC */
+
+			/* Address = 0x7F */
+#define FM93C56A_SIGNATURE  0x9356
+#define FM93C66A_SIGNATURE  0x9366
+			u16 signature;	/* xFE */
+
+#define EEPROM_UNUSED_4_SIZE   250
+			u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */
+			u16 subsysVendorId_f1;	/* x1FA */
+			u16 subsysDeviceId_f1;	/* x1FC */
+			u16 checksum;	/* x1FE */
+		} __attribute__ ((packed)) isp4010;
+		struct {	/* isp4022 */
+			u8 asicId[4];	/* x00 */
+			u8 version;	/* x04 */
+			u8 reserved_5;	/* x05 */
+			u16 boardId;	/* x06 */
+			u8 boardIdStr[16];	/* x08 */
+			u8 serialNumber[16];	/* x18 */
+
+			/* External Hardware Configuration */
+			u16 ext_hw_conf;	/* x28 */
+
+			/* MAC 0 CONFIGURATION */
+			struct eeprom_port_cfg macCfg_port0; /* x2A */
+
+			/* MAC 1 CONFIGURATION */
+			struct eeprom_port_cfg macCfg_port1; /* x4A */
+
+			/* DDR SDRAM Configuration */
+			u16 bufletSize;	/* x6A */
+			u16 bufletCount;	/* x6C */
+			u16 tcpWindowThreshold50; /* x6E */
+			u16 tcpWindowThreshold25; /* x70 */
+			u16 tcpWindowThreshold0; /* x72 */
+			u16 ipHashTableBaseHi;	/* x74 */
+			u16 ipHashTableBaseLo;	/* x76 */
+			u16 ipHashTableSize;	/* x78 */
+			u16 tcpHashTableBaseHi;	/* x7A */
+			u16 tcpHashTableBaseLo;	/* x7C */
+			u16 tcpHashTableSize;	/* x7E */
+			u16 ncbTableBaseHi;	/* x80 */
+			u16 ncbTableBaseLo;	/* x82 */
+			u16 ncbTableSize;	/* x84 */
+			u16 drbTableBaseHi;	/* x86 */
+			u16 drbTableBaseLo;	/* x88 */
+			u16 drbTableSize;	/* x8A */
+			u16 reserved_142[4];	/* x8C */
+
+			/* TCP/IP Parameters */
+			u16 ipReassemblyTimeout; /* x94 */
+			u16 tcpMaxWindowSize;	/* x96 */
+			u16 ipSecurity;	/* x98 */
+			u8 reserved_156[294]; /* x9A */
+			u16 qDebug[8];	/* QLOGIC USE ONLY   x1C0 */
+			struct eeprom_function_cfg funcCfg_fn0;	/* x1D0 */
+			u16 reserved_510; /* x1FE */
+
+			/* Address = 512 */
+			u8 oemSpace[432]; /* x200 */
+			struct bios_params sBIOSParams_fn1; /* x3B0 */
+			struct eeprom_function_cfg funcCfg_fn1;	/* x3D0 */
+			u16 reserved_1022; /* x3FE */
+
+			/* Address = 1024 */
+			u8 reserved_1024[464];	/* x400 */
+			struct eeprom_function_cfg funcCfg_fn2;	/* x5D0 */
+			u16 reserved_1534; /* x5FE */
+
+			/* Address = 1536 */
+			u8 reserved_1536[432];	/* x600 */
+			struct bios_params sBIOSParams_fn3; /* x7B0 */
+			struct eeprom_function_cfg funcCfg_fn3;	/* x7D0 */
+			u16 checksum;	/* x7FE */
+		} __attribute__ ((packed)) isp4022;
+	};
+};
+
+
+#endif	/* _QL4XNVRM_H_ */
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_os.c
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_os.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_os.c	2006-08-17 13:00:29.000000000 -0400
@@ -0,0 +1,1758 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#include <linux/moduleparam.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+
+#include "ql4_def.h"
+
+/*
+ * Driver version
+ */
+char qla4xxx_version_str[40];
+
+/*
+ * SRB allocation cache
+ */
+static kmem_cache_t *srb_cachep;
+
+/*
+ * Module parameter information and variables
+ */
+int ql4xdiscoverywait = 60;
+module_param(ql4xdiscoverywait, int, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(ql4xdiscoverywait, "Discovery wait time");
+int ql4xdontresethba = 0;
+module_param(ql4xdontresethba, int, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(ql4xdontresethba,
+		 "Don't reset the HBA when the driver receives a 0x8002 AEN. "
+		 "Default is 0 - reset the HBA; "
+		 "set to 1 to avoid resetting the HBA.");
+
+int extended_error_logging = 0; /* 0 = off, 1 = log errors */
+module_param(extended_error_logging, int, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(extended_error_logging,
+		 "Option to enable extended error logging, "
+		 "Default is 0 - no logging, 1 - debug logging");
+
+/*
+ * SCSI host template entry points
+ */
+
+void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
+
+/*
+ * iSCSI template entry points
+ */
+static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
+			     uint32_t enable, struct sockaddr *dst_addr);
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
+				  enum iscsi_param param, char *buf);
+static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
+				  enum iscsi_param param, char *buf);
+static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
+static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
+static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
+
+/*
+ * SCSI host template entry points
+ */
+static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
+				void (*done) (struct scsi_cmnd *));
+static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_slave_alloc(struct scsi_device *device);
+static int qla4xxx_slave_configure(struct scsi_device *device);
+
+static struct scsi_host_template qla4xxx_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= DRIVER_NAME,
+	.proc_name		= DRIVER_NAME,
+	.queuecommand		= qla4xxx_queuecommand,
+
+	.eh_device_reset_handler = qla4xxx_eh_device_reset,
+	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
+
+	.slave_configure	= qla4xxx_slave_configure,
+	.slave_alloc		= qla4xxx_slave_alloc,
+
+	.this_id		= -1,
+	.cmd_per_lun		= 3,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= SG_ALL,
+
+	.max_sectors		= 0xFFFF,
+};
+
+static struct iscsi_transport qla4xxx_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= DRIVER_NAME,
+	.param_mask		= ISCSI_CONN_PORT |
+				  ISCSI_CONN_ADDRESS |
+				  ISCSI_TARGET_NAME |
+				  ISCSI_TPGT,
+	.sessiondata_size	= sizeof(struct ddb_entry),
+	.host_template		= &qla4xxx_driver_template,
+
+	.tgt_dscvr		= qla4xxx_tgt_dscvr,
+	.get_conn_param		= qla4xxx_conn_get_param,
+	.get_session_param	= qla4xxx_sess_get_param,
+	.start_conn		= qla4xxx_conn_start,
+	.stop_conn		= qla4xxx_conn_stop,
+	.session_recovery_timedout = qla4xxx_recovery_timedout,
+};
+
+static struct scsi_transport_template *qla4xxx_scsi_transport;
+
+static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
+{
+	struct ddb_entry *ddb_entry = session->dd_data;
+	struct scsi_qla_host *ha = ddb_entry->ha;
+
+	DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) "
+		      "secs exhausted, marking device DEAD.\n", ha->host_no,
+		      __func__, ddb_entry->fw_ddb_index,
+		      ha->port_down_retry_count));
+
+	atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
+
+	DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
+		      "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
+	queue_work(ha->dpc_thread, &ha->dpc_work);
+}
+
+static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
+{
+	struct iscsi_cls_session *session;
+	struct ddb_entry *ddb_entry;
+
+	session = iscsi_dev_to_session(conn->dev.parent);
+	ddb_entry = session->dd_data;
+
+	DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
+		      ddb_entry->ha->host_no, __func__,
+		      ddb_entry->fw_ddb_index));
+	iscsi_unblock_session(session);
+	return 0;
+}
+
+static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
+{
+	struct iscsi_cls_session *session;
+	struct ddb_entry *ddb_entry;
+
+	session = iscsi_dev_to_session(conn->dev.parent);
+	ddb_entry = session->dd_data;
+
+	DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
+		      ddb_entry->ha->host_no, __func__,
+		      ddb_entry->fw_ddb_index));
+	if (flag == STOP_CONN_RECOVER)
+		iscsi_block_session(session);
+	else
+		printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+}
+
+static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
+				  enum iscsi_param param, char *buf)
+{
+	struct ddb_entry *ddb_entry = sess->dd_data;
+	int len;
+
+	switch (param) {
+	case ISCSI_PARAM_TARGET_NAME:
+		len = snprintf(buf, PAGE_SIZE - 1, "%s\n",
+			       ddb_entry->iscsi_name);
+		break;
+	case ISCSI_PARAM_TPGT:
+		len = sprintf(buf, "%u\n", ddb_entry->tpgt);
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return len;
+}
+
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
+				  enum iscsi_param param, char *buf)
+{
+	struct iscsi_cls_session *session;
+	struct ddb_entry *ddb_entry;
+	int len;
+
+	session = iscsi_dev_to_session(conn->dev.parent);
+	ddb_entry = session->dd_data;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		len = sprintf(buf, "%hu\n", ddb_entry->port);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		/* TODO: what are the ipv6 bits */
+		len = sprintf(buf, "%u.%u.%u.%u\n",
+			      NIPQUAD(ddb_entry->ip_addr));
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return len;
+}
+
+static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
+			     uint32_t enable, struct sockaddr *dst_addr)
+{
+	struct scsi_qla_host *ha;
+	struct Scsi_Host *shost;
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+	int ret = 0;
+
+	shost = scsi_host_lookup(host_no);
+	if (IS_ERR(shost)) {
+		printk(KERN_ERR "Could not find host no %u\n", host_no);
+		return -ENODEV;
+	}
+
+	ha = (struct scsi_qla_host *) shost->hostdata;
+
+	switch (type) {
+	case ISCSI_TGT_DSCVR_SEND_TARGETS:
+		if (dst_addr->sa_family == AF_INET) {
+			addr = (struct sockaddr_in *)dst_addr;
+			if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
+					      addr->sin_port) != QLA_SUCCESS)
+				ret = -EIO;
+		} else if (dst_addr->sa_family == AF_INET6) {
+			/*
+			 * TODO: fix qla4xxx_send_tgts
+			 */
+			addr6 = (struct sockaddr_in6 *)dst_addr;
+			if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
+					      addr6->sin6_port) != QLA_SUCCESS)
+				ret = -EIO;
+		} else
+			ret = -ENOSYS;
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	scsi_host_put(shost);
+	return ret;
+}
+
+void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
+{
+	if (!ddb_entry->sess)
+		return;
+
+	if (ddb_entry->conn) {
+		iscsi_if_destroy_session_done(ddb_entry->conn);
+		iscsi_destroy_conn(ddb_entry->conn);
+		iscsi_remove_session(ddb_entry->sess);
+	}
+	iscsi_free_session(ddb_entry->sess);
+}
+
+int qla4xxx_add_sess(struct ddb_entry *ddb_entry, int scan)
+{
+	int err;
+
+	err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
+	if (err) {
+		DEBUG2(printk(KERN_ERR "Could not add session.\n"));
+		return err;
+	}
+
+	ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
+	if (!ddb_entry->conn) {
+		iscsi_remove_session(ddb_entry->sess);
+		DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
+		return -ENOMEM;
+	}
+
+	ddb_entry->sess->recovery_tmo = ddb_entry->ha->port_down_retry_count;
+	if (scan)
+		scsi_scan_target(&ddb_entry->sess->dev, 0,
+				 ddb_entry->sess->target_id,
+				 SCAN_WILD_CARD, 0);
+	iscsi_if_create_session_done(ddb_entry->conn);
+	return 0;
+}
+
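+/**
+ * qla4xxx_alloc_sess - allocates an iscsi class session and its ddb_entry
+ * @ha: Pointer to host adapter structure.
+ *
+ * The ddb_entry is carved out of the session's dd_data area and is
+ * zero-initialized; returns NULL if the session allocation fails.
+ **/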
+struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
+{
+	struct ddb_entry *ddb_entry;
+	struct iscsi_cls_session *sess;
+
+	sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
+	if (!sess)
+		return NULL;
+
+	ddb_entry = sess->dd_data;
+	memset(ddb_entry, 0, sizeof(*ddb_entry));
+	ddb_entry->ha = ha;
+	ddb_entry->sess = sess;
+	return ddb_entry;
+}
+
+/*
+ * Timer routines
+ */
+
+static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
+				unsigned long interval)
+{
+	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
+		     __func__, ha->host->host_no));
+	init_timer(&ha->timer);
+	ha->timer.expires = jiffies + interval * HZ;
+	ha->timer.data = (unsigned long)ha;
+	ha->timer.function = (void (*)(unsigned long))func;
+	add_timer(&ha->timer);
+	ha->timer_active = 1;
+}
+
+static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
+{
+	del_timer_sync(&ha->timer);
+	ha->timer_active = 0;
+}
+
+/***
+ * qla4xxx_mark_device_missing - mark a device as missing.
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ *
+ * This routine marks a device missing and resets the relogin retry count.
+ **/
+void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
+				 struct ddb_entry *ddb_entry)
+{
+	atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
+	DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
+		      ha->host_no, ddb_entry->bus, ddb_entry->target,
+		      ddb_entry->fw_ddb_index));
+	iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
+}
+
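+/*
+ * Allocate an srb from the mempool and bind it to the SCSI command;
+ * the srb pointer is stashed in cmd->SCp.ptr for later lookup.
+ */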
+static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
+				       struct ddb_entry *ddb_entry,
+				       struct scsi_cmnd *cmd,
+				       void (*done)(struct scsi_cmnd *))
+{
+	struct srb *srb;
+
+	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
+	if (!srb)
+		return srb;
+
+	atomic_set(&srb->ref_count, 1);
+	srb->ha = ha;
+	srb->ddb = ddb_entry;
+	srb->cmd = cmd;
+	srb->flags = 0;
+	cmd->SCp.ptr = (void *)srb;
+	cmd->scsi_done = done;
+
+	return srb;
+}
+
+static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
+{
+	struct scsi_cmnd *cmd = srb->cmd;
+
+	if (srb->flags & SRB_DMA_VALID) {
+		if (cmd->use_sg) {
+			pci_unmap_sg(ha->pdev, cmd->request_buffer,
+				     cmd->use_sg, cmd->sc_data_direction);
+		} else if (cmd->request_bufflen) {
+			pci_unmap_single(ha->pdev, srb->dma_handle,
+					 cmd->request_bufflen,
+					 cmd->sc_data_direction);
+		}
+		srb->flags &= ~SRB_DMA_VALID;
+	}
+	cmd->SCp.ptr = NULL;
+}
+
+void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
+{
+	struct scsi_cmnd *cmd = srb->cmd;
+
+	qla4xxx_srb_free_dma(ha, srb);
+
+	mempool_free(srb, ha->srb_mempool);
+
+	cmd->scsi_done(cmd);
+}
+
+/**
+ * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @cmd: Pointer to Linux's SCSI command structure
+ * @done_fn: Function that the driver calls to notify the SCSI mid-layer
+ *	that the command has been processed.
+ *
+ * Remarks:
+ * This routine is invoked by Linux to send a SCSI command to the driver.
+ * The mid-level driver tries to ensure that queuecommand never gets
+ * invoked concurrently with itself or the interrupt handler (although
+ * the interrupt handler may call this routine as part of request-
+ * completion handling).  Unfortunately, it sometimes calls the scheduler
+ * in interrupt context, which is a big NO! NO!
+ **/
+static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
+				void (*done)(struct scsi_cmnd *))
+{
+	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+	struct ddb_entry *ddb_entry = cmd->device->hostdata;
+	struct srb *srb;
+	int rval;
+
+	if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
+		if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
+			cmd->result = DID_NO_CONNECT << 16;
+			goto qc_fail_command;
+		}
+		goto qc_host_busy;
+	}
+
+	spin_unlock_irq(ha->host->host_lock);
+
+	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
+	if (!srb)
+		goto qc_host_busy_lock;
+
+	rval = qla4xxx_send_command_to_isp(ha, srb);
+	if (rval != QLA_SUCCESS)
+		goto qc_host_busy_free_sp;
+
+	spin_lock_irq(ha->host->host_lock);
+	return 0;
+
+qc_host_busy_free_sp:
+	qla4xxx_srb_free_dma(ha, srb);
+	mempool_free(srb, ha->srb_mempool);
+
+qc_host_busy_lock:
+	spin_lock_irq(ha->host->host_lock);
+
+qc_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+qc_fail_command:
+	done(cmd);
+
+	return 0;
+}
+
+/**
+ * qla4xxx_mem_free - frees memory allocated to adapter
+ * @ha: Pointer to host adapter structure.
+ *
+ * Frees memory previously allocated by qla4xxx_mem_alloc
+ **/
+static void qla4xxx_mem_free(struct scsi_qla_host *ha)
+{
+	if (ha->queues)
+		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
+				  ha->queues_dma);
+
+	ha->queues_len = 0;
+	ha->queues = NULL;
+	ha->queues_dma = 0;
+	ha->request_ring = NULL;
+	ha->request_dma = 0;
+	ha->response_ring = NULL;
+	ha->response_dma = 0;
+	ha->shadow_regs = NULL;
+	ha->shadow_regs_dma = 0;
+
+	/* Free srb pool. */
+	if (ha->srb_mempool)
+		mempool_destroy(ha->srb_mempool);
+
+	ha->srb_mempool = NULL;
+
+	/* release io space registers  */
+	if (ha->reg)
+		iounmap(ha->reg);
+	pci_release_regions(ha->pdev);
+}
+
+/**
+ * qla4xxx_mem_alloc - allocates memory for use by adapter.
+ * @ha: Pointer to host adapter structure
+ * 
+ * Allocates DMA memory for request and response queues. Also allocates memory
+ * for srbs.
+ **/
+static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
+{
+	unsigned long align;
+
+	/* Allocate contiguous block of DMA memory for queues. */
+	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
+			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
+			  sizeof(struct shadow_regs) +
+			  MEM_ALIGN_VALUE +
+			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+					&ha->queues_dma, GFP_KERNEL);
+	if (ha->queues == NULL) {
+		dev_warn(&ha->pdev->dev,
+			"Memory Allocation failed - queues.\n");
+
+		goto mem_alloc_error_exit;
+	}
+	memset(ha->queues, 0, ha->queues_len);
+
+	/*
+	 * As per RISC alignment requirements -- the bus-address must be a
+	 * multiple of the request-ring size (in bytes).
+	 */
+	align = 0;
+	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
+		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
+					   (MEM_ALIGN_VALUE - 1));
+
+	/* Update request and response queue pointers. */
+	ha->request_dma = ha->queues_dma + align;
+	ha->request_ring = (struct queue_entry *) (ha->queues + align);
+	ha->response_dma = ha->queues_dma + align +
+		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
+	ha->response_ring = (struct queue_entry *) (ha->queues + align +
+						    (REQUEST_QUEUE_DEPTH *
+						     QUEUE_SIZE));
+	ha->shadow_regs_dma = ha->queues_dma + align +
+		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
+		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
+	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
+						  (REQUEST_QUEUE_DEPTH *
+						   QUEUE_SIZE) +
+						  (RESPONSE_QUEUE_DEPTH *
+						   QUEUE_SIZE));
+
+	/* Allocate memory for srb pool. */
+	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
+					 mempool_free_slab, srb_cachep);
+	if (ha->srb_mempool == NULL) {
+		dev_warn(&ha->pdev->dev,
+			"Memory Allocation failed - SRB Pool.\n");
+
+		goto mem_alloc_error_exit;
+	}
+
+	return QLA_SUCCESS;
+
+mem_alloc_error_exit:
+	qla4xxx_mem_free(ha);
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_timer - checks every second for work to do.
+ * @ha: Pointer to host adapter structure.
+ **/
+static void qla4xxx_timer(struct scsi_qla_host *ha)
+{
+	struct ddb_entry *ddb_entry, *dtemp;
+	int start_dpc = 0;
+
+	/* Search for relogins to time out and handle port down retry. */
+	list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
+		/* Count down time between sending relogins */
+		if (adapter_up(ha) &&
+		    !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+		    atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
+			if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+			    INVALID_ENTRY) {
+				if (atomic_read(&ddb_entry->retry_relogin_timer)
+				    		== 0) {
+					atomic_set(&ddb_entry->
+						retry_relogin_timer,
+						INVALID_ENTRY);
+					set_bit(DPC_RELOGIN_DEVICE,
+						&ha->dpc_flags);
+					set_bit(DF_RELOGIN, &ddb_entry->flags);
+					DEBUG2(printk("scsi%ld: %s: index [%d]"
+						      " login device\n",
+						      ha->host_no, __func__,
+						      ddb_entry->fw_ddb_index));
+				} else
+					atomic_dec(&ddb_entry->
+							retry_relogin_timer);
+			}
+		}
+
+		/* Wait for relogin to timeout */
+		if (atomic_read(&ddb_entry->relogin_timer) &&
+		    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+			/*
+			 * If the relogin times out and the device is
+			 * still NOT ONLINE then try and relogin again.
+			 */
+			if (atomic_read(&ddb_entry->state) !=
+			    DDB_STATE_ONLINE &&
+			    ddb_entry->fw_ddb_device_state ==
+			    DDB_DS_SESSION_FAILED) {
+				/* Reset retry relogin timer */
+				atomic_inc(&ddb_entry->relogin_retry_count);
+				DEBUG2(printk("scsi%ld: index[%d] relogin"
+					      " timed out-retrying"
+					      " relogin (%d)\n",
+					      ha->host_no,
+					      ddb_entry->fw_ddb_index,
+					      atomic_read(&ddb_entry->
+							  relogin_retry_count))
+					);
+				start_dpc++;
+				DEBUG(printk("scsi%ld:%d:%d: index [%d] "
+					     "initiate relogin after"
+					     " %d seconds\n",
+					     ha->host_no, ddb_entry->bus,
+					     ddb_entry->target,
+					     ddb_entry->fw_ddb_index,
+					     ddb_entry->default_time2wait + 4)
+					);
+
+				atomic_set(&ddb_entry->retry_relogin_timer,
+					   ddb_entry->default_time2wait + 4);
+			}
+		}
+	}
+
+	/* Check for heartbeat interval. */
+	if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
+	    ha->heartbeat_interval != 0) {
+		ha->seconds_since_last_heartbeat++;
+		if (ha->seconds_since_last_heartbeat >
+		    ha->heartbeat_interval + 2)
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+	}
+
+
+	/* Wakeup the dpc routine for this adapter, if needed. */
+	if ((start_dpc ||
+	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
+	     test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
+	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+	     test_bit(DPC_AEN, &ha->dpc_flags)) &&
+	     ha->dpc_thread) {
+		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
+			      " - dpc flags = 0x%lx\n",
+			      ha->host_no, __func__, ha->dpc_flags));
+		queue_work(ha->dpc_thread, &ha->dpc_work);
+	}
+
+	/* Reschedule timer thread to call us back in one second */
+	mod_timer(&ha->timer, jiffies + HZ);
+
+	DEBUG2(ha->seconds_since_last_intr++);
+}
+
+/**
+ * qla4xxx_cmd_wait - waits for all outstanding commands to complete
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine stalls the driver until all outstanding commands are returned.
+ * Caller must release the Hardware Lock prior to calling this routine.
+ **/
+static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
+{
+	uint32_t index = 0;
+	int stat = QLA_SUCCESS;
+	unsigned long flags;
+	int wait_cnt = WAIT_CMD_TOV;	/*
+					 * Initialized for 30 seconds as we
+					 * expect all commands to be returned
+					 * ASAP.
+					 */
+
+	while (wait_cnt) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		/* Find a command that hasn't completed. */
+		for (index = 1; index < MAX_SRBS; index++) {
+			if (ha->active_srb_array[index] != NULL)
+				break;
+		}
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		/* If No Commands are pending, wait is complete */
+		if (index == MAX_SRBS) {
+			break;
+		}
+
+		/* If we timed out on waiting for commands to come back
+		 * return ERROR.
+		 */
+		wait_cnt--;
+		if (wait_cnt == 0)
+			stat = QLA_ERROR;
+		else {
+			msleep(1000);
+		}
+	}			/* End of While (wait_cnt) */
+
+	return stat;
+}
+
+/**
+ * qla4010_soft_reset - performs soft reset.
+ * @ha: Pointer to host adapter structure.
+ **/
+static int qla4010_soft_reset(struct scsi_qla_host *ha)
+{
+	uint32_t max_wait_time;
+	unsigned long flags = 0;
+	int status = QLA_ERROR;
+	uint32_t ctrl_status;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/*
+	 * If the SCSI Reset Interrupt bit is set, clear it.
+	 * Otherwise, the Soft Reset won't work.
+	 */
+	ctrl_status = readw(&ha->reg->ctrl_status);
+	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
+		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+
+	/* Issue Soft Reset */
+	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
+	readl(&ha->reg->ctrl_status);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Wait until the Network Reset Intr bit is cleared */
+	max_wait_time = RESET_INTR_TOV;
+	do {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		ctrl_status = readw(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
+			break;
+
+		msleep(1000);
+	} while ((--max_wait_time));
+
+	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: Network Reset Intr not cleared by "
+			      "Network function, clearing it now!\n",
+			      ha->host_no));
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	/* Wait until the firmware tells us the Soft Reset is done */
+	max_wait_time = SOFT_RESET_TOV;
+	do {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		ctrl_status = readw(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
+			status = QLA_SUCCESS;
+			break;
+		}
+
+		msleep(1000);
+	} while ((--max_wait_time));
+
+	/*
+	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
+	 * after the soft reset has taken place.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ctrl_status = readw(&ha->reg->ctrl_status);
+	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
+		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* If the soft reset fails, then most probably the BIOS on the other
+	 * function is also enabled.
+	 * Since the initialization is sequential, the other function
+	 * won't be able to acknowledge the soft reset.
+	 * Issue a force soft reset to work around this scenario.
+	 */
+	if (max_wait_time == 0) {
+		/* Issue Force Soft Reset */
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		/* Wait until the firmware tells us the Soft Reset is done */
+		max_wait_time = SOFT_RESET_TOV;
+		do {
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			ctrl_status = readw(&ha->reg->ctrl_status);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
+				status = QLA_SUCCESS;
+				break;
+			}
+
+			msleep(1000);
+		} while ((--max_wait_time));
+	}
+
+	return status;
+}
+
+/**
+ * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
+ * @ha: Pointer to host adapter structure.
+ **/
+static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+
+	ql4xxx_lock_nvram(ha);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
+	readl(isp_gp_out(ha));
+	mdelay(1);
+
+	writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
+	readl(isp_gp_out(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	mdelay(2523);
+
+	ql4xxx_unlock_nvram(ha);
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine is called just prior to a HARD RESET to return all
+ * outstanding commands back to the Operating System.
+ * Caller should make sure that the following locks are released
+ * before calling this routine: hardware_lock and io_request_lock.
+ **/
+static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
+{
+	struct srb *srb;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (i = 1; i < MAX_SRBS; i++) {
+		if ((srb = ha->active_srb_array[i]) != NULL) {
+			qla4xxx_del_from_active_array(ha, i);
+			srb->cmd->result = DID_RESET << 16;
+			qla4xxx_srb_compl(ha, srb);
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+}
+
+/**
+ * qla4xxx_hard_reset - performs HBA Hard Reset
+ * @ha: Pointer to host adapter structure.
+ **/
+static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
+{
+	/* The QLA4010 really doesn't have an equivalent to a hard reset */
+	qla4xxx_flush_active_srbs(ha);
+	if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
+		int status = QLA_ERROR;
+
+		if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
+		    (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
+		    (qla4010_soft_reset(ha) == QLA_SUCCESS))
+			status = QLA_SUCCESS;
+		return status;
+	} else
+		return qla4010_soft_reset(ha);
+}
+
+/**
+ * qla4xxx_recover_adapter - recovers adapter after a fatal error
+ * @ha: Pointer to host adapter structure.
+ * @renew_ddb_list: Indicates what to do with the adapter's ddb list
+ *	after adapter recovery has completed.
+ *	0=preserve ddb list, 1=destroy and rebuild ddb list
+ **/
+static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, 
+				uint8_t renew_ddb_list)
+{
+	int status;
+
+	/* Stall incoming I/O until we are done */
+	clear_bit(AF_ONLINE, &ha->flags);
+	DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
+		      __func__));
+
+	/* Wait for outstanding commands to complete.
+	 * Stalls the driver for max 30 secs
+	 */
+	status = qla4xxx_cmd_wait(ha);
+
+	qla4xxx_disable_intrs(ha);
+
+	/* Flush any pending ddb changed AENs */
+	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+	/* Reset the firmware.	If successful, function
+	 * returns with ISP interrupts enabled.
+	 */
+	if (status == QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
+			      ha->host_no, __func__));
+		status = qla4xxx_soft_reset(ha);
+	}
+	/* FIXMEkaren: Do we want to keep interrupts enabled and process
+	   AENs after soft reset */
+
+	/* If firmware (SOFT) reset failed, or if all outstanding
+	 * commands have not returned, then do a HARD reset.
+	 */
+	if (status == QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
+			      ha->host_no, __func__));
+		status = qla4xxx_hard_reset(ha);
+	}
+
+	/* Flush any pending ddb changed AENs */
+	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+	/* Re-initialize firmware. If successful, function returns
+	 * with ISP interrupts enabled */
+	if (status == QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s - Initializing adapter..\n",
+			      ha->host_no, __func__));
+
+		/* If successful, AF_ONLINE flag set in
+		 * qla4xxx_initialize_adapter */
+		status = qla4xxx_initialize_adapter(ha, renew_ddb_list);
+	}
+
+	/* Failed adapter initialization?
+	 * Retry reset_ha only if invoked via DPC (DPC_RESET_HA) */
+	if ((test_bit(AF_ONLINE, &ha->flags) == 0) &&
+	    (test_bit(DPC_RESET_HA, &ha->dpc_flags))) {
+		/* Adapter initialization failed, see if we can retry
+		 * resetting the ha */
+		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
+			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
+			DEBUG2(printk("scsi%ld: recover adapter - retrying "
+				      "(%d) more times\n", ha->host_no,
+				      ha->retry_reset_ha_cnt));
+			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+			status = QLA_ERROR;
+		} else {
+			if (ha->retry_reset_ha_cnt > 0) {
+				/* Schedule another Reset HA--DPC will retry */
+				ha->retry_reset_ha_cnt--;
+				DEBUG2(printk("scsi%ld: recover adapter - "
+					      "retry remaining %d\n",
+					      ha->host_no,
+					      ha->retry_reset_ha_cnt));
+				status = QLA_ERROR;
+			}
+
+			if (ha->retry_reset_ha_cnt == 0) {
+				/* Recover adapter retries have been exhausted.
+				 * Adapter DEAD */
+				DEBUG2(printk("scsi%ld: recover adapter "
+					      "failed - board disabled\n",
+					      ha->host_no));
+				qla4xxx_flush_active_srbs(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST,
+					  &ha->dpc_flags);
+				status = QLA_ERROR;
+			}
+		}
+	} else {
+		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+		clear_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags);
+		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+	}
+
+	ha->adapter_error_count++;
+
+	if (status == QLA_SUCCESS)
+		qla4xxx_enable_intrs(ha);
+
+	DEBUG2(printk("scsi%ld: recover adapter .. DONE\n", ha->host_no));
+	return status;
+}
+
+/**
+ * qla4xxx_do_dpc - dpc routine
+ * @data: in our case pointer to adapter structure
+ *
+ * This routine is a task that is scheduled by the interrupt handler
+ * to perform the background processing for interrupts.  We put it
+ * on a task queue that is consumed whenever the scheduler runs; that's
+ * so you can do anything (e.g. put the process to sleep etc.).  In fact,
+ * the mid-level tries to sleep when it reaches the driver threshold
+ * "host->can_queue". This can cause a panic if we were in our interrupt code.
+ **/
+static void qla4xxx_do_dpc(void *data)
+{
+	struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
+	struct ddb_entry *ddb_entry, *dtemp;
+
+	DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
+		      ha->host_no, __func__));
+
+	DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
+		      ha->host_no, __func__, ha->flags));
+	DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
+		      ha->host_no, __func__, ha->dpc_flags));
+
+	/* Initialization not yet finished. Don't do anything yet. */
+	if (!test_bit(AF_INIT_DONE, &ha->flags))
+		return;
+
+	if (adapter_up(ha) ||
+	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
+		if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags))
+			/*
+			 * dg 09/23 Never initialize ddb list
+			 * once we are up and running.
+			 * qla4xxx_recover_adapter(ha,
+			 *    REBUILD_DDB_LIST);
+			 */
+			qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
+
+		if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+			qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
+
+		if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+			uint8_t wait_time = RESET_INTR_TOV;
+			unsigned long flags = 0;
+
+			qla4xxx_flush_active_srbs(ha);
+
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			while ((readw(&ha->reg->ctrl_status) &
+				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
+				if (--wait_time == 0)
+					break;
+
+				spin_unlock_irqrestore(&ha->hardware_lock,
+						       flags);
+
+				msleep(1000);
+
+				spin_lock_irqsave(&ha->hardware_lock, flags);
+			}
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+			if (wait_time == 0)
+				DEBUG2(printk("scsi%ld: %s: SR|FSR "
+					      "bit not cleared-- resetting\n",
+					      ha->host_no, __func__));
+		}
+	}
+
+	/* ---- process AEN? --- */
+	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
+
+	/* ---- Get DHCP IP Address? --- */
+	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
+		qla4xxx_get_dhcp_ip_address(ha);
+
+	/* ---- relogin device? --- */
+	if (adapter_up(ha) &&
+	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+		list_for_each_entry_safe(ddb_entry, dtemp,
+					 &ha->ddb_list, list) {
+			if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+			    atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
+				qla4xxx_relogin_device(ha, ddb_entry);
+
+			/*
+			 * If the mbx cmd times out there is no point
+			 * in continuing further.
+			 * With a large number of targets this can hang
+			 * the system.
+			 */
+			if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+				printk(KERN_WARNING "scsi%ld: %s: "
+				       "need to reset hba\n",
+				       ha->host_no, __func__);
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * qla4xxx_free_adapter - release the adapter
+ * @ha: pointer to adapter structure
+ **/
+static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
+{
+
+	if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
+		/* Turn-off interrupts on the card. */
+		qla4xxx_disable_intrs(ha);
+	}
+
+	/* Kill the kernel thread for this host */
+	if (ha->dpc_thread)
+		destroy_workqueue(ha->dpc_thread);
+
+	/* Issue Soft Reset to put firmware in unknown state */
+	qla4xxx_soft_reset(ha);
+
+	/* Remove timer thread, if present */
+	if (ha->timer_active)
+		qla4xxx_stop_timer(ha);
+
+	/* free extra memory */
+	qla4xxx_mem_free(ha);
+
+	/* Detach interrupts */
+	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+		free_irq(ha->pdev->irq, ha);
+
+	pci_disable_device(ha->pdev);
+
+}
+
+/***
+ * qla4xxx_iospace_config - maps registers
+ * @ha: pointer to adapter structure
+ *
+ * This routine maps the HBA's registers from the PCI address space
+ * into the kernel virtual address space for memory-mapped I/O.
+ **/
+static int qla4xxx_iospace_config(struct scsi_qla_host *ha)
+{
+	unsigned long pio, pio_len, pio_flags;
+	unsigned long mmio, mmio_len, mmio_flags;
+
+	pio = pci_resource_start(ha->pdev, 0);
+	pio_len = pci_resource_len(ha->pdev, 0);
+	pio_flags = pci_resource_flags(ha->pdev, 0);
+	if (pio_flags & IORESOURCE_IO) {
+		if (pio_len < MIN_IOBASE_LEN) {
+			dev_warn(&ha->pdev->dev,
+				"Invalid PCI I/O region size\n");
+			pio = 0;
+		}
+	} else {
+		dev_warn(&ha->pdev->dev, "region #0 not a PIO resource\n");
+		pio = 0;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	mmio = pci_resource_start(ha->pdev, 1);
+	mmio_len = pci_resource_len(ha->pdev, 1);
+	mmio_flags = pci_resource_flags(ha->pdev, 1);
+
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		dev_err(&ha->pdev->dev,
+			"region #1 not an MMIO resource, aborting\n");
+
+		goto iospace_error_exit;
+	}
+	if (mmio_len < MIN_IOBASE_LEN) {
+		dev_err(&ha->pdev->dev,
+			"Invalid PCI mem region size, aborting\n");
+		goto iospace_error_exit;
+	}
+
+	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
+		dev_warn(&ha->pdev->dev,
+			"Failed to reserve PIO/MMIO regions\n");
+
+		goto iospace_error_exit;
+	}
+
+	ha->pio_address = pio;
+	ha->pio_length = pio_len;
+	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
+	if (!ha->reg) {
+		dev_err(&ha->pdev->dev,
+			"cannot remap MMIO, aborting\n");
+
+		goto iospace_error_exit;
+	}
+
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
+/**
+ * qla4xxx_probe_adapter - callback function to probe HBA
+ * @pdev: pointer to pci_dev structure
+ * @ent: pointer to the matching pci_device_id entry
+ *
+ * This routine will probe for Qlogic 4xxx iSCSI host adapters.
+ * It returns zero if successful. It also initializes all data necessary for
+ * the driver.
+ **/
+static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
+					   const struct pci_device_id *ent)
+{
+	int ret = -ENODEV, status;
+	struct Scsi_Host *host;
+	struct scsi_qla_host *ha;
+	struct ddb_entry *ddb_entry, *ddbtemp;
+	uint8_t init_retry_count = 0;
+	char buf[34];
+
+	if (pci_enable_device(pdev))
+		return -1;
+
+	host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
+	if (host == NULL) {
+		printk(KERN_WARNING
+		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
+		goto probe_disable_device;
+	}
+
+	/* Clear our data area */
+	ha = (struct scsi_qla_host *) host->hostdata;
+	memset(ha, 0, sizeof(*ha));
+
+	/* Save the information from PCI BIOS.	*/
+	ha->pdev = pdev;
+	ha->host = host;
+	ha->host_no = host->host_no;
+
+	/* Configure PCI I/O space. */
+	ret = qla4xxx_iospace_config(ha);
+	if (ret)
+		goto probe_failed;
+
+	dev_info(&ha->pdev->dev, "Found an ISP%04x, irq %d, iobase 0x%p\n",
+		   pdev->device, pdev->irq, ha->reg);
+
+	qla4xxx_config_dma_addressing(ha);
+
+	/* Initialize lists and spinlocks. */
+	INIT_LIST_HEAD(&ha->ddb_list);
+	INIT_LIST_HEAD(&ha->free_srb_q);
+
+	mutex_init(&ha->mbox_sem);
+	init_waitqueue_head(&ha->mailbox_wait_queue);
+
+	spin_lock_init(&ha->hardware_lock);
+	spin_lock_init(&ha->list_lock);
+
+	/* Allocate dma buffers */
+	if (qla4xxx_mem_alloc(ha)) {
+		dev_warn(&ha->pdev->dev,
+			   "[ERROR] Failed to allocate memory for adapter\n");
+
+		ret = -ENOMEM;
+		goto probe_failed;
+	}
+
+	/*
+	 * Initialize the Host adapter request/response queues and
+	 * firmware
+	 * NOTE: interrupts enabled upon successful completion
+	 */
+	status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
+	while (status == QLA_ERROR && init_retry_count++ < MAX_INIT_RETRIES) {
+		DEBUG2(printk("scsi: %s: retrying adapter initialization "
+			      "(%d)\n", __func__, init_retry_count));
+		qla4xxx_soft_reset(ha);
+		status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST);
+	}
+	if (status == QLA_ERROR) {
+		dev_warn(&ha->pdev->dev, "Failed to initialize adapter\n");
+
+		ret = -ENODEV;
+		goto probe_failed;
+	}
+
+	host->cmd_per_lun = 3;
+	host->max_channel = 0;
+	host->max_lun = MAX_LUNS - 1;
+	host->max_id = MAX_TARGETS;
+	host->max_cmd_len = IOCB_MAX_CDB_LEN;
+	host->can_queue = REQUEST_QUEUE_DEPTH + 128;
+	host->transportt = qla4xxx_scsi_transport;
+
+	/* Startup the kernel thread for this host adapter. */
+	DEBUG2(printk("scsi: %s: Starting kernel thread for "
+		      "qla4xxx_dpc\n", __func__));
+	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
+	ha->dpc_thread = create_singlethread_workqueue(buf);
+	if (!ha->dpc_thread) {
+		dev_warn(&ha->pdev->dev, "Unable to start DPC thread!\n");
+		ret = -ENODEV;
+		goto probe_failed;
+	}
+	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
+
+	ret = request_irq(pdev->irq, qla4xxx_intr_handler,
+			  SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
+	if (ret) {
+		dev_warn(&ha->pdev->dev, "Failed to reserve interrupt %d"
+			"; already in use.\n", pdev->irq);
+		goto probe_failed;
+	}
+	set_bit(AF_IRQ_ATTACHED, &ha->flags);
+	host->irq = pdev->irq;
+	DEBUG(printk("scsi%d: irq %d attached\n", ha->host_no, ha->pdev->irq));
+
+	qla4xxx_enable_intrs(ha);
+
+	/* Start timer thread. */
+	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
+
+	set_bit(AF_INIT_DONE, &ha->flags);
+
+	pci_set_drvdata(pdev, ha);
+
+	ret = scsi_add_host(host, &pdev->dev);
+	if (ret)
+		goto probe_failed;
+
+	/* Update transport device information for all devices. */
+	list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
+		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
+			if (qla4xxx_add_sess(ddb_entry, 1))
+				goto remove_host;
+	}
+
+	printk(KERN_INFO
+	       " QLogic iSCSI HBA Driver version: %s\n"
+	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
+	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
+	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
+	       ha->patch_number, ha->build_number);
+
+	return 0;
+
+remove_host:
+	qla4xxx_free_ddb_list(ha);
+	scsi_remove_host(host);
+
+probe_failed:
+	qla4xxx_free_adapter(ha);
+	scsi_host_put(ha->host);
+
+probe_disable_device:
+	pci_disable_device(pdev);
+
+	return ret;
+}
+
+/**
+ * qla4xxx_remove_adapter - callback function to remove adapter.
+ * @pdev: PCI device pointer
+ **/
+static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
+{
+	struct scsi_qla_host *ha;
+
+	ha = pci_get_drvdata(pdev);
+
+	/* remove devs from iscsi_sessions to scsi_devices */
+	qla4xxx_free_ddb_list(ha);
+
+	scsi_remove_host(ha->host);
+
+	qla4xxx_free_adapter(ha);
+
+	scsi_host_put(ha->host);
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+/**
+ * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
+ * @ha: HA context
+ *
+ * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
+ * supported addressing method.
+ */
+void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
+{
+	int retval;
+
+	/* Update our PCI device dma_mask for full 64 bit mask */
+	if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
+		if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
+			dev_dbg(&ha->pdev->dev,
+				  "Failed to set 64 bit PCI consistent mask; "
+				   "using 32 bit.\n");
+			retval = pci_set_consistent_dma_mask(ha->pdev,
+							     DMA_32BIT_MASK);
+		}
+	} else
+		retval = pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
+}
+
+static int qla4xxx_slave_alloc(struct scsi_device *sdev)
+{
+	struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target);
+	struct ddb_entry *ddb = sess->dd_data;
+
+	sdev->hostdata = ddb;
+	return 0;
+}
+
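+/*
+ * Enable tagged command queueing with a queue depth of 32 when the
+ * device supports it.
+ */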
+static int qla4xxx_slave_configure(struct scsi_device *sdev)
+{
+	if (sdev->tagged_supported)
+		scsi_activate_tcq(sdev, 32);
+	else
+		scsi_deactivate_tcq(sdev, 32);
+
+	return 0;
+}
+
+/**
+ * qla4xxx_del_from_active_array - returns an active srb
+ * @ha: Pointer to host adapter structure.
+ * @index: index into the active_array
+ *
+ * This routine removes and returns the srb at the specified index
+ **/
+struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
+{
+	struct srb *srb = NULL;
+
+	/* validate handle and remove from active array */
+	if (index >= MAX_SRBS)
+		return srb;
+
+	srb = ha->active_srb_array[index];
+	ha->active_srb_array[index] = NULL;
+	if (!srb)
+		return srb;
+
+	/* update counters */
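+	/*
+	 * Give the request-queue entries consumed by this command's IOCBs
+	 * back to the ring accounting and clear the scsi_cmnd back-pointer
+	 * so the command is no longer seen as outstanding.
+	 */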
+	if (srb->flags & SRB_DMA_VALID) {
+		ha->req_q_count += srb->iocb_cnt;
+		ha->iocb_cnt -= srb->iocb_cnt;
+		if (srb->cmd)
+			srb->cmd->host_scribble = NULL;
+	}
+	return srb;
+}
+
+/**
+ * qla4xxx_soft_reset - performs a SOFT RESET of the hba.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_soft_reset(struct scsi_qla_host *ha)
+{
+
+	DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
+		      __func__));
+	if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
+		int status = QLA_ERROR;
+
+		if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
+		    (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
+		    (qla4010_soft_reset(ha) == QLA_SUCCESS))
+			status = QLA_SUCCESS;
+		return status;
+	} else
+		return qla4010_soft_reset(ha);
+}
+
+/**
+ * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
+ * @ha: actual ha whose done queue will contain the command returned by firmware.
+ * @cmd: Scsi Command to wait on.
+ *
+ * This routine waits for the command to be returned by the firmware
+ * for a bounded maximum time.
+ **/
+static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
+				      struct scsi_cmnd *cmd)
+{
+	int done = 0;
+	struct srb *rp;
+	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
+
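+	/*
+	 * The srb pointer cached in cmd->SCp.ptr goes NULL once the command
+	 * has been returned to the midlayer; poll it every two seconds,
+	 * giving up after EH_WAIT_CMD_TOV iterations.
+	 */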
+	do {
+		/* Check to see if it has been returned to the OS. */
+		rp = (struct srb *) cmd->SCp.ptr;
+		if (rp == NULL) {
+			done++;
+			break;
+		}
+
+		msleep(2000);
+	} while (max_wait_time--);
+
+	return done;
+}
+
+/**
+ * qla4xxx_wait_for_hba_online - waits for HBA to come online
+ * @ha: Pointer to host adapter structure
+ **/
+static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
+{
+	unsigned long wait_online;
+
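+	/*
+	 * Poll the adapter state for up to 30 seconds, bailing out early
+	 * once no adapter-reset retries remain.
+	 */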
+	wait_online = jiffies + (30 * HZ);
+	while (time_before(jiffies, wait_online)) {
+
+		if (adapter_up(ha))
+			return QLA_SUCCESS;
+		else if (ha->retry_reset_ha_cnt == 0)
+			return QLA_ERROR;
+
+		msleep(2000);
+	}
+
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_eh_wait_for_active_target_commands - wait for active cmds to finish.
+ * @ha: pointer to HBA
+ * @t: target id
+ * @l: lun id
+ *
+ * This function waits for all outstanding commands to a lun to complete. It
+ * returns 0 if all pending commands are returned and nonzero otherwise.
+ **/
+static int qla4xxx_eh_wait_for_active_target_commands(struct scsi_qla_host *ha,
+						 int t, int l)
+{
+	int cnt;
+	int status;
+	struct srb *sp;
+	struct scsi_cmnd *cmd;
+
+	/*
+	 * Waiting for all commands for the designated target in the active
+	 * array
+	 */
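+	/*
+	 * Scan the whole active srb array; each slot is sampled under the
+	 * hardware lock, but the actual wait for a matching command runs
+	 * with the lock dropped.
+	 */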
+	status = 0;
+	for (cnt = 1; cnt < MAX_SRBS; cnt++) {
+		spin_lock(&ha->hardware_lock);
+		sp = ha->active_srb_array[cnt];
+		if (sp) {
+			cmd = sp->cmd;
+			spin_unlock(&ha->hardware_lock);
+			if (cmd->device->id == t && cmd->device->lun == l) {
+				if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+					status++;
+					break;
+				}
+			}
+		} else {
+			spin_unlock(&ha->hardware_lock);
+		}
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_eh_device_reset - callback for target reset.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to reset all luns on the
+ * specified target.
+ **/
+static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+	struct ddb_entry *ddb_entry = cmd->device->hostdata;
+	struct srb *sp;
+	int ret = FAILED, stat;
+
+	sp = (struct srb *) cmd->SCp.ptr;
+	if (!sp || !ddb_entry)
+		return ret;
+
+	dev_info(&ha->pdev->dev,
+		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
+		   cmd->device->channel, cmd->device->id, cmd->device->lun);
+
+	DEBUG2(printk(KERN_INFO
+		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
+		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
+		      cmd, jiffies, cmd->timeout_per_command / HZ,
+		      ha->dpc_flags, cmd->result, cmd->allowed));
+
+	/* FIXME: wait for hba to go online */
+	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
+	if (stat != QLA_SUCCESS) {
+		dev_info(&ha->pdev->dev, "DEVICE RESET FAILED. %d\n", stat);
+		goto eh_dev_reset_done;
+	}
+
+	/* Send marker. */
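+	/*
+	 * Setting marker_needed defers the marker IOCB to the next I/O
+	 * submission, so the firmware can synchronize after the reset.
+	 */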
+	ha->marker_needed = 1;
+
+	/*
+	 * If we are coming down the EH path, wait for all commands to complete
+	 * for the device.
+	 */
+	if (cmd->device->host->shost_state == SHOST_RECOVERY) {
+		if (qla4xxx_eh_wait_for_active_target_commands(ha,
+							       cmd->device->id,
+							       cmd->device->
+							       lun)) {
+			dev_info(&ha->pdev->dev,
+				   "DEVICE RESET FAILED - waiting for "
+				   "commands.\n");
+			goto eh_dev_reset_done;
+		}
+	}
+
+	dev_info(&ha->pdev->dev,
+		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
+		   ha->host_no, cmd->device->channel, cmd->device->id,
+		   cmd->device->lun);
+
+	ret = SUCCESS;
+
+eh_dev_reset_done:
+
+	return ret;
+}
+
+/**
+ * qla4xxx_eh_host_reset - kernel callback
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is invoked by the Linux kernel to perform fatal error
+ * recovery on the specified adapter.
+ **/
+static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
+{
+	int return_status = FAILED;
+	struct scsi_qla_host *ha;
+
+	ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
+
+	dev_info(&ha->pdev->dev,
+		   "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no,
+		   cmd->device->channel, cmd->device->id, cmd->device->lun);
+
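+	/*
+	 * If the adapter never comes back online (reset retries exhausted),
+	 * there is nothing more the host-reset handler can do.
+	 */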
+	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
+			      "DEAD.\n", ha->host_no, cmd->device->channel,
+			      __func__));
+
+		return FAILED;
+	}
+
+	if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) {
+		return_status = SUCCESS;
+	}
+
+	dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
+		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
+
+	return return_status;
+}
+
+
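+/*
+ * PCI IDs claimed by this driver: ISP4010 and ISP4022 with any subsystem
+ * IDs.  MODULE_DEVICE_TABLE() exports the table so the module can be
+ * autoloaded when a matching device is found.
+ */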
+static struct pci_device_id qla4xxx_pci_tbl[] = {
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{0, 0},
+};
+MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
+
+struct pci_driver qla4xxx_pci_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= qla4xxx_pci_tbl,
+	.probe		= qla4xxx_probe_adapter,
+	.remove		= qla4xxx_remove_adapter,
+};
+
+static int __init qla4xxx_module_init(void)
+{
+	int ret;
+
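+	/*
+	 * Initialization order: SRB slab cache, version string, iSCSI
+	 * transport registration, then PCI driver registration; the error
+	 * labels below unwind in reverse order.
+	 */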
+	/* Allocate cache for SRBs. */
+	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
+				       SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (srb_cachep == NULL) {
+		printk(KERN_ERR
+		       "%s: Unable to allocate SRB cache..."
+		       "Failing load!\n", DRIVER_NAME);
+		ret = -ENOMEM;
+		goto no_srp_cache;
+	}
+
+	/* Derive version string. */
+	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
+	if (extended_error_logging)
+		strcat(qla4xxx_version_str, "-debug");
+
+	qla4xxx_scsi_transport =
+		iscsi_register_transport(&qla4xxx_iscsi_transport);
+	if (!qla4xxx_scsi_transport) {
+		ret = -ENODEV;
+		goto release_srb_cache;
+	}
+
+	ret = pci_register_driver(&qla4xxx_pci_driver);
+	if (ret)
+		goto unregister_transport;
+
+	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
+	return 0;
+unregister_transport:
+	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
+release_srb_cache:
+	kmem_cache_destroy(srb_cachep);
+no_srp_cache:
+	return ret;
+}
+
+static void __exit qla4xxx_module_exit(void)
+{
+	pci_unregister_driver(&qla4xxx_pci_driver);
+	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
+	kmem_cache_destroy(srb_cachep);
+}
+
+module_init(qla4xxx_module_init);
+module_exit(qla4xxx_module_exit);
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
diff -Naurp linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_version.h linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_version.h
--- linux-2.6.17.noarch/drivers/scsi/qla4xxx/ql4_version.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.17.noarch.qla4xxx/drivers/scsi/qla4xxx/ql4_version.h	2006-08-17 12:57:22.000000000 -0400
@@ -0,0 +1,13 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#define QLA4XXX_DRIVER_VERSION	"5.00.05b9-k"
+
+#define QL4_DRIVER_MAJOR_VER	5
+#define QL4_DRIVER_MINOR_VER	0
+#define QL4_DRIVER_PATCH_VER	5
+#define QL4_DRIVER_BETA_VER	9