kernel-2.6.18-238.el5.src.rpm

From: Marcus Barrow <mbarrow@redhat.com>
Date: Wed, 10 Jun 2009 08:57:04 -0400
Subject: [net] qla2xxx, ql8xxx : support for 10 GigE
Message-id: 20090610125704.7984.85449.sendpatchset@file.bos.redhat.com
O-Subject: [rhel 5.4 patch] [ V4 ] qla2xxx, ql8xxx : Support for QLogic 10 Gb ether driver.
Bugzilla: 479288
RH-Acked-by: Neil Horman <nhorman@redhat.com>
RH-Acked-by: David Miller <davem@redhat.com>
RH-Acked-by: Andy Gospodarek <gospo@redhat.com>
RH-Acked-by: Stanislaw Gruszka <sgruszka@redhat.com>

BZ 479288 [ V4 ] ql8xxx - Support 10 Gb ether driver.

Version 4 of this patch.

This patch has been updated to address Neil Horman's concerns:
 - Compatibility code not required for RHEL 5.4 has been removed.
 - Code has been consolidated to use the routine "netif_rx_schedule()".
 - The net_poll() routine was changed to correctly handle budget and
   quota (see the sketch below).
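
For reference, here is a minimal sketch of how a 2.6.18-era net_poll()
routine typically accounts for both the per-device quota and the global
budget. It is illustrative only -- the function and helper names are
hypothetical and this is not the driver's actual code (assumes
<linux/netdevice.h>):

    /* Hypothetical old-style NAPI poll routine (2.6.18 API). */
    static int example_poll(struct net_device *dev, int *budget)
    {
            int work_to_do = min(dev->quota, *budget);
            int work_done;

            /* example_clean_rx_ring() is an assumed helper that services
             * at most work_to_do completions and returns how many it did. */
            work_done = example_clean_rx_ring(dev, work_to_do);

            *budget -= work_done;
            dev->quota -= work_done;

            if (work_done < work_to_do) {
                    /* Ring is drained: leave the poll list and re-enable
                     * the device's completion interrupt. */
                    netif_rx_complete(dev);
                    example_enable_irq(dev);        /* assumed helper */
                    return 0;
            }
            return 1;       /* more work pending; stay scheduled */
    }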

This is submitted without the correct config-generic file portion,
because my laptop (MacOS) couldn't run the Makefile and I have so far
been unable to configure my lab machines to do this work. I will re-post
when the config changes are available.

Version 3 of this patch.

I am re-submitting this driver because the updates in the two
subsequent patches have become unwieldy. This driver has been reformatted
to remove extra space characters and to shorten lines where appropriate.

Updates for this driver were also reviewed, and found wanting, in the
threads below; their contents have now been folded into this driver
patch.

[rhel 5.4 patch] qlge : updates and fixes from upstream, part 1
[rhel 5.4 patch] qlge : updates and fixes from upstream, part 2

Here is a synopsis of the variations between the upstream driver and the
one being submitted here for RHEL 5.4 ("R54" below). Some deltas are
caused by differences in the interfaces between upstream and 5.4, and
they are not listed here.

Features:
 1)  R54 has support for large queues beyond 512 entries.
 2)  R54 has core dump code.
 3)  R54 has detection for the alternate NIC PCIe function.
 4)  R54 has more ethtool support: fw_rev_id, led, loopback, wol,
     and get/set tx csum.
 5)  R54 has more debug printing.
 6)  R54 has BOFM changes. This is an interface to the IBM firmware boot
     environment; it provides MAC addresses etc.
 7)  R54 has a coalesce wait of 1000 us (see the sketch after this list).
 8)  R54 has the flush removed from ql_enable_completion_interrupt().
 9)  R54 has prefetch() calls.
 10) R54 handles TX completions in the ISR.
 11) R54 turns on coalescing selectively per queue type.
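
The 1000 us coalesce wait mentioned in item 7 corresponds to
DFLT_COALESCE_WAIT in qlge.h below. As a rough, hypothetical sketch (not
the driver's actual code), the defaults would be programmed into a
completion queue init control block roughly like this; the cqicb and
ql_adapter field names are taken from qlge.h in this patch:

    /* Hypothetical helper showing where the default coalescing values
     * would land in the chip's CQ init control block. */
    static void example_default_coalesce(struct ql_adapter *qdev,
                                         struct cqicb *cqicb)
    {
            qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;         /* 1000 us */
            qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

            cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
            cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
    }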

Bug fixes:
 1)  R54 has a longer UDELAY value: 100 us, versus 10 us upstream.
     It is used when manipulating device registers (see the polling
     sketch after this list).
 2)  R54 has a 100 ms wait, repeated up to 1000 times, in
     ql_wait_mbx_cmd_cmplt().
 3)  R54 has a fix for the MTU in ql_mpi_port_cfg_work().
 4)  R54 has a fix in ql_mpi_work() for error checking.
 5)  R54 has rx checking for len > MTU.
 6)  R54 tosses error frames.
 7)  R54 uses 4-byte queue alignment instead of the old queue size.
 8)  R54 has a bonding fix.
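
Item 1 refers to the register-ready polling pattern. A minimal sketch of
that pattern, using the UDELAY_COUNT/UDELAY_DELAY constants and the
ql_read32() accessor defined in qlge.h below, looks roughly like this
(illustrative only -- it is not the actual ql_wait_reg_rdy()
implementation declared later in qlge.h):

    /* Hypothetical register-ready poll; assumes <linux/delay.h>. */
    static int example_wait_reg_rdy(struct ql_adapter *qdev, u32 reg,
                                    u32 rdy_bit, u32 err_bit)
    {
            u32 temp;
            int count = UDELAY_COUNT;               /* 3 attempts */

            while (count) {
                    temp = ql_read32(qdev, reg);
                    if (temp & err_bit)
                            return -EIO;            /* device flagged an error */
                    if (temp & rdy_bit)
                            return 0;               /* register is ready */
                    udelay(UDELAY_DELAY);           /* 100 us in the R54 driver */
                    count--;
            }
            return -ETIMEDOUT;
    }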

Where suitable, these changes are being queued for submission upstream.
Limited resources have forced most of the testing and development to be
based on RHEL 5.4.

Version 2 of this patch. This patch was modified to remove the ifdefs
that Pete Zaitcev objected to (there were several of this type):

+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif

This patch provides the support for the 10 Gb Ethernet NIC
on the new QLogic FCoE CNA.

It applies and builds cleanly with 2.6.18-152. This driver is currently
upstream.

Tested by QLogic. This hardware is currently shipping to OEMs.

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 46e609d..8264a2f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2557,6 +2557,16 @@ config MLX4_EN
 	  This driver supports Mellanox Technologies ConnectX Ethernet
 	  devices.
 
+config QLGE
+       tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+       depends on PCI
+       default m 	
+       ---help---
+         This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+         To compile this driver as a module, choose M here: the module
+         will be called qlge.
+
 config MLX4_CORE
 	tristate
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 337bea0..0dd8fb8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -124,6 +124,7 @@ obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLGE) += qlge/
 
 obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 0000000..8a19765
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Qlogic 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_QLGE) += qlge.o
+
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/kcompat.h b/drivers/net/qlge/kcompat.h
new file mode 100644
index 0000000..c2d12e1
--- /dev/null
+++ b/drivers/net/qlge/kcompat.h
@@ -0,0 +1,139 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author:     Linux qlge network device driver by
+ *                      Ron Mercer <ron.mercer@qlogic.com>
+ */
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef netdev_alloc_skb
+#define netdev_alloc_skb _kc_netdev_alloc_skb
+static inline struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+						unsigned int length)
+{
+	struct sk_buff *skb;
+	skb = alloc_skb(length + 16, GFP_ATOMIC);
+	if (likely(skb != NULL)) {
+		skb_reserve(skb, 16);
+		skb->dev = dev;
+	}
+	return skb;
+}
+#endif
+
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+	return skb->h.raw;
+}
+static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
+{
+	return (struct udphdr *)skb_transport_header(skb);
+}
+
+#define pci_channel_offline(pdev) (pdev->error_state && \
+	pdev->error_state != pci_channel_io_normal)
+
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
+
+#ifndef RHEL_RELEASE_CODE
+#define RHEL_RELEASE_CODE 0
+#endif
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a, b) 0
+#endif
+#if !defined(__USE_COMPAT_LAYER_2_6_18_PLUS__)
+#if (!((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4, 4)) && \
+	(RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 0)) || \
+	(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5, 0))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#endif /* !defined(__USE_COMPAT_LAYER_2_6_18_PLUS__) */
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq,
+			new_handler_t handler, unsigned long flags,
+			const char *devname, void *dev_id)
+{
+	irq_handler_t new_handler = (irq_handler_t) handler;
+	return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+#if defined(__COMPAT_LAYER_2_6_18_PLUS__)
+#undef irq_handler_t
+#endif
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) \
+		_kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+
+#if !defined(__USE_COMPAT_LAYER_2_6_18_PLUS__)
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+	INIT_LIST_HEAD(&(_work)->entry); \
+	(_work)->pending = 0; \
+	(_work)->func = (void (*)(void *))_func; \
+	(_work)->data = _work; \
+	init_timer(&(_work)->timer); \
+} while (0)
+#define INIT_DELAYED_WORK INIT_WORK
+#endif
+
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+
+#define cancel_delayed_work_sync(x) cancel_delayed_work(x)
+
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 16))
+typedef __u16 __bitwise __sum16;
+#endif /* > 2.6.18 */
+
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 0000000..ac980a5
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,1968 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author:	Linux qlge network device driver by
+ *			Ron Mercer <ron.mercer@qlogic.com>
+ */
+#ifndef _QLGE_H_
+#define _QLGE_H_
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "kcompat.h"
+
+/*
+ * General definitions...
+ */
+#define DRV_NAME	"qlge"
+#define DRV_STRING	"QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_VERSION	"1.00.00.13"
+#define DIS_VERSION	"2.6.16-2.6.18-p13"
+#define REL_DATE	"090609"
+
+#define PFX "qlge: "
+#define QPRINTK(qdev, nlevel, klevel, fmt, args...)	\
+	do {	\
+	if (!((qdev)->msg_enable & NETIF_MSG_##nlevel))		\
+		;						\
+	else							\
+		dev_printk(KERN_##klevel, &((qdev)->pdev->dev),	\
+			"%s: " fmt, __func__, ##args);  \
+	} while (0)
+
+#define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
+
+#define QLGE_VENDOR_ID	0x1077
+#define QLGE_DEVICE_ID_8012	0x8012
+#define QLGE_DEVICE_ID_8000	0x8000
+
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
+
+#define NUM_TX_RING_ENTRIES	1024	/* Power of 2, range 32 to 65536 */
+#define NUM_RX_RING_ENTRIES	1024	/* Power of 2, range 32 to 65536 */
+
+#define NUM_SMALL_BUFFERS	1024	/* Power of 2, range 32 to 65536 */
+#define NUM_LARGE_BUFFERS	1024	/* Power of 2, range 32 to 65536 */
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+		(((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+		(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
+
+#define RX_RING_SHADOW_SPACE	(sizeof(u64) + \
+		MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+		MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+
+#define SMALL_BUFFER_SIZE 2504	/* Per FCoE largest frame for normal MTU */
+#define LARGE_BUFFER_SIZE 9600	/* Per FCoE largest frame for jumbo MTU */
+
+#define MAX_SPLIT_SIZE 1023
+#define QLGE_SB_PAD 32
+
+#define MAX_CQ 128
+#define DFLT_COALESCE_WAIT 1000	/* 1000 usec wait for coalescing */
+#define MAX_INTER_FRAME_WAIT 10	/* 10 usec max interframe-wait for coalescing */
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 100
+
+#define TX_DESC_PER_IOCB 8
+/* The maximum number of frags we handle is based
+ * on PAGE_SIZE...
+ */
+#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13)	/* 4k & 8k pages */
+#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
+#else /* all other page sizes */
+#define TX_DESC_PER_OAL 0
+#endif
+
+/* Word shifting for converting 64-bit
+ * address to a series of 16-bit words.
+ * This is used for some MPI firmware
+ * mailbox commands.
+ */
+#define LSW(x)  ((u16)(x))
+#define MSW(x)  ((u16)((u32)(x) >> 16))
+#define LSD(x)  ((u32)((u64)(x)))
+#define MSD(x)  ((u32)((((u64)(x)) >> 16) >> 16))
+
+/* MPI test register definitions. This register
+ * is used for determining alternate NIC function's
+ * PCI->func number.
+ */
+enum {
+	MPI_TEST_FUNC_PORT_CFG = 0x1002,
+	MPI_TEST_NIC1_FUNC_SHIFT = 1,
+	MPI_TEST_NIC2_FUNC_SHIFT = 5,
+	MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+};
+
+/*
+ * Processor Address Register (PROC_ADDR) bit definitions.
+ */
+enum {
+
+	/* Misc. stuff */
+	MAILBOX_COUNT = 16,
+
+	PROC_ADDR_RDY = (1 << 31),
+	PROC_ADDR_R = (1 << 30),
+	PROC_ADDR_ERR = (1 << 29),
+	PROC_ADDR_DA = (1 << 28),
+	PROC_ADDR_FUNC0_MBI = 0x00001180,
+	PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
+	PROC_ADDR_FUNC0_CTL = 0x000011a1,
+	PROC_ADDR_FUNC2_MBI = 0x00001280,
+	PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
+	PROC_ADDR_FUNC2_CTL = 0x000012a1,
+	PROC_ADDR_MPI_RISC = 0x00000000,
+	PROC_ADDR_MDE = 0x00010000,
+	PROC_ADDR_REGBLOCK = 0x00020000,
+	PROC_ADDR_RISC_REG = 0x00030000,
+};
+
+/*
+ * System Register (SYS) bit definitions.
+ */
+enum {
+	SYS_EFE = (1 << 0),
+	SYS_FAE = (1 << 1),
+	SYS_MDC = (1 << 2),
+	SYS_DST = (1 << 3),
+	SYS_DWC = (1 << 4),
+	SYS_EVW = (1 << 5),
+	SYS_OMP_DLY_MASK = 0x3f000000,
+	/*
+	 * There are no values defined as of edit #15.
+	 */
+	SYS_ODI = (1 << 14),
+};
+
+/*
+ *  Reset/Failover Register (RST_FO) bit definitions.
+ */
+enum {
+	RST_FO_TFO = (1 << 0),
+	RST_FO_RR_MASK = 0x00060000,
+	RST_FO_RR_CQ_CAM = 0x00000000,
+	RST_FO_RR_DROP = 0x00000001,
+	RST_FO_RR_DQ = 0x00000002,
+	RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
+	RST_FO_FRB = (1 << 12),
+	RST_FO_MOP = (1 << 13),
+	RST_FO_REG = (1 << 14),
+	RST_FO_FR = (1 << 15),
+};
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+	FSC_DBRST_MASK = 0x00070000,
+	FSC_DBRST_256 = 0x00000000,
+	FSC_DBRST_512 = 0x00000001,
+	FSC_DBRST_768 = 0x00000002,
+	FSC_DBRST_1024 = 0x00000003,
+	FSC_DBL_MASK = 0x00180000,
+	FSC_DBL_DBRST = 0x00000000,
+	FSC_DBL_MAX_PLD = 0x00000008,
+	FSC_DBL_MAX_BRST = 0x00000010,
+	FSC_DBL_128_BYTES = 0x00000018,
+	FSC_EC = (1 << 5),
+	FSC_EPC_MASK = 0x00c00000,
+	FSC_EPC_INBOUND = (1 << 6),
+	FSC_EPC_OUTBOUND = (1 << 7),
+	FSC_VM_PAGESIZE_MASK = 0x07000000,
+	FSC_VM_PAGE_2K = 0x00000100,
+	FSC_VM_PAGE_4K = 0x00000200,
+	FSC_VM_PAGE_8K = 0x00000300,
+	FSC_VM_PAGE_64K = 0x00000600,
+	FSC_SH = (1 << 11),
+	FSC_DSB = (1 << 12),
+	FSC_STE = (1 << 13),
+	FSC_FE = (1 << 15),
+};
+
+/*
+ *  Host Command Status Register (CSR) bit definitions.
+ */
+enum {
+	CSR_ERR_STS_MASK = 0x0000003f,
+	/*
+	 * There are no values defined as of edit #15.
+	 */
+	CSR_RR = (1 << 8),
+	CSR_HRI = (1 << 9),
+	CSR_RP = (1 << 10),
+	CSR_CMD_PARM_SHIFT = 22,
+	CSR_CMD_NOP = 0x00000000,
+	CSR_CMD_SET_RST = 0x10000000,
+	CSR_CMD_CLR_RST = 0x20000000,
+	CSR_CMD_SET_PAUSE = 0x30000000,
+	CSR_CMD_CLR_PAUSE = 0x40000000,
+	CSR_CMD_SET_H2R_INT = 0x50000000,
+	CSR_CMD_CLR_H2R_INT = 0x60000000,
+	CSR_CMD_PAR_EN = 0x70000000,
+	CSR_CMD_SET_BAD_PAR = 0x80000000,
+	CSR_CMD_CLR_BAD_PAR = 0x90000000,
+	CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
+};
+
+/*
+ *  Configuration Register (CFG) bit definitions.
+ */
+enum {
+	CFG_LRQ = (1 << 0),
+	CFG_DRQ = (1 << 1),
+	CFG_LR = (1 << 2),
+	CFG_DR = (1 << 3),
+	CFG_LE = (1 << 5),
+	CFG_LCQ = (1 << 6),
+	CFG_DCQ = (1 << 7),
+	CFG_Q_SHIFT = 8,
+	CFG_Q_MASK = 0x7f000000,
+};
+
+/*
+ *  Status Register (STS) bit definitions.
+ */
+enum {
+	STS_FE = (1 << 0),
+	STS_PI = (1 << 1),
+	STS_PL0 = (1 << 2),
+	STS_PL1 = (1 << 3),
+	STS_PI0 = (1 << 4),
+	STS_PI1 = (1 << 5),
+	STS_FUNC_ID_MASK = 0x000000c0,
+	STS_FUNC_ID_SHIFT = 6,
+	STS_F0E = (1 << 8),
+	STS_F1E = (1 << 9),
+	STS_F2E = (1 << 10),
+	STS_F3E = (1 << 11),
+	STS_NFE = (1 << 12),
+};
+
+/*
+ * Interrupt Enable Register (INTR_EN) bit definitions.
+ */
+enum {
+	INTR_EN_INTR_MASK = 0x007f0000,
+	INTR_EN_TYPE_MASK = 0x03000000,
+	INTR_EN_TYPE_ENABLE = 0x00000100,
+	INTR_EN_TYPE_DISABLE = 0x00000200,
+	INTR_EN_TYPE_READ = 0x00000300,
+	INTR_EN_IHD = (1 << 13),
+	INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
+	INTR_EN_EI = (1 << 14),
+	INTR_EN_EN = (1 << 15),
+};
+
+/*
+ * Interrupt Mask Register (INTR_MASK) bit definitions.
+ */
+enum {
+	INTR_MASK_PI = (1 << 0),
+	INTR_MASK_HL0 = (1 << 1),
+	INTR_MASK_LH0 = (1 << 2),
+	INTR_MASK_HL1 = (1 << 3),
+	INTR_MASK_LH1 = (1 << 4),
+	INTR_MASK_SE = (1 << 5),
+	INTR_MASK_LSC = (1 << 6),
+	INTR_MASK_MC = (1 << 7),
+	INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
+};
+
+/*
+ *  Register (REV_ID) bit definitions.
+ */
+enum {
+	REV_ID_MASK = 0x0000000f,
+	REV_ID_NICROLL_SHIFT = 0,
+	REV_ID_NICREV_SHIFT = 4,
+	REV_ID_XGROLL_SHIFT = 8,
+	REV_ID_XGREV_SHIFT = 12,
+	REV_ID_CHIPREV_SHIFT = 28,
+};
+
+/*
+ *  Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+	FRC_ECC_ERR_VW = (1 << 12),
+	FRC_ECC_ERR_VB = (1 << 13),
+	FRC_ECC_ERR_NI = (1 << 14),
+	FRC_ECC_ERR_NO = (1 << 15),
+	FRC_ECC_PFE_SHIFT = 16,
+	FRC_ECC_ERR_DO = (1 << 18),
+	FRC_ECC_P14 = (1 << 19),
+};
+
+/*
+ *  Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+	ERR_STS_NOF = (1 << 0),
+	ERR_STS_NIF = (1 << 1),
+	ERR_STS_DRP = (1 << 2),
+	ERR_STS_XGP = (1 << 3),
+	ERR_STS_FOU = (1 << 4),
+	ERR_STS_FOC = (1 << 5),
+	ERR_STS_FOF = (1 << 6),
+	ERR_STS_FIU = (1 << 7),
+	ERR_STS_FIC = (1 << 8),
+	ERR_STS_FIF = (1 << 9),
+	ERR_STS_MOF = (1 << 10),
+	ERR_STS_TA = (1 << 11),
+	ERR_STS_MA = (1 << 12),
+	ERR_STS_MPE = (1 << 13),
+	ERR_STS_SCE = (1 << 14),
+	ERR_STS_STE = (1 << 15),
+	ERR_STS_FOW = (1 << 16),
+	ERR_STS_UE = (1 << 17),
+	ERR_STS_MCH = (1 << 26),
+	ERR_STS_LOC_SHIFT = 27,
+};
+
+/*
+ *  RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
+ */
+enum {
+	RAM_DBG_ADDR_FW = (1 << 30),
+	RAM_DBG_ADDR_FR = (1 << 31),
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+enum {
+	/*
+	 * Example:
+	 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+	 */
+	SEM_CLEAR = 0,
+	SEM_SET = 1,
+	SEM_FORCE = 3,
+	SEM_XGMAC0_SHIFT = 0,
+	SEM_XGMAC1_SHIFT = 2,
+	SEM_ICB_SHIFT = 4,
+	SEM_MAC_ADDR_SHIFT = 6,
+	SEM_FLASH_SHIFT = 8,
+	SEM_PROBE_SHIFT = 10,
+	SEM_RT_IDX_SHIFT = 12,
+	SEM_PROC_REG_SHIFT = 14,
+	SEM_XGMAC0_MASK = 0x00030000,
+	SEM_XGMAC1_MASK = 0x000c0000,
+	SEM_ICB_MASK = 0x00300000,
+	SEM_MAC_ADDR_MASK = 0x00c00000,
+	SEM_FLASH_MASK = 0x03000000,
+	SEM_PROBE_MASK = 0x0c000000,
+	SEM_RT_IDX_MASK = 0x30000000,
+	SEM_PROC_REG_MASK = 0xc0000000,
+};
+
+/*
+ *  10G MAC Address  Register (XGMAC_ADDR) bit definitions.
+ */
+enum {
+	XGMAC_ADDR_RDY = (1 << 31),
+	XGMAC_ADDR_R = (1 << 30),
+	XGMAC_ADDR_XME = (1 << 29),
+
+	/* XGMAC control registers */
+	PAUSE_SRC_LO = 0x00000100,
+	PAUSE_SRC_HI = 0x00000104,
+	GLOBAL_CFG = 0x00000108,
+	GLOBAL_CFG_RESET = (1 << 0),
+	GLOBAL_CFG_JUMBO = (1 << 6),
+	GLOBAL_CFG_TX_STAT_EN = (1 << 10),
+	GLOBAL_CFG_RX_STAT_EN = (1 << 11),
+	TX_CFG = 0x0000010c,
+	TX_CFG_RESET = (1 << 0),
+	TX_CFG_EN = (1 << 1),
+	TX_CFG_PREAM = (1 << 2),
+	RX_CFG = 0x00000110,
+	RX_CFG_RESET = (1 << 0),
+	RX_CFG_EN = (1 << 1),
+	RX_CFG_PREAM = (1 << 2),
+	FLOW_CTL = 0x0000011c,
+	PAUSE_OPCODE = 0x00000120,
+	PAUSE_TIMER = 0x00000124,
+	PAUSE_FRM_DEST_LO = 0x00000128,
+	PAUSE_FRM_DEST_HI = 0x0000012c,
+	MAC_TX_PARAMS = 0x00000134,
+	MAC_TX_PARAMS_JUMBO = (1 << 31),
+	MAC_TX_PARAMS_SIZE_SHIFT = 16,
+	MAC_RX_PARAMS = 0x00000138,
+	MAC_SYS_INT = 0x00000144,
+	MAC_SYS_INT_MASK = 0x00000148,
+	MAC_MGMT_INT = 0x0000014c,
+	MAC_MGMT_IN_MASK = 0x00000150,
+	EXT_ARB_MODE = 0x000001fc,
+
+	/* XGMAC TX statistics  registers */
+	TX_PKTS = 0x00000200,
+	TX_BYTES = 0x00000208,
+	TX_MCAST_PKTS = 0x00000210,
+	TX_BCAST_PKTS = 0x00000218,
+	TX_UCAST_PKTS = 0x00000220,
+	TX_CTL_PKTS = 0x00000228,
+	TX_PAUSE_PKTS = 0x00000230,
+	TX_64_PKT = 0x00000238,
+	TX_65_TO_127_PKT = 0x00000240,
+	TX_128_TO_255_PKT = 0x00000248,
+	TX_256_511_PKT = 0x00000250,
+	TX_512_TO_1023_PKT = 0x00000258,
+	TX_1024_TO_1518_PKT = 0x00000260,
+	TX_1519_TO_MAX_PKT = 0x00000268,
+	TX_UNDERSIZE_PKT = 0x00000270,
+	TX_OVERSIZE_PKT = 0x00000278,
+
+	/* XGMAC statistics control registers */
+	RX_HALF_FULL_DET = 0x000002a0,
+	TX_HALF_FULL_DET = 0x000002a4,
+	RX_OVERFLOW_DET = 0x000002a8,
+	TX_OVERFLOW_DET = 0x000002ac,
+	RX_HALF_FULL_MASK = 0x000002b0,
+	TX_HALF_FULL_MASK = 0x000002b4,
+	RX_OVERFLOW_MASK = 0x000002b8,
+	TX_OVERFLOW_MASK = 0x000002bc,
+	STAT_CNT_CTL = 0x000002c0,
+	STAT_CNT_CTL_CLEAR_TX = (1 << 0),
+	STAT_CNT_CTL_CLEAR_RX = (1 << 1),
+	AUX_RX_HALF_FULL_DET = 0x000002d0,
+	AUX_TX_HALF_FULL_DET = 0x000002d4,
+	AUX_RX_OVERFLOW_DET = 0x000002d8,
+	AUX_TX_OVERFLOW_DET = 0x000002dc,
+	AUX_RX_HALF_FULL_MASK = 0x000002f0,
+	AUX_TX_HALF_FULL_MASK = 0x000002f4,
+	AUX_RX_OVERFLOW_MASK = 0x000002f8,
+	AUX_TX_OVERFLOW_MASK = 0x000002fc,
+
+	/* XGMAC RX statistics  registers */
+	RX_BYTES = 0x00000300,
+	RX_BYTES_OK = 0x00000308,
+	RX_PKTS = 0x00000310,
+	RX_PKTS_OK = 0x00000318,
+	RX_BCAST_PKTS = 0x00000320,
+	RX_MCAST_PKTS = 0x00000328,
+	RX_UCAST_PKTS = 0x00000330,
+	RX_UNDERSIZE_PKTS = 0x00000338,
+	RX_OVERSIZE_PKTS = 0x00000340,
+	RX_JABBER_PKTS = 0x00000348,
+	RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
+	RX_DROP_EVENTS = 0x00000358,
+	RX_FCERR_PKTS = 0x00000360,
+	RX_ALIGN_ERR = 0x00000368,
+	RX_SYMBOL_ERR = 0x00000370,
+	RX_MAC_ERR = 0x00000378,
+	RX_CTL_PKTS = 0x00000380,
+	RX_PAUSE_PKTS = 0x00000388,
+	RX_64_PKTS = 0x00000390,
+	RX_65_TO_127_PKTS = 0x00000398,
+	RX_128_255_PKTS = 0x000003a0,
+	RX_256_511_PKTS = 0x000003a8,
+	RX_512_TO_1023_PKTS = 0x000003b0,
+	RX_1024_TO_1518_PKTS = 0x000003b8,
+	RX_1519_TO_MAX_PKTS = 0x000003c0,
+	RX_LEN_ERR_PKTS = 0x000003c8,
+
+	/* XGMAC MDIO control registers */
+	MDIO_TX_DATA = 0x00000400,
+	MDIO_RX_DATA = 0x00000410,
+	MDIO_CMD = 0x00000420,
+	MDIO_PHY_ADDR = 0x00000430,
+	MDIO_PORT = 0x00000440,
+	MDIO_STATUS = 0x00000450,
+
+	XGMAC_REGISTER_END = 0x00000740,
+};
+
+/*
+ *  Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
+ */
+enum {
+	ETS_QUEUE_SHIFT = 29,
+	ETS_REF = (1 << 26),
+	ETS_RS = (1 << 27),
+	ETS_P = (1 << 28),
+	ETS_FC_COS_SHIFT = 23,
+};
+
+/*
+ *  Flash Address Register (FLASH_ADDR) bit definitions.
+ */
+enum {
+	FLASH_ADDR_RDY = (1 << 31),
+	FLASH_ADDR_R = (1 << 30),
+	FLASH_ADDR_ERR = (1 << 29),
+};
+
+/*
+ *  Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+	CQ_STOP_QUEUE_MASK = (0x007f0000),
+	CQ_STOP_TYPE_MASK = (0x03000000),
+	CQ_STOP_TYPE_START = 0x00000100,
+	CQ_STOP_TYPE_STOP = 0x00000200,
+	CQ_STOP_TYPE_READ = 0x00000300,
+	CQ_STOP_EN = (1 << 15),
+};
+
+/*
+ *  MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+enum {
+	MAC_ADDR_IDX_SHIFT = 4,
+	MAC_ADDR_TYPE_SHIFT = 16,
+	MAC_ADDR_TYPE_MASK = 0x000f0000,
+	MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
+	MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
+	MAC_ADDR_TYPE_VLAN = 0x00020000,
+	MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
+	MAC_ADDR_TYPE_FC_MAC = 0x00040000,
+	MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
+	MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
+	MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
+	MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
+	MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
+	MAC_ADDR_ADR = (1 << 25),
+	MAC_ADDR_RS = (1 << 26),
+	MAC_ADDR_E = (1 << 27),
+	MAC_ADDR_MR = (1 << 30),
+	MAC_ADDR_MW = (1 << 31),
+	MAX_MULTICAST_ENTRIES = 32,
+};
+
+/*
+ *  MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
+ */
+enum {
+	SPLT_HDR_EP = (1 << 31),
+};
+
+/*
+ *  FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
+ */
+enum {
+	FC_RCV_CFG_ECT = (1 << 15),
+	FC_RCV_CFG_DFH = (1 << 20),
+	FC_RCV_CFG_DVF = (1 << 21),
+	FC_RCV_CFG_RCE = (1 << 27),
+	FC_RCV_CFG_RFE = (1 << 28),
+	FC_RCV_CFG_TEE = (1 << 29),
+	FC_RCV_CFG_TCE = (1 << 30),
+	FC_RCV_CFG_TFE = (1 << 31),
+};
+
+/*
+ *  NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+	NIC_RCV_CFG_PPE = (1 << 0),
+	NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+	NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+	NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+	NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+	NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+	NIC_RCV_CFG_RV = (1 << 3),
+	NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+	NIC_RCV_CFG_DFQ_SHIFT = 8,
+	NIC_RCV_CFG_DFQ = 0,	/* HARDCODE default queue to 0. */
+};
+
+/*
+ *	Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
+ */
+enum {
+	MGMT_RCV_CFG_ARP = (1 << 0),
+	MGMT_RCV_CFG_DHC = (1 << 1),
+	MGMT_RCV_CFG_DHS = (1 << 2),
+	MGMT_RCV_CFG_NP = (1 << 3),
+	MGMT_RCV_CFG_I6N = (1 << 4),
+	MGMT_RCV_CFG_I6R = (1 << 5),
+	MGMT_RCV_CFG_DH6 = (1 << 6),
+	MGMT_RCV_CFG_UD1 = (1 << 7),
+	MGMT_RCV_CFG_UD0 = (1 << 8),
+	MGMT_RCV_CFG_BCT = (1 << 9),
+	MGMT_RCV_CFG_MCT = (1 << 10),
+	MGMT_RCV_CFG_DM = (1 << 11),
+	MGMT_RCV_CFG_RM = (1 << 12),
+	MGMT_RCV_CFG_STL = (1 << 13),
+	MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
+	MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
+	MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
+	MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
+	MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
+};
+
+/*
+ *  Routing Index Register (RT_IDX) bit definitions.
+ */
+enum {
+	RT_IDX_IDX_SHIFT = 8,
+	RT_IDX_TYPE_MASK = 0x000f0000,
+	RT_IDX_TYPE_RT = 0x00000000,
+	RT_IDX_TYPE_RT_INV = 0x00010000,
+	RT_IDX_TYPE_NICQ = 0x00020000,
+	RT_IDX_TYPE_NICQ_INV = 0x00030000,
+	RT_IDX_DST_MASK = 0x00700000,
+	RT_IDX_DST_RSS = 0x00000000,
+	RT_IDX_DST_CAM_Q = 0x00100000,
+	RT_IDX_DST_COS_Q = 0x00200000,
+	RT_IDX_DST_DFLT_Q = 0x00300000,
+	RT_IDX_DST_DEST_Q = 0x00400000,
+	RT_IDX_RS = (1 << 26),
+	RT_IDX_E = (1 << 27),
+	RT_IDX_MR = (1 << 30),
+	RT_IDX_MW = (1 << 31),
+
+	/* Nic Queue format - type 2 bits */
+	RT_IDX_BCAST = (1 << 0),
+	RT_IDX_MCAST = (1 << 1),
+	RT_IDX_MCAST_MATCH = (1 << 2),
+	RT_IDX_MCAST_REG_MATCH = (1 << 3),
+	RT_IDX_MCAST_HASH_MATCH = (1 << 4),
+	RT_IDX_FC_MACH = (1 << 5),
+	RT_IDX_ETH_FCOE = (1 << 6),
+	RT_IDX_CAM_HIT = (1 << 7),
+	RT_IDX_CAM_BIT0 = (1 << 8),
+	RT_IDX_CAM_BIT1 = (1 << 9),
+	RT_IDX_VLAN_TAG = (1 << 10),
+	RT_IDX_VLAN_MATCH = (1 << 11),
+	RT_IDX_VLAN_FILTER = (1 << 12),
+	RT_IDX_ETH_SKIP1 = (1 << 13),
+	RT_IDX_ETH_SKIP2 = (1 << 14),
+	RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
+	RT_IDX_802_3 = (1 << 16),
+	RT_IDX_LLDP = (1 << 17),
+	RT_IDX_UNUSED018 = (1 << 18),
+	RT_IDX_UNUSED019 = (1 << 19),
+	RT_IDX_UNUSED20 = (1 << 20),
+	RT_IDX_UNUSED21 = (1 << 21),
+	RT_IDX_ERR = (1 << 22),
+	RT_IDX_VALID = (1 << 23),
+	RT_IDX_TU_CSUM_ERR = (1 << 24),
+	RT_IDX_IP_CSUM_ERR = (1 << 25),
+	RT_IDX_MAC_ERR = (1 << 26),
+	RT_IDX_RSS_TCP6 = (1 << 27),
+	RT_IDX_RSS_TCP4 = (1 << 28),
+	RT_IDX_RSS_IPV6 = (1 << 29),
+	RT_IDX_RSS_IPV4 = (1 << 30),
+	RT_IDX_RSS_MATCH = (1 << 31),
+
+	/* Hierarchy for the NIC Queue Mask */
+	RT_IDX_ALL_ERR_SLOT = 0,
+	RT_IDX_MAC_ERR_SLOT = 0,
+	RT_IDX_IP_CSUM_ERR_SLOT = 1,
+	RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+	RT_IDX_BCAST_SLOT = 3,
+	RT_IDX_MCAST_MATCH_SLOT = 4,
+	RT_IDX_ALLMULTI_SLOT = 5,
+	RT_IDX_UNUSED6_SLOT = 6,
+	RT_IDX_UNUSED7_SLOT = 7,
+	RT_IDX_RSS_MATCH_SLOT = 8,
+	RT_IDX_RSS_IPV4_SLOT = 8,
+	RT_IDX_RSS_IPV6_SLOT = 9,
+	RT_IDX_RSS_TCP4_SLOT = 10,
+	RT_IDX_RSS_TCP6_SLOT = 11,
+	RT_IDX_CAM_HIT_SLOT = 12,
+	RT_IDX_UNUSED013 = 13,
+	RT_IDX_UNUSED014 = 14,
+	RT_IDX_PROMISCUOUS_SLOT = 15,
+	RT_IDX_MAX_SLOTS = 16,
+};
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+	XG_SERDES_ADDR_RDY = (1 << 31),
+	XG_SERDES_ADDR_R = (1 << 30),
+};
+
+/*
+ * Control Register Set Map
+ */
+enum {
+	PROC_ADDR = 0,		/* Use semaphore */
+	PROC_DATA = 0x04,	/* Use semaphore */
+	SYS = 0x08,
+	RST_FO = 0x0c,
+	FSC = 0x10,
+	CSR = 0x14,
+	LED = 0x18,
+	ICB_RID = 0x1c,		/* Use semaphore */
+	ICB_L = 0x20,		/* Use semaphore */
+	ICB_H = 0x24,		/* Use semaphore */
+	CFG = 0x28,
+	BIOS_ADDR = 0x2c,
+	STS = 0x30,
+	INTR_EN = 0x34,
+	INTR_MASK = 0x38,
+	ISR1 = 0x3c,
+	ISR2 = 0x40,
+	ISR3 = 0x44,
+	ISR4 = 0x48,
+	REV_ID = 0x4c,
+	FRC_ECC_ERR = 0x50,
+	ERR_STS = 0x54,
+	RAM_DBG_ADDR = 0x58,
+	RAM_DBG_DATA = 0x5c,
+	ECC_ERR_CNT = 0x60,
+	SEM = 0x64,
+	GPIO_1 = 0x68,		/* Use semaphore */
+	GPIO_2 = 0x6c,		/* Use semaphore */
+	GPIO_3 = 0x70,		/* Use semaphore */
+	RSVD2 = 0x74,
+	XGMAC_ADDR = 0x78,	/* Use semaphore */
+	XGMAC_DATA = 0x7c,	/* Use semaphore */
+	NIC_ETS = 0x80,
+	CNA_ETS = 0x84,
+	FLASH_ADDR = 0x88,	/* Use semaphore */
+	FLASH_DATA = 0x8c,	/* Use semaphore */
+	CQ_STOP = 0x90,
+	PAGE_TBL_RID = 0x94,
+	WQ_PAGE_TBL_LO = 0x98,
+	WQ_PAGE_TBL_HI = 0x9c,
+	CQ_PAGE_TBL_LO = 0xa0,
+	CQ_PAGE_TBL_HI = 0xa4,
+	MAC_ADDR_IDX = 0xa8,	/* Use semaphore */
+	MAC_ADDR_DATA = 0xac,	/* Use semaphore */
+	COS_DFLT_CQ1 = 0xb0,
+	COS_DFLT_CQ2 = 0xb4,
+	ETYPE_SKIP1 = 0xb8,
+	ETYPE_SKIP2 = 0xbc,
+	SPLT_HDR = 0xc0,
+	FC_PAUSE_THRES = 0xc4,
+	NIC_PAUSE_THRES = 0xc8,
+	FC_ETHERTYPE = 0xcc,
+	FC_RCV_CFG = 0xd0,
+	NIC_RCV_CFG = 0xd4,
+	FC_COS_TAGS = 0xd8,
+	NIC_COS_TAGS = 0xdc,
+	MGMT_RCV_CFG = 0xe0,
+	RT_IDX = 0xe4,
+	RT_DATA = 0xe8,
+	RSVD7 = 0xec,
+	XG_SERDES_ADDR = 0xf0,
+	XG_SERDES_DATA = 0xf4,
+	PRB_MX_ADDR = 0xf8,	/* Use semaphore */
+	PRB_MX_DATA = 0xfc,	/* Use semaphore */
+};
+
+/*
+ * CAM output format.
+ */
+enum {
+	CAM_OUT_ROUTE_FC = 0,
+	CAM_OUT_ROUTE_NIC = 1,
+	CAM_OUT_FUNC_SHIFT = 2,
+	CAM_OUT_RV = (1 << 4),
+	CAM_OUT_SH = (1 << 15),
+	CAM_OUT_CQ_ID_SHIFT = 5,
+};
+
+/*
+ * Mailbox  definitions
+ */
+enum {
+	/* Asynchronous Event Notifications */
+	AEN_SYS_ERR = 0x00008002,
+	AEN_LINK_UP = 0x00008011,
+	AEN_LINK_DOWN = 0x00008012,
+	AEN_IDC_CMPLT = 0x00008100,
+	AEN_IDC_REQ = 0x00008101,
+	AEN_IDC_EXT = 0x00008102,
+	AEN_DCBX_CHG = 0x00008110,
+	AEN_AEN_LOST = 0x00008120,
+	AEN_AEN_SFP_IN = 0x00008130,
+	AEN_AEN_SFP_OUT = 0x00008131,
+	AEN_FW_INIT_DONE = 0x00008400,
+	AEN_FW_INIT_FAIL = 0x00008401,
+
+	/* Mailbox Command Opcodes. */
+	MB_CMD_NOP = 0x00000000,
+	MB_CMD_EX_FW = 0x00000002,
+	MB_CMD_MB_TEST = 0x00000006,
+	MB_CMD_CSUM_TEST = 0x00000007,	/* Verify Checksum */
+	MB_CMD_ABOUT_FW = 0x00000008,
+	MB_CMD_COPY_RISC_RAM = 0x0000000a,
+	MB_CMD_LOAD_RISC_RAM = 0x0000000b,
+	MB_CMD_DUMP_RISC_RAM = 0x0000000c,
+	MB_CMD_WRITE_RAM = 0x0000000d,
+	MB_CMD_INIT_RISC_RAM = 0x0000000e,
+	MB_CMD_READ_RAM = 0x0000000f,
+	MB_CMD_STOP_FW = 0x00000014,
+	MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+	MB_CMD_WRITE_SFP = 0x00000030,
+	MB_CMD_READ_SFP = 0x00000031,
+	MB_CMD_INIT_FW = 0x00000060,
+	MB_CMD_GET_IFCB = 0x00000061,
+	MB_CMD_GET_FW_STATE = 0x00000069,
+	MB_CMD_IDC_REQ = 0x00000100,	/* Inter-Driver Communication */
+	MB_CMD_IDC_ACK = 0x00000101,	/* Inter-Driver Communication */
+	MB_CMD_SET_WOL_MODE = 0x00000110,	/* Wake On Lan */
+	MB_WOL_DISABLE = 0,
+	MB_WOL_MAGIC_PKT = (1 << 1),
+	MB_WOL_FLTR = (1 << 2),
+	MB_WOL_UCAST = (1 << 3),
+	MB_WOL_MCAST = (1 << 4),
+	MB_WOL_BCAST = (1 << 5),
+	MB_WOL_LINK_UP = (1 << 6),
+	MB_WOL_LINK_DOWN = (1 << 7),
+	MB_WOL_MODE_ON = (1 << 16),		/* Wake on Lan Mode on */
+	MB_CMD_SET_WOL_FLTR = 0x00000111,	/* Wake On Lan Filter */
+	MB_CMD_CLEAR_WOL_FLTR = 0x00000112,	/* Wake On Lan Filter */
+	MB_CMD_SET_WOL_MAGIC = 0x00000113,	/* Wake On Lan Magic Packet */
+	MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,	/* Wake On Lan Magic Packet */
+	MB_CMD_SET_WOL_IMMED = 0x00000115,
+	MB_CMD_PORT_RESET = 0x00000120,
+	MB_CMD_SET_PORT_CFG = 0x00000122,
+	MB_CMD_GET_PORT_CFG = 0x00000123,
+	MB_CMD_GET_LINK_STS = 0x00000124,
+	MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+	MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
+	/* Sub-commands for IDC request.
+	 * This describes the reason for the
+	 * IDC request.
+	 * See Fcoeidcv0_5.doc
+	 */
+	MB_CMD_IOP_PREP_UPDATE_MPI	= 0x0001,
+	MB_CMD_IOP_COMP_UPDATE_MPI	= 0x0002,
+	MB_CMD_IOP_PREP_LINK_DOWN	= 0x0010,
+	MB_CMD_IOP_DVR_START	 = 0x0100,
+	MB_CMD_IOP_FLASH_ACC	 = 0x0101,
+	MB_CMD_IOP_RESTART_MPI	= 0x0102,
+	MB_CMD_IOP_CORE_DUMP_MPI	= 0x0103,
+
+	/* Mailbox Command Status. */
+	MB_CMD_STS_GOOD = 0x00004000,	/* Success. */
+	MB_CMD_STS_INTRMDT = 0x00001000,	/* Intermediate Complete. */
+	MB_CMD_STS_INVLD_CMD = 0x00004001,	/* Invalid. */
+	MB_CMD_STS_XFC_ERR = 0x00004002,	/* Interface Error. */
+	MB_CMD_STS_CSUM_ERR = 0x00004003,	/* Csum Error. */
+	MB_CMD_STS_ERR = 0x00004005,	/* Error. */
+	MB_CMD_STS_PARAM_ERR = 0x00004006,	/* Parameter Error. */
+};
+
+struct mbox_params {
+	u32 mbox_in[MAILBOX_COUNT];
+	u32 mbox_out[MAILBOX_COUNT];
+	int in_count;
+	int out_count;
+};
+
+struct flash_params_8012 {
+	u8 dev_id_str[4];
+	__le16 size;
+	__le16 csum;
+	__le16 ver;
+	__le16 sub_dev_id;
+	u8 mac_addr[6];
+	__le16 res;
+};
+
+/* 8000 device's flash is a different structure
+ * at a different offset in flash.
+ */
+#define FUNC0_FLASH_OFFSET 0x140200
+#define FUNC1_FLASH_OFFSET 0x140600
+
+/* Flash related data structures. */
+struct flash_params_8000 {
+	u8 dev_id_str[4];	/* "8000" */
+	__le16 ver;
+	__le16 size;
+	__le16 csum;
+	__le16 reserved0;
+	__le16 total_size;
+	__le16 entry_count;
+	u8 data_type0;
+	u8 data_size0;
+	u8 mac_addr[6];
+	u8 data_type1;
+	u8 data_size1;
+	u8 mac_addr1[6];
+	u8 data_type2;
+	u8 data_size2;
+	__le16 vlan_id;
+	u8 data_type3;
+	u8 data_size3;
+	__le16 last;
+	u8 reserved1[464];
+	__le16	subsys_ven_id;
+	__le16	subsys_dev_id;
+	u8 reserved2[4];
+};
+
+union flash_params {
+	struct flash_params_8012 flash_params_8012;
+	struct flash_params_8000 flash_params_8000;
+};
+
+/*
+ * doorbell space for the rx ring context
+ */
+struct rx_doorbell_context {
+	u32 cnsmr_idx;		/* 0x00 */
+	u32 valid;		/* 0x04 */
+	u32 reserved[4];	/* 0x08-0x14 */
+	u32 lbq_prod_idx;	/* 0x18 */
+	u32 sbq_prod_idx;	/* 0x1c */
+};
+
+/*
+ * doorbell space for the tx ring context
+ */
+struct tx_doorbell_context {
+	u32 prod_idx;		/* 0x00 */
+	u32 valid;		/* 0x04 */
+	u32 reserved[4];	/* 0x08-0x14 */
+	u32 lbq_prod_idx;	/* 0x18 */
+	u32 sbq_prod_idx;	/* 0x1c */
+};
+
+/* DATA STRUCTURES SHARED WITH HARDWARE. */
+struct tx_buf_desc {
+	__le64 addr;
+	__le32 len;
+#define TX_DESC_LEN_MASK	0x000fffff
+#define TX_DESC_C	0x40000000
+#define TX_DESC_E	0x80000000
+} __attribute((packed));
+
+/*
+ * IOCB Definitions...
+ */
+
+#define OPCODE_OB_MAC_IOCB			0x01
+#define OPCODE_OB_MAC_TSO_IOCB			0x02
+#define OPCODE_IB_MAC_IOCB			0x20
+#define OPCODE_IB_MPI_IOCB			0x21
+#define OPCODE_IB_AE_IOCB			0x3f
+
+struct ob_mac_iocb_req {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_IOCB_REQ_OI	0x01
+#define OB_MAC_IOCB_REQ_I	0x02
+#define OB_MAC_IOCB_REQ_D	0x08
+#define OB_MAC_IOCB_REQ_F	0x10
+	u8 flags2;
+	u8 flags3;
+#define OB_MAC_IOCB_DFP	0x02
+#define OB_MAC_IOCB_V	0x04
+	__le32 reserved1[2];
+	__le16 frame_len;
+#define OB_MAC_IOCB_LEN_MASK 0x3ffff
+	__le16 reserved2;
+	u32 tid;
+	u32 txq_idx;
+	__le32 reserved3;
+	__le16 vlan_tci;
+	__le16 reserved4;
+	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __attribute((packed));
+
+struct ob_mac_iocb_rsp {
+	u8 opcode;		/* */
+	u8 flags1;		/* */
+#define OB_MAC_IOCB_RSP_OI	0x01	/* */
+#define OB_MAC_IOCB_RSP_I	0x02	/* */
+#define OB_MAC_IOCB_RSP_E	0x08	/* */
+#define OB_MAC_IOCB_RSP_S	0x10	/* too Short */
+#define OB_MAC_IOCB_RSP_L	0x20	/* too Large */
+#define OB_MAC_IOCB_RSP_P	0x40	/* Padded */
+	u8 flags2;		/* */
+	u8 flags3;		/* */
+#define OB_MAC_IOCB_RSP_B	0x80	/* */
+	u32 tid;
+	u32 txq_idx;
+	__le32 reserved[13];
+} __attribute((packed));
+
+struct ob_mac_tso_iocb_req {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_TSO_IOCB_OI	0x01
+#define OB_MAC_TSO_IOCB_I	0x02
+#define OB_MAC_TSO_IOCB_D	0x08
+#define OB_MAC_TSO_IOCB_IP4	0x40
+#define OB_MAC_TSO_IOCB_IP6	0x80
+	u8 flags2;
+#define OB_MAC_TSO_IOCB_LSO	0x20
+#define OB_MAC_TSO_IOCB_UC	0x40
+#define OB_MAC_TSO_IOCB_TC	0x80
+	u8 flags3;
+#define OB_MAC_TSO_IOCB_IC	0x01
+#define OB_MAC_TSO_IOCB_DFP	0x02
+#define OB_MAC_TSO_IOCB_V	0x04
+	__le32 reserved1[2];
+	__le32 frame_len;
+	u32 tid;
+	u32 txq_idx;
+	__le16 total_hdrs_len;
+	__le16 net_trans_offset;
+#define OB_MAC_TRANSPORT_HDR_SHIFT 6
+	__le16 vlan_tci;
+	__le16 mss;
+	struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __attribute((packed));
+
+struct ob_mac_tso_iocb_rsp {
+	u8 opcode;
+	u8 flags1;
+#define OB_MAC_TSO_IOCB_RSP_OI	0x01
+#define OB_MAC_TSO_IOCB_RSP_I	0x02
+#define OB_MAC_TSO_IOCB_RSP_E	0x08
+#define OB_MAC_TSO_IOCB_RSP_S	0x10
+#define OB_MAC_TSO_IOCB_RSP_L	0x20
+#define OB_MAC_TSO_IOCB_RSP_P	0x40
+	u8 flags2;		/* */
+	u8 flags3;		/* */
+#define OB_MAC_TSO_IOCB_RSP_B	0x8000
+	u32 tid;
+	u32 txq_idx;
+	__le32 reserved2[13];
+} __attribute((packed));
+
+struct ib_mac_iocb_rsp {
+	u8 opcode;		/* 0x20 */
+	u8 flags1;
+#define IB_MAC_IOCB_RSP_OI	0x01	/* Override intr delay */
+#define IB_MAC_IOCB_RSP_I	0x02	/* Disable Intr Generation */
+#define IB_MAC_CSUM_ERR_MASK	0x1c	 /* A mask to use for csum errs */
+#define IB_MAC_IOCB_RSP_TE	0x04	/* Checksum error */
+#define IB_MAC_IOCB_RSP_NU	0x08	/* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE	0x10	/* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK	0x60	/* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE	0x00	/* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH	0x20	/* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG	0x40	/* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM	0x60	/* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B	0x80	/* Broadcast frame */
+	u8 flags2;
+#define IB_MAC_IOCB_RSP_P	0x01	/* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V	0x02	/* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK	0x1c	/*  */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR	0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE	0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE	0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE	0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN	0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC		0x1c
+#define IB_MAC_IOCB_RSP_U	0x20	/* UDP packet */
+#define IB_MAC_IOCB_RSP_T	0x40	/* TCP packet */
+#define IB_MAC_IOCB_RSP_FO	0x80	/* Failover port */
+	u8 flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK	0x07	/* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE	0x00	/* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4	0x04	/* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6	0x02	/* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4	0x05	/* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6	0x03	/* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4	0x08	/* IPV4 */
+#define IB_MAC_IOCB_RSP_V6	0x10	/* IPV6 */
+#define IB_MAC_IOCB_RSP_IH	0x20	/* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS	0x40	/* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL	0x80	/* data is in large buffer */
+	__le32 data_len;	/* */
+	__le64 data_addr;	/* */
+	__le32 rss;		/* */
+	__le16 vlan_id;		/* 12 bits */
+#define IB_MAC_IOCB_RSP_C	0x1000	/* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT	12	/* class of service value */
+#define IB_MAC_IOCB_RSP_VLAN_MASK	0x0ffff
+
+	__le16 reserved1;
+	__le32 reserved2[6];
+	u8 reserved3[3];
+	u8 flags4;
+#define IB_MAC_IOCB_RSP_HV	0x20
+#define IB_MAC_IOCB_RSP_HS	0x40
+#define IB_MAC_IOCB_RSP_HL	0x80
+	__le32 hdr_len;		/* */
+	__le64 hdr_addr;	/* */
+} __attribute((packed));
+
+struct ib_ae_iocb_rsp {
+	u8 opcode;
+	u8 flags1;
+#define IB_AE_IOCB_RSP_OI	0x01
+#define IB_AE_IOCB_RSP_I	0x02
+	u8 event;
+#define LINK_UP_EVENT		0x00
+#define LINK_DOWN_EVENT		0x01
+#define CAM_LOOKUP_ERR_EVENT	0x06
+#define SOFT_ECC_ERROR_EVENT	0x07
+#define MGMT_ERR_EVENT		0x08
+#define TEN_GIG_MAC_EVENT	0x09
+#define GPI0_H2L_EVENT		0x10
+#define GPI0_L2H_EVENT		0x20
+#define GPI1_H2L_EVENT		0x11
+#define GPI1_L2H_EVENT		0x21
+#define PCI_ERR_ANON_BUF_RD	0x40
+	u8 q_id;
+	__le32 reserved[15];
+} __attribute((packed));
+
+/*
+ * These three structures are for generic
+ * handling of ib and ob iocbs.
+ */
+struct ql_net_rsp_iocb {
+	u8 opcode;
+	u8 flags0;
+	__le16 length;
+	__le32 tid;
+	__le32 reserved[14];
+} __attribute((packed));
+
+struct net_req_iocb {
+	u8 opcode;
+	u8 flags0;
+	__le16 flags1;
+	__le32 tid;
+	__le32 reserved1[30];
+} __attribute((packed));
+
+/*
+ * tx ring initialization control block for chip.
+ * It is defined as:
+ * "Work Queue Initialization Control Block"
+ */
+struct wqicb {
+	__le16 len;
+#define Q_LEN_V		(1 << 4)
+#define Q_LEN_CPP_CONT	0x0000
+#define Q_LEN_CPP_16	0x0001
+#define Q_LEN_CPP_32	0x0002
+#define Q_LEN_CPP_64	0x0003
+#define Q_LEN_CPP_512	0x0006
+	__le16 flags;
+#define Q_PRI_SHIFT	1
+#define Q_FLAGS_LC	0x1000
+#define Q_FLAGS_LB	0x2000
+#define Q_FLAGS_LI	0x4000
+#define Q_FLAGS_LO	0x8000
+	__le16 cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+	__le16 rid;
+	__le64 addr;
+	__le64 cnsmr_idx_addr;
+} __attribute((packed));
+
+/*
+ * rx ring initialization control block for chip.
+ * It is defined as:
+ * "Completion Queue Initialization Control Block"
+ */
+struct cqicb {
+	u8 msix_vect;
+	u8 reserved1;
+	u8 reserved2;
+	u8 flags;
+#define FLAGS_LV	0x08
+#define FLAGS_LS	0x10
+#define FLAGS_LL	0x20
+#define FLAGS_LI	0x40
+#define FLAGS_LC	0x80
+	__le16 len;
+#define LEN_V		(1 << 4)
+#define LEN_CPP_CONT	0x0000
+#define LEN_CPP_32	0x0001
+#define LEN_CPP_64	0x0002
+#define LEN_CPP_128	0x0003
+	__le16 rid;
+	__le64 addr;
+	__le64 prod_idx_addr;
+	__le16 pkt_delay;
+	__le16 irq_delay;
+	__le64 lbq_addr;
+	__le16 lbq_buf_size;
+	__le16 lbq_len;		/* entry count */
+	__le64 sbq_addr;
+	__le16 sbq_buf_size;
+	__le16 sbq_len;		/* entry count */
+} __attribute((packed));
+
+struct ricb {
+	u8 base_cq;
+#define RSS_L4K 0x80
+	u8 flags;
+#define RSS_L6K 0x01
+#define RSS_LI  0x02
+#define RSS_LB  0x04
+#define RSS_LM  0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+	__le16 mask;
+	__le32 hash_cq_id[256];
+	__le32 ipv6_hash_key[10];
+	__le32 ipv4_hash_key[4];
+} __attribute((packed));
+
+/* SOFTWARE/DRIVER DATA STRUCTURES. */
+
+struct oal {
+	struct tx_buf_desc oal[TX_DESC_PER_OAL];
+};
+
+struct map_list {
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+struct tx_ring_desc {
+	struct sk_buff *skb;
+	struct ob_mac_iocb_req *queue_entry;
+	u32 index;
+	struct oal *oal;
+	struct map_list *map;
+	int map_cnt;
+	struct tx_ring_desc *next;
+};
+
+struct bq_desc {
+	union {
+		struct page *lbq_page;
+		struct sk_buff *skb;
+	} p;
+	char *pg_addr;
+	__le64 *addr;
+	u32 index;
+	 DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	 DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
+
+struct tx_ring {
+	/*
+	 * queue info.
+	 */
+	struct wqicb wqicb;	/* structure used to inform chip of new queue */
+	void *wq_base;		/* pci_alloc:virtual addr for tx */
+	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
+	__le32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
+	dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
+	u32 wq_size;		/* size in bytes of queue area */
+	u32 wq_len;		/* number of entries in queue */
+	void __iomem *prod_idx_db_reg; /* doorbell index reg at offset 0x00 */
+	void __iomem *valid_db_reg; /* doorbell valid reg at offset 0x04 */
+	u16 prod_idx;		/* current value for prod idx */
+	u16 cq_id;		/* completion (rx) queue for tx completions */
+	u8 wq_id;		/* queue id for this entry */
+	u8 reserved1[3];
+	struct tx_ring_desc *q;	/* descriptor list for the queue */
+	spinlock_t lock;
+	atomic_t tx_count;	/* counts down for every outstanding IO */
+	atomic_t queue_stopped;	/* Turns queue off when full. */
+	struct work_struct tx_work;
+	struct ql_adapter *qdev;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+	DEFAULT_Q = 2,		/* Handles slow queue and chip/MPI events. */
+	TX_Q = 3,		/* Handles outbound completions. */
+	RX_Q = 4,		/* Handles inbound completions. */
+};
+
+struct rx_ring {
+	struct cqicb cqicb; /* The chip's completion queue init control block.*/
+
+	/* Completion queue elements. */
+	void *cq_base;
+	dma_addr_t cq_base_dma;
+	u32 cq_size;
+	u32 cq_len;
+	u16 cq_id;
+	__le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	dma_addr_t prod_idx_sh_reg_dma;
+	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
+	u32 cnsmr_idx;		/* current sw idx */
+	struct ql_net_rsp_iocb *curr_entry;	/* next entry on queue */
+	void __iomem *valid_db_reg;	/* PCI doorbell mem area + 0x04 */
+
+	/* Large buffer queue elements. */
+	u32 lbq_len;		/* entry count */
+	u32 lbq_size;		/* size in bytes of queue */
+	u32 lbq_buf_map_size;
+	void *lbq_base;
+	dma_addr_t lbq_base_dma;
+	void *lbq_base_indirect;
+	dma_addr_t lbq_base_indirect_dma;
+	struct bq_desc *lbq;	/* array of control blocks */
+	void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
+	u32 lbq_prod_idx;	/* current sw prod idx */
+	u32 lbq_curr_idx;	/* next entry we expect */
+	u32 lbq_clean_idx;	/* beginning of new descs */
+	u32 lbq_free_cnt;	/* free buffer desc cnt */
+
+	/* Small buffer queue elements. */
+	u32 sbq_len;		/* entry count */
+	u32 sbq_size;		/* size in bytes of queue */
+	u32 sbq_buf_size;
+	void *sbq_base;
+	dma_addr_t sbq_base_dma;
+	void *sbq_base_indirect;
+	dma_addr_t sbq_base_indirect_dma;
+	struct bq_desc *sbq;	/* array of control blocks */
+	void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
+	u32 sbq_prod_idx;	/* current sw prod idx */
+	u32 sbq_curr_idx;	/* next entry we expect */
+	u32 sbq_clean_idx;	/* beginning of new descs */
+	u32 sbq_free_cnt;	/* free buffer desc cnt */
+
+	/* Misc. handler elements. */
+	u32 type;		/* Type of queue, tx, rx, or default. */
+	u32 irq;		/* Which vector this ring is assigned. */
+	u32 cpu;		/* Which CPU this should run on. */
+	char name[IFNAMSIZ + 5];
+	struct work_struct rx_work;
+	u8 reserved;
+	struct ql_adapter *qdev;
+	struct net_device *dummy_netdev;
+};
+
+/*
+ * RSS Initialization Control Block
+ */
+struct hash_id {
+	u8 value[4];
+};
+
+struct nic_stats {
+	/*
+	 * These stats come from offset 200h to 278h
+	 * in the XGMAC register.
+	 */
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u64 tx_mcast_pkts;
+	u64 tx_bcast_pkts;
+	u64 tx_ucast_pkts;
+	u64 tx_ctl_pkts;
+	u64 tx_pause_pkts;
+	u64 tx_64_pkt;
+	u64 tx_65_to_127_pkt;
+	u64 tx_128_to_255_pkt;
+	u64 tx_256_511_pkt;
+	u64 tx_512_to_1023_pkt;
+	u64 tx_1024_to_1518_pkt;
+	u64 tx_1519_to_max_pkt;
+	u64 tx_undersize_pkt;
+	u64 tx_oversize_pkt;
+
+	/*
+	 * These stats come from offset 300h to 3C8h
+	 * in the XGMAC register.
+	 */
+	u64 rx_bytes;
+	u64 rx_bytes_ok;
+	u64 rx_pkts;
+	u64 rx_pkts_ok;
+	u64 rx_bcast_pkts;
+	u64 rx_mcast_pkts;
+	u64 rx_ucast_pkts;
+	u64 rx_undersize_pkts;
+	u64 rx_oversize_pkts;
+	u64 rx_jabber_pkts;
+	u64 rx_undersize_fcerr_pkts;
+	u64 rx_drop_events;
+	u64 rx_fcerr_pkts;
+	u64 rx_align_err;
+	u64 rx_symbol_err;
+	u64 rx_mac_err;
+	u64 rx_ctl_pkts;
+	u64 rx_pause_pkts;
+	u64 rx_64_pkts;
+	u64 rx_65_to_127_pkts;
+	u64 rx_128_255_pkts;
+	u64 rx_256_511_pkts;
+	u64 rx_512_to_1023_pkts;
+	u64 rx_1024_to_1518_pkts;
+	u64 rx_1519_to_max_pkts;
+	u64 rx_len_err_pkts;
+};
+
+/* Address/Length pairs for the coredump. */
+enum {
+	MPI_CORE_REGS_ADDR = 0x00030000,
+	MPI_CORE_REGS_CNT = 127,
+	MPI_CORE_SH_REGS_CNT = 16,
+	TEST_REGS_ADDR = 0x00001000,
+	TEST_REGS_CNT = 23,
+	RMII_REGS_ADDR = 0x00001040,
+	RMII_REGS_CNT = 64,
+	FCMAC1_REGS_ADDR = 0x00001080,
+	FCMAC2_REGS_ADDR = 0x000010c0,
+	FCMAC_REGS_CNT = 64,
+	FC1_MBX_REGS_ADDR = 0x00001100,
+	FC2_MBX_REGS_ADDR = 0x00001240,
+	FC_MBX_REGS_CNT = 64,
+	IDE_REGS_ADDR = 0x00001140,
+	IDE_REGS_CNT = 64,
+	NIC1_MBX_REGS_ADDR = 0x00001180,
+	NIC2_MBX_REGS_ADDR = 0x00001280,
+	NIC_MBX_REGS_CNT = 64,
+	SMBUS_REGS_ADDR = 0x00001200,
+	SMBUS_REGS_CNT = 64,
+	I2C_REGS_ADDR = 0x00001fc0,
+	I2C_REGS_CNT = 64,
+	MEMC_REGS_ADDR = 0x00003000,
+	MEMC_REGS_CNT = 256,
+	PBUS_REGS_ADDR = 0x00007c00,
+	PBUS_REGS_CNT = 256,
+	MDE_REGS_ADDR = 0x00010000,
+	MDE_REGS_CNT = 6,
+	CODE_RAM_ADDR = 0x00020000,
+	CODE_RAM_CNT = 0x2000,
+	MEMC_RAM_ADDR = 0x00100000,
+	MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+	u32	cookie;
+	u8	idString[16];
+	u32	timeLo;
+	u32	timeHi;
+	u32	imageSize;
+	u32	headerSize;
+	u8	info[220];
+};
+
+struct mpi_coredump_segment_header {
+	u32	cookie;
+	u32	segNum;
+	u32	segSize;
+	u32	extra;
+	u8	description[16];
+};
+
+/* Reg dump segment numbers. */
+enum {
+	CORE_SEG_NUM = 1,
+	TEST_LOGIC_SEG_NUM = 2,
+	RMII_SEG_NUM = 3,
+	FCMAC1_SEG_NUM = 4,
+	FCMAC2_SEG_NUM = 5,
+	FC1_MBOX_SEG_NUM = 6,
+	IDE_SEG_NUM = 7,
+	NIC1_MBOX_SEG_NUM = 8,
+	SMBUS_SEG_NUM = 9,
+	FC2_MBOX_SEG_NUM = 10,
+	NIC2_MBOX_SEG_NUM = 11,
+	I2C_SEG_NUM = 12,
+	MEMC_SEG_NUM = 13,
+	PBUS_SEG_NUM = 14,
+	MDE_SEG_NUM = 15,
+	NIC1_CONTROL_SEG_NUM = 16,
+	NIC2_CONTROL_SEG_NUM = 17,
+	NIC1_XGMAC_SEG_NUM = 18,
+	NIC2_XGMAC_SEG_NUM = 19,
+	WCS_RAM_SEG_NUM = 20,
+	MEMC_RAM_SEG_NUM = 21,
+	XAUI_AN_SEG_NUM = 22,
+	XAUI_HSS_PCS_SEG_NUM = 23,
+	XFI_AN_SEG_NUM = 24,
+	XFI_TRAIN_SEG_NUM = 25,
+	XFI_HSS_PCS_SEG_NUM = 26,
+	XFI_HSS_TX_SEG_NUM = 27,
+	XFI_HSS_RX_SEG_NUM = 28,
+	XFI_HSS_PLL_SEG_NUM = 29,
+	MISC_NIC_INFO_SEG_NUM = 30,
+	INTR_STATES_SEG_NUM = 31,
+	CAM_ENTRIES_SEG_NUM = 32,
+	ROUTING_WORDS_SEG_NUM = 33,
+	XGMAC_SEG_NUM = 34,
+	ETS_SEG_NUM = 35,
+};
+
+struct ql_nic_misc {
+	u32 rx_ring_count;
+	u32 tx_ring_count;
+	u32 intr_count;
+	u32 function;
+};
+
+struct ql_mpi_coredump {
+	struct mpi_coredump_global_header mpi_global_header;
+
+	struct mpi_coredump_segment_header core_regs_seg_hdr;
+	u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+	u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+	struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+	u32 test_logic_regs[TEST_REGS_CNT];
+
+	struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+	u32 rmii_regs[RMII_REGS_CNT];
+
+	struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+	u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+	struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+	u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+	struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+	u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+	struct mpi_coredump_segment_header ide_regs_seg_hdr;
+	u32 ide_regs[IDE_REGS_CNT];
+
+	struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+	u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+	struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+	u32 smbus_regs[SMBUS_REGS_CNT];
+
+	struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+	u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+	struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+	u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+	struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+	u32 i2c_regs[I2C_REGS_CNT];
+
+	struct mpi_coredump_segment_header memc_regs_seg_hdr;
+	u32 memc_regs[MEMC_REGS_CNT];
+
+	struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+	u32 pbus_regs[PBUS_REGS_CNT];
+
+	struct mpi_coredump_segment_header mde_regs_seg_hdr;
+	u32 mde_regs[MDE_REGS_CNT];
+
+
+	struct mpi_coredump_segment_header xaui_an_hdr;
+	u32 serdes_xaui_an[14];
+
+	struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+	u32 serdes_xaui_hss_pcs[33];
+
+	struct mpi_coredump_segment_header xfi_an_hdr;
+	u32 serdes_xfi_an[14];
+
+	struct mpi_coredump_segment_header xfi_train_hdr;
+	u32 serdes_xfi_train[12];
+
+	struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+	u32 serdes_xfi_hss_pcs[15];
+
+	struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+	u32 serdes_xfi_hss_tx[32];
+
+	struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+	u32 serdes_xfi_hss_rx[32];
+
+	struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+	u32 serdes_xfi_hss_pll[32];
+
+	struct mpi_coredump_segment_header misc_nic_seg_hdr;
+	struct ql_nic_misc misc_nic_info;
+
+	struct mpi_coredump_segment_header nic_regs_seg_hdr;
+	u32 nic_regs[64];
+
+	/* one interrupt state for each CQ */
+	struct mpi_coredump_segment_header intr_states_seg_hdr;
+	u32 intr_states[MAX_RX_RINGS];
+
+	/* 3 cam words each for 16 unicast,
+	 * 2 cam words for each of 32 multicast.
+	 */
+	struct mpi_coredump_segment_header cam_entries_seg_hdr;
+	u32 cam_entries[(16 * 3) + (32 * 3)];
+
+	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+	u32 nic_routing_words[16];
+
+	struct mpi_coredump_segment_header xgmac_seg_hdr;
+	u32 xgmac[(XGMAC_REGISTER_END - PAUSE_SRC_LO) * 4];
+
+	struct mpi_coredump_segment_header ets_seg_hdr;
+	u32 ets[8+2];
+
+	struct mpi_coredump_segment_header code_ram_seg_hdr;
+	u32 code_ram[CODE_RAM_CNT];
+
+	struct mpi_coredump_segment_header memc_ram_seg_hdr;
+	u32 memc_ram[MEMC_RAM_CNT];
+};
+
+/*
+ * intr_context structure is used during initialization
+ * to hook the interrupts.  It is also used in a single
+ * irq environment as a context to the ISR.
+ */
+struct intr_context {
+	struct ql_adapter *qdev;
+	u32 intr;
+	u32 hooked;
+	u32 intr_en_mask;	/* value/mask used to enable this intr */
+	u32 intr_dis_mask;	/* value/mask used to disable this intr */
+	u32 intr_read_mask;	/* value/mask used to read this intr */
+	char name[IFNAMSIZ * 2];
+	atomic_t irq_cnt;	/* irq_cnt is used in single vector
+				 * environment.  It's incremented for each
+				 * irq handler that is scheduled.  When each
+				 * handler finishes it decrements irq_cnt and
+				 * enables interrupts if it's zero. */
+	irq_handler_t handler;
+};
+
+/* adapter flags definitions. */
+enum {
+	QL_ADAPTER_UP = (1 << 0),	/* Adapter has been brought up. */
+	QL_LEGACY_ENABLED = (1 << 3),
+	QL_MSI_ENABLED = (1 << 3),
+	QL_MSIX_ENABLED = (1 << 4),
+	QL_DMA64 = (1 << 5),
+	QL_PROMISCUOUS = (1 << 6),
+	QL_ALLMULTI = (1 << 7),
+	QL_PORT_CFG = (1 << 8),
+	QL_CAM_RT_SET = (1 << 9),
+	QL_TESTING = (1 << 10),
+};
+
+/* link_status bit definitions */
+enum {
+	STS_LOOPBACK_MASK = 0x00000700,
+	STS_LOOPBACK_PCS = 0x00000100,
+	STS_LOOPBACK_HSS = 0x00000200,
+	STS_LOOPBACK_EXT = 0x00000300,
+	STS_PAUSE_MASK = 0x000000c0,
+	STS_PAUSE_STD = 0x00000040,
+	STS_PAUSE_PRI = 0x00000080,
+	STS_SPEED_MASK = 0x00000038,
+	STS_SPEED_100Mb = 0x00000000,
+	STS_SPEED_1Gb = 0x00000008,
+	STS_SPEED_10Gb = 0x00000010,
+	STS_LINK_TYPE_MASK = 0x00000007,
+	STS_LINK_TYPE_XFI = 0x00000001,
+	STS_LINK_TYPE_XAUI = 0x00000002,
+	STS_LINK_TYPE_XFI_BP = 0x00000003,
+	STS_LINK_TYPE_XAUI_BP = 0x00000004,
+	STS_LINK_TYPE_10GBASET = 0x00000005,
+};
+
+/* link_config bit definitions */
+enum {
+	CFG_JUMBO_FRAME_SIZE = 0x00010000,
+	CFG_PAUSE_MASK = 0x00000060,
+	CFG_PAUSE_STD = 0x00000020,
+	CFG_PAUSE_PRI = 0x00000040,
+	CFG_DCBX = 0x00000010,
+	CFG_LOOPBACK_MASK = 0x00000007,
+	CFG_LOOPBACK_PCS = 0x00000002,
+	CFG_LOOPBACK_HSS = 0x00000004,
+	CFG_LOOPBACK_EXT = 0x00000006,
+	CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
+};
+
+/* LED blink bit definitions */
+#define QL_LED_BLINK	0x03e803e8
+
+struct nic_operations {
+	int (*get_flash) (struct ql_adapter *);
+	int (*port_initialize) (struct ql_adapter *);
+};
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+	struct ricb ricb;
+	unsigned long flags;
+	u32 wol;
+
+	struct nic_stats nic_stats;
+
+	struct vlan_group *vlgrp;
+
+	/* PCI Configuration information for this device */
+	struct pci_dev *pdev;
+	struct net_device *ndev;	/* Parent NET device */
+
+	/* Hardware information */
+	u32 chip_rev_id;
+	u32 fw_rev_id;
+	u32 func;		/* PCI function for this adapter */
+	u32 alt_func;		/* PCI function for alternate adapter */
+	u32 port;		/* Port number this adapter */
+
+	spinlock_t adapter_lock;
+	spinlock_t hw_lock;
+	spinlock_t stats_lock;
+
+	/* PCI Bus Relative Register Addresses */
+	void __iomem *reg_base;
+	void __iomem *doorbell_area;
+	u32 doorbell_area_size;
+
+	u32 msg_enable;
+
+	/* Page for Shadow Registers */
+	void *rx_ring_shadow_reg_area;
+	dma_addr_t rx_ring_shadow_reg_dma;
+	void *tx_ring_shadow_reg_area;
+	dma_addr_t tx_ring_shadow_reg_dma;
+
+	u32 mailbox_in;
+	u32 mailbox_out;
+	struct mbox_params idc_mbc;
+	struct mutex	mpi_mutex;
+
+	int tx_ring_size;
+	int rx_ring_size;
+	u32 intr_count;
+	struct msix_entry *msi_x_entry;
+	struct intr_context intr_context[MAX_RX_RINGS];
+
+	int tx_ring_count;	/* One per online CPU. */
+	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
+	u32 rss_ring_count;	/* One per online CPU.  */
+	/*
+	 * rx_ring_count =
+	 *  one default queue +
+	 *  (CPU count * outbound completion rx_ring) +
+	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 */
+	int rx_ring_count;
+	int ring_mem_size;
+	void *ring_mem;
+
+	struct rx_ring rx_ring[MAX_RX_RINGS];
+	struct tx_ring tx_ring[MAX_TX_RINGS];
+
+	int rx_csum;
+	u32 default_rx_queue;
+
+	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
+	u16 rx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
+	u16 tx_coalesce_usecs;	/* cqicb->int_delay */
+	u16 tx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
+
+	u32 xg_sem_mask;
+	u32 port_link_up;
+	u32 port_init;
+	u32 link_status;
+	struct ql_mpi_coredump *mpi_coredump;
+	u32 core_is_dumped;
+	u32 link_config;
+	u32 led_config;
+	u32 max_frame_size;
+
+	union flash_params flash;
+
+	struct net_device_stats stats;
+	struct workqueue_struct *q_workqueue;
+	struct workqueue_struct *workqueue;
+	struct work_struct asic_reset_work;
+	struct work_struct mpi_reset_work;
+	struct work_struct mpi_work;
+	struct work_struct mpi_port_cfg_work;
+	struct work_struct mpi_idc_work;
+	struct completion ide_completion;
+	struct nic_operations *nic_ops;
+	u16 device_id;
+	uint32_t *config_space;
+};
+
+/*
+ * Typical register read accessor for a memory mapped device.
+ */
+static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+{
+	return readl(qdev->reg_base + reg);
+}
+
+/*
+ * Typical register write accessor for a memory mapped device.
+ */
+static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+{
+	writel(val, qdev->reg_base + reg);
+}
+
+/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization.  The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues, for
+ * example the producer index for an outbound queue.  Each queue uses
+ * one 4k chunk of memory.  The lower half of the space is for outbound
+ * queues, the upper half for inbound queues.
+ */
+static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+	mmiowb();
+}
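+
+/*
+ * Illustrative usage sketch (assumed, not dictated by this patch): with
+ * the layout described above, ringing an outbound queue's producer-index
+ * doorbell is a single posted write, e.g.
+ *
+ *	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ *
+ * where prod_idx_db_reg points into the 4k doorbell chunk mapped for
+ * that queue.
+ */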
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32 *addr)
+{
+	u32 reg;
+
+	reg = le32_to_cpu(*addr);
+	rmb();
+	return reg;
+}
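+
+/*
+ * Illustrative usage sketch (assumed, not dictated by this patch): a
+ * completion handler can poll the shadowed producer index instead of
+ * issuing a PCI read, e.g.
+ *
+ *	prod_idx = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ *
+ * and compare it with the ring's consumer index to see how many new
+ * completion entries are available.
+ */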
+
+extern char qlge_driver_name[];
+extern const char qlge_driver_version[];
+extern struct ethtool_ops qlge_ethtool_ops;
+
+extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+				u32 *value);
+extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+			u16 q_id);
+void ql_queue_fw_error(struct ql_adapter *qdev);
+void ql_mpi_work(struct work_struct *work);
+void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_idc_work(struct work_struct *work);
+void ql_mpi_port_cfg_work(struct work_struct *work);
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void ql_queue_asic_error(struct ql_adapter *qdev);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+void ql_set_ethtool_ops(struct net_device *ndev);
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+int ql_cam_route_initialize(struct ql_adapter *qdev);
+void ql_link_on(struct ql_adapter *qdev);
+void ql_link_off(struct ql_adapter *qdev);
+int ql_mb_get_fw_state(struct ql_adapter *qdev);
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+		u32 ram_addr, int word_count);
+int ql_core_dump(struct ql_adapter *qdev,
+		struct ql_mpi_coredump *mpi_coredump);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
+int ql_mb_sys_err(struct ql_adapter *qdev);
+int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_wol(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
+int qlge_send(struct sk_buff *skb, struct net_device *ndev);
+void ql_check_receive_frame(struct sk_buff *skb);
+
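+/*
+ * Debug dump support.  Each define below enables the corresponding
+ * ql_dump_*() helpers implemented in qlge_dbg.c; when a define is
+ * absent, its QL_DUMP_*() macros compile to nothing.
+ */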
+#if 1
+#define QL_ALL_DUMP
+#define QL_REG_DUMP
+#define QL_DEV_DUMP
+#define QL_CB_DUMP
+#endif
+
+#ifdef QL_REG_DUMP
+extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+extern void ql_dump_routing_entries(struct ql_adapter *qdev);
+extern void ql_dump_regs(struct ql_adapter *qdev);
+#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
+#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
+#else
+#define QL_DUMP_REGS(qdev)
+#define QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+extern void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+extern void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+extern void ql_dump_wqicb(struct wqicb *wqicb);
+extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
+extern void ql_dump_ricb(struct ricb *ricb);
+extern void ql_dump_cqicb(struct cqicb *cqicb);
+extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
+extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+		ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef	QL_ALL_DUMP
+extern void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
+
+#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 0000000..1325947
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,1541 @@
+#include "qlge.h"
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf)
+{
+	int status = 0;
+	int i;
+
+	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+		/* We're reading 400 xgmac registers, but we filter out
+		 * several locations that are non-responsive to reads.
+		 */
+		if ((i == 0x00000114) ||
+			(i == 0x00000118) ||
+			(i == 0x0000013c) ||
+			(i == 0x00000140) ||
+			(i > 0x00000150 && i < 0x000001fc) ||
+			(i > 0x00000278 && i < 0x000002a0) ||
+			(i > 0x000002c0 && i < 0x000002cf) ||
+			(i > 0x000002dc && i < 0x000002f0) ||
+			(i > 0x000003c8 && i < 0x00000400) ||
+			(i > 0x00000400 && i < 0x00000410) ||
+			(i > 0x00000410 && i < 0x00000420) ||
+			(i > 0x00000420 && i < 0x00000430) ||
+			(i > 0x00000430 && i < 0x00000440) ||
+			(i > 0x00000440 && i < 0x00000450) ||
+			(i > 0x00000450 && i < 0x00000500) ||
+			(i > 0x0000054c && i < 0x00000568) ||
+			(i > 0x000005c8 && i < 0x00000600)) {
+			*buf = 0xdeadbeef;
+			continue;
+		}
+
+		status = ql_read_xgmac_reg(qdev, i, buf);
+		if (status)
+			goto err;
+	}
+err:
+	return status;
+}
+
+int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+	return status;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+				struct ql_mpi_coredump *mpi_coredump)
+{
+	u32 i, j;
+	int status;
+
+	for (i = 0, j = 0; i <= 0x00000034; i += 4) {
+		status = ql_read_serdes_reg(qdev, i,
+					&mpi_coredump->serdes_xaui_an[j++]);
+		if (status)
+			goto err;
+
+	}
+
+	for (i = 0x800, j = 0; i <= 0x880; i += 4) {
+		status = ql_read_serdes_reg(qdev, i,
+						&mpi_coredump->
+						serdes_xaui_hss_pcs[j++]);
+		if (status)
+			goto err;
+	}
+
+	for (i = 0x1000, j = 0; i <= 0x1034; i += 4) {
+		status = ql_read_serdes_reg(qdev, i,
+					&mpi_coredump->serdes_xfi_an[j++]);
+		if (status)
+			goto err;
+	}
+
+	for (i = 0x1050, j = 0; i <= 0x107c; i += 4) {
+		status = ql_read_serdes_reg(qdev, i,
+						&mpi_coredump->
+						serdes_xfi_train[j++]);
+		if (status)
+			goto err;
+	}
+
+	for (i = 0x1800, j = 0; i <= 0x1838; i += 4) {
+		status = ql_read_serdes_reg(qdev, i,
+						&mpi_coredump->
+						serdes_xfi_hss_pcs[j++]);
+		if (status)
+			goto err;
+	}
+
+	for (i = 0x1c00, j = 0; i <= 0x1c1f; i++) {
+		status = ql_read_serdes_reg(qdev, i,
+						&mpi_coredump->
+						serdes_xfi_hss_tx[j++]);
+		if (status)
+			goto err;
+	}
+
+	for (i = 0x1c40, j = 0; i <= 0x1c5f; i++) {
+		status = ql_read_serdes_reg(qdev, i,
+						&mpi_coredump->
+						serdes_xfi_hss_rx[j++]);
+		if (status)
+			goto err;
+	}
+
+	for (i = 0x1e00, j = 0; i <= 0x1e1f; i++) {
+		status = ql_read_serdes_reg(qdev, i,
+						&mpi_coredump->
+						serdes_xfi_hss_pll[j++]);
+		if (status)
+			goto err;
+	}
+
+err:
+	if (status)
+		QPRINTK(qdev, DRV, ERR,
+			"Serdes register 0x%.08x access error\n", i);
+
+	return status;
+}
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+	int status = 0;
+	int i;
+
+	for (i = 0; i < 8; i++, buf++) {
+		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+		*buf = ql_read32(qdev, NIC_ETS);
+	}
+
+	for (i = 0; i < 2; i++, buf++) {
+		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+		*buf = ql_read32(qdev, CNA_ETS);
+	}
+
+	return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+	int i;
+
+	for (i = 0; i < MAX_RX_RINGS; i++, buf++) {
+		ql_write32(qdev, INTR_EN,
+				qdev->intr_context[i].intr_read_mask);
+		*buf = ql_read32(qdev, INTR_EN);
+	}
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+	int i, status;
+	u32 value[3];
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+
+	for (i = 0; i < 16; i++) {
+		status = ql_get_mac_addr_reg(qdev,
+					MAC_ADDR_TYPE_CAM_MAC, i, value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of mac index register.\n");
+			goto err;
+		}
+		*buf++ = value[0];	/* lower MAC address */
+		*buf++ = value[1];	/* upper MAC address */
+		*buf++ = value[2];	/* output */
+	}
+	for (i = 0; i < 32; i++) {
+		status = ql_get_mac_addr_reg(qdev,
+					MAC_ADDR_TYPE_MULTI_MAC, i, value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of mac index register.\n");
+			goto err;
+		}
+		*buf++ = value[0];	/* lower Mcast address */
+		*buf++ = value[1];	/* upper Mcast address */
+	}
+err:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+	int status;
+	u32 value, i;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	for (i = 0; i < 16; i++) {
+		status = ql_get_routing_reg(qdev, i, &value);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed read of routing index register.\n");
+			goto err;
+		} else {
+			*buf++ = value;
+		}
+	}
+err:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+{
+	u32 i;
+	int status;
+
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+
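+	/*
+	 * The shadow registers are read indirectly (as the loop below
+	 * appears to do): write SHADOW_OFFSET with the register index in
+	 * bits 23:20 to RISC register 124, then read the value back
+	 * through RISC register 127.
+	 */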
+	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+		status = ql_write_mpi_reg(qdev, RISC_124,
+					  (SHADOW_OFFSET | i << 20));
+		if (status)
+			goto end;
+		status = ql_read_mpi_reg(qdev, RISC_127, buf);
+		if (status)
+			goto end;
+	}
+end:
+	return status;
+}
+
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
+				u32 offset, u32 count)
+{
+	int i, status = 0;
+	for (i = 0; i < count; i++, buf++) {
+		status = ql_read_mpi_reg(qdev, offset + i, buf);
+		if (status)
+			return status;
+	}
+	return status;
+}
+
+static void ql_build_coredump_seg_header(struct mpi_coredump_segment_header
+			*seg_hdr, u32 seg_number, u32 seg_size, u8 *desc)
+{
+	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+	seg_hdr->segNum = seg_number;
+	seg_hdr->segSize = seg_size;
+	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+	int status;
+	int i;
+
+	if (!mpi_coredump) {
+		QPRINTK(qdev, DRV, ERR,
+			"No memory available.\n");
+		return -ENOMEM;
+	}
+
+	/* Try to get the spinlock, but don't worry if
+	 * it isn't available.  If the firmware died it
+	 * might be holding the sem.
+	 */
+	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+	status = ql_pause_mpi_risc(qdev);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed RISC pause. Status = 0x%.08x\n", status);
+		goto err;
+	}
+
+	memset(&(mpi_coredump->mpi_global_header), 0,
+		sizeof(struct mpi_coredump_global_header));
+	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+	mpi_coredump->mpi_global_header.headerSize =
+		sizeof(struct mpi_coredump_global_header);
+	mpi_coredump->mpi_global_header.imageSize =
+		sizeof(struct ql_mpi_coredump);
+	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+		sizeof(mpi_coredump->mpi_global_header.idString));
+
+	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+				CORE_SEG_NUM,
+				sizeof(mpi_coredump->core_regs_seg_hdr) +
+				sizeof(mpi_coredump->mpi_core_regs) +
+				sizeof(mpi_coredump->mpi_core_sh_regs),
+				"Core Registers");
+	/* Get the MPI Core Registers */
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+				 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+	if (status)
+		goto err;
+	/* Get the 16 MPI shadow registers */
+	status = ql_get_mpi_shadow_regs(qdev,
+					&mpi_coredump->mpi_core_sh_regs[0]);
+	if (status)
+		goto err;
+
+	/* Get the Test Logic Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+				TEST_LOGIC_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->test_logic_regs),
+				"Test Logic Regs");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+				 TEST_REGS_ADDR, TEST_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the RMII Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+				RMII_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->rmii_regs),
+				"RMII Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+				 RMII_REGS_ADDR, RMII_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the FCMAC1 Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+				FCMAC1_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->fcmac1_regs),
+				"FCMAC1 Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+				 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the FCMAC2 Registers */
+
+	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+				FCMAC2_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->fcmac2_regs),
+				"FCMAC2 Registers");
+
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+				 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the FC1 MBX Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+				FC1_MBOX_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->fc1_mbx_regs),
+				"FC1 MBox Regs");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+				 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the IDE Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+				IDE_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->ide_regs),
+				"IDE Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+				 IDE_REGS_ADDR, IDE_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the NIC1 MBX Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+				NIC1_MBOX_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic1_mbx_regs),
+				"NIC1 MBox Regs");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+				 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the SMBus Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+				SMBUS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->smbus_regs),
+				"SMBus Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+				 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the FC2 MBX Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+				FC2_MBOX_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->fc2_mbx_regs),
+				"FC2 MBox Regs");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+				 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the NIC2 MBX Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+				NIC2_MBOX_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic2_mbx_regs),
+				"NIC2 MBox Regs");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+				 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the I2C Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+				I2C_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->i2c_regs),
+				"I2C Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+				 I2C_REGS_ADDR, I2C_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the MEMC Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+				MEMC_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->memc_regs),
+				"MEMC Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+				 MEMC_REGS_ADDR, MEMC_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the PBus Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+				PBUS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->pbus_regs),
+				"PBUS Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+				 PBUS_REGS_ADDR, PBUS_REGS_CNT);
+	if (status)
+		goto err;
+
+	/* Get the MDE Registers */
+	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+				MDE_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->mde_regs),
+				"MDE Registers");
+	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+				 MDE_REGS_ADDR, MDE_REGS_CNT);
+	if (status)
+		goto err;
+
+	ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+				XAUI_HSS_PCS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+
+				sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+				"XAUI HSS PCS Registers");
+
+	ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->serdes_xfi_an),
+				"XFI AN Registers");
+
+	ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+				XFI_TRAIN_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->serdes_xfi_train),
+				"XFI TRAIN Registers");
+
+	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+				XFI_HSS_PCS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+				"XFI HSS PCS Registers");
+
+	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+				XFI_HSS_TX_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+				"XFI HSS TX Registers");
+
+	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+				XFI_HSS_RX_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+				"XFI HSS RX Registers");
+
+	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+				XFI_HSS_PLL_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+				"XFI HSS PLL Registers");
+
+	status = ql_get_serdes_regs(qdev, mpi_coredump);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+			status);
+		goto err;
+	}
+
+	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+				MISC_NIC_INFO_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->misc_nic_info),
+				"MISC NIC INFO");
+	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+	mpi_coredump->misc_nic_info.function = qdev->func;
+
+	/* Get generic reg dump */
+	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+				NIC1_CONTROL_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_regs),
+				"NIC Registers");
+	for (i = 0; i < 64; i++)
+		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+	/* Get indexed register values. */
+	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+				INTR_STATES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->intr_states),
+				"INTR States");
+	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+				CAM_ENTRIES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->cam_entries),
+				"CAM Entries");
+	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+	if (status)
+		goto err;
+
+	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+				ROUTING_WORDS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_routing_words),
+				"Routing Words");
+	status = ql_get_routing_entries(qdev,
+			 &mpi_coredump->nic_routing_words[0]);
+	if (status)
+		goto err;
+
+	ql_build_coredump_seg_header(&mpi_coredump->xgmac_seg_hdr,
+				XGMAC_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->xgmac),
+				"XGMac Registers");
+	status = ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac[0]);
+	if (status)
+		goto err;
+
+	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, ETS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->ets),
+				"ETS Registers");
+	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+	if (status)
+		goto err;
+
+	/* clear the pause */
+	status = ql_unpause_mpi_risc(qdev);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed RISC unpause. Status = 0x%.08x\n", status);
+		goto err;
+	}
+
+	/* Reset the RISC so we can dump RAM */
+	status = ql_hard_reset_mpi_risc(qdev);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed RISC reset. Status = 0x%.08x\n", status);
+		goto err;
+	}
+
+	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+				WCS_RAM_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->code_ram),
+				"WCS RAM");
+	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+					CODE_RAM_ADDR, CODE_RAM_CNT);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
+		goto err;
+	}
+
+	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+				MEMC_RAM_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->memc_ram),
+				"MEMC RAM");
+	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+					MEMC_RAM_ADDR, MEMC_RAM_CNT);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
+		goto err;
+	}
+err:
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+	return status;
+}
+
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+			struct ql_mpi_coredump *mpi_coredump)
+{
+	int i;
+
+	memset(mpi_coredump, 0, sizeof(struct ql_mpi_coredump));
+	/* Fill in the global dump header. */
+	memset(&(mpi_coredump->mpi_global_header), 0,
+		sizeof(struct mpi_coredump_global_header));
+	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+	mpi_coredump->mpi_global_header.headerSize =
+		sizeof(struct mpi_coredump_global_header);
+	mpi_coredump->mpi_global_header.imageSize =
+		sizeof(struct ql_mpi_coredump);
+	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+		sizeof(mpi_coredump->mpi_global_header.idString));
+
+	/* Fill in the generic dump segment header. */
+	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+				NIC1_CONTROL_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header)
+				+ sizeof(mpi_coredump->nic_regs),
+				"NIC Registers");
+	/* Get generic reg dump */
+	for (i = 0; i < 64; i++)
+		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+	/*
+	 * If the dump has already been taken and is stored
+	 * in our internal buffer, then just copy it to
+	 * the user's buffer.  Otherwise, take a snapshot
+	 * of the general regs.
+	 */
+	if (qdev->core_is_dumped && qdev->mpi_coredump)
+		memcpy(buff, qdev->mpi_coredump,
+			sizeof(struct ql_mpi_coredump));
+	else
+		ql_gen_reg_dump(qdev, buff);
+}
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	for (i = 0; i < qdev->intr_count; i++) {
+		ql_write32(qdev, INTR_EN,
+			 qdev->intr_context[i].intr_read_mask);
+		value = ql_read32(qdev, INTR_EN);
+		printk(KERN_ERR PFX
+			"%s: Interrupt %d is %s.\n",
+			qdev->ndev->name, i,
+			(value & INTR_EN_EN ? "enabled" : "disabled"));
+	}
+}
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+	u32 data;
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
+		return;
+	}
+	ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
+	printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
+		qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
+	printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
+		qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
+	printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
+	printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
+	printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
+	printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
+		qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
+	printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
+	printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
+		qdev->ndev->name, data);
+	ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
+	printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
+		data);
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value[3];
+
+	i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (i)
+		return;
+
+	for (i = 0; i < 4; i++) {
+		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i,
+					value)) {
+			printk(KERN_ERR PFX
+				"%s: Failed read of mac index register.\n",
+				__func__);
+			ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+			return;
+		} else {
+			if (value[0])
+				printk(KERN_ERR PFX
+					"%s: CAM index %d CAM Lookup Lower = "
+					"0x%.08x:%.08x, Output = 0x%.08x.\n",
+					qdev->ndev->name, i, value[1], value[0],
+					value[2]);
+		}
+	}
+	for (i = 0; i < 32; i++) {
+		if (ql_get_mac_addr_reg
+			(qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+			printk(KERN_ERR PFX
+				"%s: Failed read of mac index register.\n",
+				__func__);
+			ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+			return;
+		} else {
+			if (value[0])
+				printk(KERN_ERR PFX
+					"%s: MCAST index %d CAM Lookup Lower "
+					"= 0x%.08x:%.08x.\n", qdev->ndev->name,
+					i, value[1], value[0]);
+		}
+	}
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+
+	i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (i)
+		return;
+
+	for (i = 0; i < 16; i++) {
+		value = 0;
+		if (ql_get_routing_reg(qdev, i, &value)) {
+			printk(KERN_ERR PFX
+				"%s: Failed read of routing index register.\n",
+				__func__);
+			ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+			return;
+		} else {
+			if (value)
+				printk(KERN_ERR PFX
+					"%s: Routing Mask %d = 0x%.08x.\n",
+					qdev->ndev->name, i, value);
+		}
+	}
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+void ql_dump_regs(struct ql_adapter *qdev)
+{
+	printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
+	printk(KERN_ERR PFX "SYS	 			= 0x%x.\n",
+		ql_read32(qdev, SYS));
+	printk(KERN_ERR PFX "RST_FO 			= 0x%x.\n",
+		ql_read32(qdev, RST_FO));
+	printk(KERN_ERR PFX "FSC 				= 0x%x.\n",
+		ql_read32(qdev, FSC));
+	printk(KERN_ERR PFX "CSR 				= 0x%x.\n",
+		ql_read32(qdev, CSR));
+	printk(KERN_ERR PFX "ICB_RID 			= 0x%x.\n",
+		ql_read32(qdev, ICB_RID));
+	printk(KERN_ERR PFX "ICB_L 				= 0x%x.\n",
+		ql_read32(qdev, ICB_L));
+	printk(KERN_ERR PFX "ICB_H 				= 0x%x.\n",
+		ql_read32(qdev, ICB_H));
+	printk(KERN_ERR PFX "CFG 				= 0x%x.\n",
+		ql_read32(qdev, CFG));
+	printk(KERN_ERR PFX "BIOS_ADDR 			= 0x%x.\n",
+		ql_read32(qdev, BIOS_ADDR));
+	printk(KERN_ERR PFX "STS 				= 0x%x.\n",
+		ql_read32(qdev, STS));
+	printk(KERN_ERR PFX "INTR_EN			= 0x%x.\n",
+		ql_read32(qdev, INTR_EN));
+	printk(KERN_ERR PFX "INTR_MASK 			= 0x%x.\n",
+		ql_read32(qdev, INTR_MASK));
+	printk(KERN_ERR PFX "ISR1 				= 0x%x.\n",
+		ql_read32(qdev, ISR1));
+	printk(KERN_ERR PFX "ISR2 				= 0x%x.\n",
+		ql_read32(qdev, ISR2));
+	printk(KERN_ERR PFX "ISR3 				= 0x%x.\n",
+		ql_read32(qdev, ISR3));
+	printk(KERN_ERR PFX "ISR4 				= 0x%x.\n",
+		ql_read32(qdev, ISR4));
+	printk(KERN_ERR PFX "REV_ID 			= 0x%x.\n",
+		ql_read32(qdev, REV_ID));
+	printk(KERN_ERR PFX "FRC_ECC_ERR 			= 0x%x.\n",
+		ql_read32(qdev, FRC_ECC_ERR));
+	printk(KERN_ERR PFX "ERR_STS 			= 0x%x.\n",
+		ql_read32(qdev, ERR_STS));
+	printk(KERN_ERR PFX "RAM_DBG_ADDR 			= 0x%x.\n",
+		ql_read32(qdev, RAM_DBG_ADDR));
+	printk(KERN_ERR PFX "RAM_DBG_DATA 			= 0x%x.\n",
+		ql_read32(qdev, RAM_DBG_DATA));
+	printk(KERN_ERR PFX "ECC_ERR_CNT 			= 0x%x.\n",
+		ql_read32(qdev, ECC_ERR_CNT));
+	printk(KERN_ERR PFX "SEM 				= 0x%x.\n",
+		ql_read32(qdev, SEM));
+	printk(KERN_ERR PFX "GPIO_1 			= 0x%x.\n",
+		ql_read32(qdev, GPIO_1));
+	printk(KERN_ERR PFX "GPIO_2 			= 0x%x.\n",
+		ql_read32(qdev, GPIO_2));
+	printk(KERN_ERR PFX "GPIO_3 			= 0x%x.\n",
+		ql_read32(qdev, GPIO_3));
+	printk(KERN_ERR PFX "XGMAC_ADDR 			= 0x%x.\n",
+		ql_read32(qdev, XGMAC_ADDR));
+	printk(KERN_ERR PFX "XGMAC_DATA 			= 0x%x.\n",
+		ql_read32(qdev, XGMAC_DATA));
+	printk(KERN_ERR PFX "NIC_ETS 			= 0x%x.\n",
+		ql_read32(qdev, NIC_ETS));
+	printk(KERN_ERR PFX "CNA_ETS 			= 0x%x.\n",
+		ql_read32(qdev, CNA_ETS));
+	printk(KERN_ERR PFX "FLASH_ADDR 			= 0x%x.\n",
+		ql_read32(qdev, FLASH_ADDR));
+	printk(KERN_ERR PFX "FLASH_DATA 			= 0x%x.\n",
+		ql_read32(qdev, FLASH_DATA));
+	printk(KERN_ERR PFX "CQ_STOP 			= 0x%x.\n",
+		ql_read32(qdev, CQ_STOP));
+	printk(KERN_ERR PFX "PAGE_TBL_RID 			= 0x%x.\n",
+		ql_read32(qdev, PAGE_TBL_RID));
+	printk(KERN_ERR PFX "WQ_PAGE_TBL_LO 		= 0x%x.\n",
+		ql_read32(qdev, WQ_PAGE_TBL_LO));
+	printk(KERN_ERR PFX "WQ_PAGE_TBL_HI 		= 0x%x.\n",
+		ql_read32(qdev, WQ_PAGE_TBL_HI));
+	printk(KERN_ERR PFX "CQ_PAGE_TBL_LO 		= 0x%x.\n",
+		ql_read32(qdev, CQ_PAGE_TBL_LO));
+	printk(KERN_ERR PFX "CQ_PAGE_TBL_HI 		= 0x%x.\n",
+		ql_read32(qdev, CQ_PAGE_TBL_HI));
+	printk(KERN_ERR PFX "COS_DFLT_CQ1 			= 0x%x.\n",
+		ql_read32(qdev, COS_DFLT_CQ1));
+	printk(KERN_ERR PFX "COS_DFLT_CQ2 			= 0x%x.\n",
+		ql_read32(qdev, COS_DFLT_CQ2));
+	printk(KERN_ERR PFX "SPLT_HDR 			= 0x%x.\n",
+		ql_read32(qdev, SPLT_HDR));
+	printk(KERN_ERR PFX "FC_PAUSE_THRES 		= 0x%x.\n",
+		ql_read32(qdev, FC_PAUSE_THRES));
+	printk(KERN_ERR PFX "NIC_PAUSE_THRES 		= 0x%x.\n",
+		ql_read32(qdev, NIC_PAUSE_THRES));
+	printk(KERN_ERR PFX "FC_ETHERTYPE 			= 0x%x.\n",
+		ql_read32(qdev, FC_ETHERTYPE));
+	printk(KERN_ERR PFX "FC_RCV_CFG 			= 0x%x.\n",
+		ql_read32(qdev, FC_RCV_CFG));
+	printk(KERN_ERR PFX "NIC_RCV_CFG 			= 0x%x.\n",
+		ql_read32(qdev, NIC_RCV_CFG));
+	printk(KERN_ERR PFX "FC_COS_TAGS 			= 0x%x.\n",
+		ql_read32(qdev, FC_COS_TAGS));
+	printk(KERN_ERR PFX "NIC_COS_TAGS 			= 0x%x.\n",
+		ql_read32(qdev, NIC_COS_TAGS));
+	printk(KERN_ERR PFX "MGMT_RCV_CFG 			= 0x%x.\n",
+		ql_read32(qdev, MGMT_RCV_CFG));
+	printk(KERN_ERR PFX "XG_SERDES_ADDR 		= 0x%x.\n",
+		ql_read32(qdev, XG_SERDES_ADDR));
+	printk(KERN_ERR PFX "XG_SERDES_DATA 		= 0x%x.\n",
+		ql_read32(qdev, XG_SERDES_DATA));
+	printk(KERN_ERR PFX "PRB_MX_ADDR 			= 0x%x.\n",
+		ql_read32(qdev, PRB_MX_ADDR));
+	printk(KERN_ERR PFX "PRB_MX_DATA 			= 0x%x.\n",
+		ql_read32(qdev, PRB_MX_DATA));
+	ql_dump_intr_states(qdev);
+	ql_dump_xgmac_control_regs(qdev);
+	ql_dump_ets_regs(qdev);
+	ql_dump_cam_entries(qdev);
+	ql_dump_routing_entries(qdev);
+}
+#endif
+
+#ifdef QL_STAT_DUMP
+void ql_dump_stat(struct ql_adapter *qdev)
+{
+	printk(KERN_ERR "%s: Enter.\n", __func__);
+	printk(KERN_ERR "tx_pkts = %ld\n",
+		(unsigned long)qdev->nic_stats.tx_pkts);
+	printk(KERN_ERR "tx_bytes = %ld\n",
+		(unsigned long)qdev->nic_stats.tx_bytes);
+	printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_mcast_pkts);
+	printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_bcast_pkts);
+	printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_ucast_pkts);
+	printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_ctl_pkts);
+	printk(KERN_ERR "tx_pause_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_pause_pkts);
+	printk(KERN_ERR "tx_64_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_64_pkt);
+	printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
+	printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
+	printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_256_511_pkt);
+	printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
+	printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
+	printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
+	printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_undersize_pkt);
+	printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
+		(unsigned long)qdev->nic_stats.tx_oversize_pkt);
+	printk(KERN_ERR "rx_bytes = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_bytes);
+	printk(KERN_ERR "rx_bytes_ok = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_bytes_ok);
+	printk(KERN_ERR "rx_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_pkts);
+	printk(KERN_ERR "rx_pkts_ok = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_pkts_ok);
+	printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_bcast_pkts);
+	printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_mcast_pkts);
+	printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_ucast_pkts);
+	printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_undersize_pkts);
+	printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_oversize_pkts);
+	printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_jabber_pkts);
+	printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
+	printk(KERN_ERR "rx_drop_events = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_drop_events);
+	printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_fcerr_pkts);
+	printk(KERN_ERR "rx_align_err = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_align_err);
+	printk(KERN_ERR "rx_symbol_err = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_symbol_err);
+	printk(KERN_ERR "rx_mac_err = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_mac_err);
+	printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_ctl_pkts);
+	printk(KERN_ERR "rx_pause_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_pause_pkts);
+	printk(KERN_ERR "rx_64_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_64_pkts);
+	printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
+	printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_128_255_pkts);
+	printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_256_511_pkts);
+	printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
+	printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
+	printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
+	printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
+		(unsigned long)qdev->nic_stats.rx_len_err_pkts);
+}
+#endif
+
+#ifdef QL_DEV_DUMP
+void ql_dump_qdev(struct ql_adapter *qdev)
+{
+	int i;
+	printk(KERN_ERR PFX "qdev->flags 			= %lx.\n",
+		qdev->flags);
+	printk(KERN_ERR PFX "qdev->vlgrp 			= %p.\n",
+		qdev->vlgrp);
+	printk(KERN_ERR PFX "qdev->pdev 			= %p.\n",
+		qdev->pdev);
+	printk(KERN_ERR PFX "qdev->ndev 			= %p.\n",
+		qdev->ndev);
+	printk(KERN_ERR PFX "qdev->chip_rev_id 		= %d.\n",
+		qdev->chip_rev_id);
+	printk(KERN_ERR PFX "qdev->reg_base 		= %p.\n",
+		qdev->reg_base);
+	printk(KERN_ERR PFX "qdev->doorbell_area 	= %p.\n",
+		qdev->doorbell_area);
+	printk(KERN_ERR PFX "qdev->doorbell_area_size 	= %d.\n",
+		qdev->doorbell_area_size);
+	printk(KERN_ERR PFX "msg_enable 		= %x.\n",
+		qdev->msg_enable);
+	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area	= %p.\n",
+		qdev->rx_ring_shadow_reg_area);
+	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma 	= %llx.\n",
+		(unsigned long long) qdev->rx_ring_shadow_reg_dma);
+	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area	= %p.\n",
+		qdev->tx_ring_shadow_reg_area);
+	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma	= %llx.\n",
+		(unsigned long long) qdev->tx_ring_shadow_reg_dma);
+	printk(KERN_ERR PFX "qdev->intr_count 		= %d.\n",
+		qdev->intr_count);
+	if (qdev->msi_x_entry)
+		for (i = 0; i < qdev->intr_count; i++) {
+			printk(KERN_ERR PFX
+				"msi_x_entry.[%d]vector	= %d.\n", i,
+				qdev->msi_x_entry[i].vector);
+			printk(KERN_ERR PFX
+				"msi_x_entry.[%d]entry	= %d.\n", i,
+				qdev->msi_x_entry[i].entry);
+		}
+	for (i = 0; i < qdev->intr_count; i++) {
+		printk(KERN_ERR PFX
+			"intr_context[%d].qdev		= %p.\n", i,
+			qdev->intr_context[i].qdev);
+		printk(KERN_ERR PFX
+			"intr_context[%d].intr		= %d.\n", i,
+			qdev->intr_context[i].intr);
+		printk(KERN_ERR PFX
+			"intr_context[%d].hooked		= %d.\n", i,
+			qdev->intr_context[i].hooked);
+		printk(KERN_ERR PFX
+			"intr_context[%d].intr_en_mask	= 0x%08x.\n", i,
+			qdev->intr_context[i].intr_en_mask);
+		printk(KERN_ERR PFX
+			"intr_context[%d].intr_dis_mask	= 0x%08x.\n", i,
+			qdev->intr_context[i].intr_dis_mask);
+		printk(KERN_ERR PFX
+			"intr_context[%d].intr_read_mask	= 0x%08x.\n", i,
+			qdev->intr_context[i].intr_read_mask);
+	}
+	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
+	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
+	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
+	printk(KERN_ERR PFX "qdev->ring_mem 	= %p.\n", qdev->ring_mem);
+	printk(KERN_ERR PFX "qdev->intr_count 	= %d.\n", qdev->intr_count);
+	printk(KERN_ERR PFX "qdev->tx_ring		= %p.\n",
+		qdev->tx_ring);
+	printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id 	= %d.\n",
+		qdev->rss_ring_first_cq_id);
+	printk(KERN_ERR PFX "qdev->rss_ring_count 	= %d.\n",
+		qdev->rss_ring_count);
+	printk(KERN_ERR PFX "qdev->rx_ring	= %p.\n", qdev->rx_ring);
+	printk(KERN_ERR PFX "qdev->default_rx_queue	= %d.\n",
+		qdev->default_rx_queue);
+	printk(KERN_ERR PFX "qdev->xg_sem_mask		= 0x%08x.\n",
+		qdev->xg_sem_mask);
+	printk(KERN_ERR PFX "qdev->port_link_up		= 0x%08x.\n",
+		qdev->port_link_up);
+	printk(KERN_ERR PFX "qdev->port_init		= 0x%08x.\n",
+		qdev->port_init);
+}
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb)
+{
+	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
+	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
+	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
+	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
+		le16_to_cpu(wqicb->cq_id_rss));
+	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
+	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
+		(unsigned long long) le64_to_cpu(wqicb->addr));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
+		(unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
+}
+
+void ql_dump_tx_ring(struct tx_ring *tx_ring)
+{
+	if (tx_ring == NULL)
+		return;
+	printk(KERN_ERR PFX
+		"===================== Dumping tx_ring %d ===============.\n",
+		tx_ring->wq_id);
+	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
+	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
+		(unsigned long long) tx_ring->wq_base_dma);
+	printk(KERN_ERR PFX
+		"tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
+		tx_ring->cnsmr_idx_sh_reg,
+		tx_ring->cnsmr_idx_sh_reg
+			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
+	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
+	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
+	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
+		tx_ring->prod_idx_db_reg);
+	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
+		tx_ring->valid_db_reg);
+	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
+	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
+	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
+	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
+	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
+		atomic_read(&tx_ring->tx_count));
+}
+
+void ql_dump_ricb(struct ricb *ricb)
+{
+	int i;
+	printk(KERN_ERR PFX
+		"===================== Dumping ricb ===============.\n");
+	printk(KERN_ERR PFX "Dumping ricb stuff...\n");
+
+	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
+	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
+		ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+		ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+		ricb->flags & RSS_LI ? "RSS_LI " : "",
+		ricb->flags & RSS_LB ? "RSS_LB " : "",
+		ricb->flags & RSS_LM ? "RSS_LM " : "",
+		ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+		ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+		ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+		ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
+	for (i = 0; i < 16; i++)
+		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
+			le32_to_cpu(ricb->hash_cq_id[i]));
+	for (i = 0; i < 10; i++)
+		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
+			le32_to_cpu(ricb->ipv6_hash_key[i]));
+	for (i = 0; i < 4; i++)
+		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
+			le32_to_cpu(ricb->ipv4_hash_key[i]));
+}
+
+void ql_dump_cqicb(struct cqicb *cqicb)
+{
+	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
+
+	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
+	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
+	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
+	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
+		(unsigned long long) le64_to_cpu(cqicb->addr));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
+		(unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
+	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
+		le16_to_cpu(cqicb->pkt_delay));
+	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
+		le16_to_cpu(cqicb->irq_delay));
+	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
+		(unsigned long long) le64_to_cpu(cqicb->lbq_addr));
+	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
+		le16_to_cpu(cqicb->lbq_buf_size));
+	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
+		le16_to_cpu(cqicb->lbq_len));
+	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
+		(unsigned long long) le64_to_cpu(cqicb->sbq_addr));
+	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
+		le16_to_cpu(cqicb->sbq_buf_size));
+	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
+		le16_to_cpu(cqicb->sbq_len));
+}
+
+void ql_dump_rx_ring(struct rx_ring *rx_ring)
+{
+	if (rx_ring == NULL)
+		return;
+	printk(KERN_ERR PFX
+		"===================== Dumping rx_ring %d ===============.\n",
+		rx_ring->cq_id);
+	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
+		rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
+		rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
+		rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
+	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
+	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
+		(unsigned long long) rx_ring->cq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
+	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
+	printk(KERN_ERR PFX
+		"rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
+		rx_ring->prod_idx_sh_reg,
+		rx_ring->prod_idx_sh_reg
+			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
+	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
+		(unsigned long long) rx_ring->prod_idx_sh_reg_dma);
+	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
+		rx_ring->cnsmr_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
+	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
+	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
+		rx_ring->valid_db_reg);
+
+	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
+		(unsigned long long) rx_ring->lbq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
+		rx_ring->lbq_base_indirect);
+	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
+		(unsigned long long) rx_ring->lbq_base_indirect_dma);
+	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
+	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
+	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
+	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
+		rx_ring->lbq_prod_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
+		rx_ring->lbq_prod_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
+		rx_ring->lbq_curr_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
+		rx_ring->lbq_clean_idx);
+	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
+		rx_ring->lbq_free_cnt);
+	printk(KERN_ERR PFX "rx_ring->lbq_buf_map_size = %d.\n",
+		rx_ring->lbq_buf_map_size);
+
+	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
+		(unsigned long long) rx_ring->sbq_base_dma);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
+		rx_ring->sbq_base_indirect);
+	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
+		(unsigned long long) rx_ring->sbq_base_indirect_dma);
+	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
+	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
+	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
+	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
+		rx_ring->sbq_prod_idx_db_reg);
+	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
+		rx_ring->sbq_prod_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
+		rx_ring->sbq_curr_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
+		rx_ring->sbq_clean_idx);
+	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
+		rx_ring->sbq_free_cnt);
+	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
+		rx_ring->sbq_buf_size);
+	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
+	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
+	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
+	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
+}
+
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
+{
+	void *ptr;
+
+	printk(KERN_ERR PFX "%s: Enter.\n", __func__);
+
+	ptr = kmalloc(size, GFP_ATOMIC);
+	if (ptr == NULL) {
+		printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
+			__func__);
+		return;
+	}
+
+	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
+		printk(KERN_ERR "%s: Failed to upload control block!\n",
+			__func__);
+		goto fail_it;
+	}
+	switch (bit) {
+	case CFG_DRQ:
+		ql_dump_wqicb((struct wqicb *)ptr);
+		break;
+	case CFG_DCQ:
+		ql_dump_cqicb((struct cqicb *)ptr);
+		break;
+	case CFG_DR:
+		ql_dump_ricb((struct ricb *)ptr);
+		break;
+	default:
+		printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
+			__func__, bit);
+		break;
+	}
+fail_it:
+	kfree(ptr);
+}
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+{
+	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+		le64_to_cpu((u64) tbd->addr));
+	printk(KERN_ERR PFX "tbd->len	= %d\n",
+		le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+		tbd->len & TX_DESC_C ? "C" : ".",
+		tbd->len & TX_DESC_E ? "E" : ".");
+	tbd++;
+	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+		le64_to_cpu((u64) tbd->addr));
+	printk(KERN_ERR PFX "tbd->len	= %d\n",
+		le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+		tbd->len & TX_DESC_C ? "C" : ".",
+		tbd->len & TX_DESC_E ? "E" : ".");
+	tbd++;
+	printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+		le64_to_cpu((u64) tbd->addr));
+	printk(KERN_ERR PFX "tbd->len	= %d\n",
+		le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+	printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+		tbd->len & TX_DESC_C ? "C" : ".",
+		tbd->len & TX_DESC_E ? "E" : ".");
+}
+
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+{
+	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+		(struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+	struct tx_buf_desc *tbd;
+	u16 frame_len;
+
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode	 = %s\n",
+		(ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+	printk(KERN_ERR PFX "flags1	  = %s %s %s %s %s\n",
+		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+		ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+	printk(KERN_ERR PFX "flags2	  = %s %s %s\n",
+		ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+		ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+		ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+	printk(KERN_ERR PFX "flags3	  = %s %s %s \n",
+		ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+		ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+		ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+	printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
+	printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
+	printk(KERN_ERR PFX "vlan_tci	= %x\n", ob_mac_tso_iocb->vlan_tci);
+	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
+		printk(KERN_ERR PFX "frame_len	= %d\n",
+			le32_to_cpu(ob_mac_tso_iocb->frame_len));
+		printk(KERN_ERR PFX "mss	= %d\n",
+			le16_to_cpu(ob_mac_tso_iocb->mss));
+		printk(KERN_ERR PFX "prot_hdr_len	= %d\n",
+			le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+		printk(KERN_ERR PFX "hdr_offset	= 0x%.04x\n",
+			le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
+	} else {
+		printk(KERN_ERR PFX "frame_len	= %d\n",
+			le16_to_cpu(ob_mac_iocb->frame_len));
+		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+	}
+	tbd = &ob_mac_iocb->tbd[0];
+	ql_dump_tx_desc(tbd);
+}
+
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+{
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode	 = %d\n", ob_mac_rsp->opcode);
+	printk(KERN_ERR PFX "flags	  = %s %s %s %s %s %s %s\n",
+		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
+		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+		ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+		ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
+}
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	printk(KERN_ERR PFX "%s\n", __func__);
+	printk(KERN_ERR PFX "opcode	 = 0x%x\n", ib_mac_rsp->opcode);
+	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
+		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+		ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
+		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
+		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
+			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+			IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+			IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+			IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+			IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+			IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+	printk(KERN_ERR PFX "flags3 = %s%s.\n",
+		ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+		ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
+			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+			((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+	printk(KERN_ERR PFX "data_len	= %d\n",
+		le32_to_cpu(ib_mac_rsp->data_len));
+	printk(KERN_ERR PFX "data_addr	= 0x%llx\n",
+		(unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+		printk(KERN_ERR PFX "rss	= %x\n",
+			le32_to_cpu(ib_mac_rsp->rss));
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
+		printk(KERN_ERR PFX "vlan_id	= %x\n",
+			le16_to_cpu(ib_mac_rsp->vlan_id));
+
+	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+		printk(KERN_ERR PFX "hdr length	= %d.\n",
+			le32_to_cpu(ib_mac_rsp->hdr_len));
+		printk(KERN_ERR PFX "hdr addr	= 0x%llx.\n",
+			(unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
+	}
+}
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev)
+{
+	int i;
+
+	QL_DUMP_REGS(qdev);
+	QL_DUMP_QDEV(qdev);
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
+		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
+	}
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
+		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
+	}
+}
+#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 0000000..efacbee
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,680 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <linux/version.h>
+
+#include "qlge.h"
+
+#ifdef ETHTOOL_TEST
+static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Loopback test  (offline)"
+};
+#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+{
+	int i, status = 0;
+	struct rx_ring *rx_ring;
+	struct cqicb *cqicb;
+
+	if (!netif_running(qdev->ndev))
+		return status;
+
+	spin_lock(&qdev->hw_lock);
+	/* Skip the default queue, and update the outbound handler
+	 * queues if they changed.
+	 */
+	cqicb = (struct cqicb *)&qdev->rx_ring[1];
+	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
+		le16_to_cpu(cqicb->pkt_delay) !=
+					qdev->tx_max_coalesced_frames) {
+		for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
+			rx_ring = &qdev->rx_ring[i];
+			cqicb = (struct cqicb *)rx_ring;
+			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+			cqicb->pkt_delay =
+				cpu_to_le16(qdev->tx_max_coalesced_frames);
+			cqicb->flags = FLAGS_LI;
+			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+						CFG_LCQ, rx_ring->cq_id);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed to load CQICB.\n");
+				goto exit;
+			}
+		}
+	}
+
+	/* Update the inbound (RSS) handler queues if they changed. */
+	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
+	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
+		le16_to_cpu(cqicb->pkt_delay) !=
+					 qdev->rx_max_coalesced_frames) {
+		for (i = qdev->rss_ring_first_cq_id;
+			i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
+			i++) {
+			rx_ring = &qdev->rx_ring[i];
+			cqicb = (struct cqicb *)rx_ring;
+			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+			cqicb->pkt_delay =
+				cpu_to_le16(qdev->rx_max_coalesced_frames);
+			cqicb->flags = FLAGS_LI;
+			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+						CFG_LCQ, rx_ring->cq_id);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed to load CQICB.\n");
+				goto exit;
+			}
+		}
+	}
+exit:
+	spin_unlock(&qdev->hw_lock);
+	return status;
+}
+
+static void ql_update_stats(struct ql_adapter *qdev)
+{
+	u32 i;
+	u64 data;
+	u64 *iter = &qdev->nic_stats.tx_pkts;
+
+	spin_lock(&qdev->stats_lock);
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		QPRINTK(qdev, DRV, ERR,
+			"Couldn't get xgmac sem.\n");
+		goto quit;
+	}
+	/*
+	 * Get TX statistics.
+	 */
+	for (i = 0x200; i < 0x280; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+	/*
+	 * Get RX statistics.
+	 */
+	for (i = 0x300; i < 0x3d0; i += 8) {
+		if (ql_read_xgmac_reg64(qdev, i, &data)) {
+			QPRINTK(qdev, DRV, ERR,
+				"Error reading status register 0x%.04x.\n", i);
+			goto end;
+		} else
+			*iter = data;
+		iter++;
+	}
+
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+	spin_unlock(&qdev->stats_lock);
+
+	QL_DUMP_STAT(qdev);
+
+	return;
+}
+
+static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
+	{"tx_pkts"},
+	{"tx_bytes"},
+	{"tx_mcast_pkts"},
+	{"tx_bcast_pkts"},
+	{"tx_ucast_pkts"},
+	{"tx_ctl_pkts"},
+	{"tx_pause_pkts"},
+	{"tx_64_pkts"},
+	{"tx_65_to_127_pkts"},
+	{"tx_128_to_255_pkts"},
+	{"tx_256_511_pkts"},
+	{"tx_512_to_1023_pkts"},
+	{"tx_1024_to_1518_pkts"},
+	{"tx_1519_to_max_pkts"},
+	{"tx_undersize_pkts"},
+	{"tx_oversize_pkts"},
+	{"rx_bytes"},
+	{"rx_bytes_ok"},
+	{"rx_pkts"},
+	{"rx_pkts_ok"},
+	{"rx_bcast_pkts"},
+	{"rx_mcast_pkts"},
+	{"rx_ucast_pkts"},
+	{"rx_undersize_pkts"},
+	{"rx_oversize_pkts"},
+	{"rx_jabber_pkts"},
+	{"rx_undersize_fcerr_pkts"},
+	{"rx_drop_events"},
+	{"rx_fcerr_pkts"},
+	{"rx_align_err"},
+	{"rx_symbol_err"},
+	{"rx_mac_err"},
+	{"rx_ctl_pkts"},
+	{"rx_pause_pkts"},
+	{"rx_64_pkts"},
+	{"rx_65_to_127_pkts"},
+	{"rx_128_255_pkts"},
+	{"rx_256_511_pkts"},
+	{"rx_512_to_1023_pkts"},
+	{"rx_1024_to_1518_pkts"},
+	{"rx_1519_to_max_pkts"},
+	{"rx_len_err_pkts"},
+};
+
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(buf, *ql_gstrings_test,
+			QLGE_TEST_LEN * ETH_GSTRING_LEN);
+		break;
+
+	case ETH_SS_STATS:
+		memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+		break;
+	}
+}
+
+static int ql_get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(ql_stats_str_arr);
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+			struct ethtool_stats *stats, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	struct nic_stats *s = &qdev->nic_stats;
+
+	ql_update_stats(qdev);
+
+	*data++ = s->tx_pkts;
+	*data++ = s->tx_bytes;
+	*data++ = s->tx_mcast_pkts;
+	*data++ = s->tx_bcast_pkts;
+	*data++ = s->tx_ucast_pkts;
+	*data++ = s->tx_ctl_pkts;
+	*data++ = s->tx_pause_pkts;
+	*data++ = s->tx_64_pkt;
+	*data++ = s->tx_65_to_127_pkt;
+	*data++ = s->tx_128_to_255_pkt;
+	*data++ = s->tx_256_511_pkt;
+	*data++ = s->tx_512_to_1023_pkt;
+	*data++ = s->tx_1024_to_1518_pkt;
+	*data++ = s->tx_1519_to_max_pkt;
+	*data++ = s->tx_undersize_pkt;
+	*data++ = s->tx_oversize_pkt;
+	*data++ = s->rx_bytes;
+	*data++ = s->rx_bytes_ok;
+	*data++ = s->rx_pkts;
+	*data++ = s->rx_pkts_ok;
+	*data++ = s->rx_bcast_pkts;
+	*data++ = s->rx_mcast_pkts;
+	*data++ = s->rx_ucast_pkts;
+	*data++ = s->rx_undersize_pkts;
+	*data++ = s->rx_oversize_pkts;
+	*data++ = s->rx_jabber_pkts;
+	*data++ = s->rx_undersize_fcerr_pkts;
+	*data++ = s->rx_drop_events;
+	*data++ = s->rx_fcerr_pkts;
+	*data++ = s->rx_align_err;
+	*data++ = s->rx_symbol_err;
+	*data++ = s->rx_mac_err;
+	*data++ = s->rx_ctl_pkts;
+	*data++ = s->rx_pause_pkts;
+	*data++ = s->rx_64_pkts;
+	*data++ = s->rx_65_to_127_pkts;
+	*data++ = s->rx_128_255_pkts;
+	*data++ = s->rx_256_511_pkts;
+	*data++ = s->rx_512_to_1023_pkts;
+	*data++ = s->rx_1024_to_1518_pkts;
+	*data++ = s->rx_1519_to_max_pkts;
+	*data++ = s->rx_len_err_pkts;
+}
+
+static int ql_get_settings(struct net_device *ndev,
+				struct ethtool_cmd *ecmd)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->advertising = ADVERTISED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_ENABLE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
+			STS_LINK_TYPE_10GBASET) {
+		ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+		ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+		ecmd->port = PORT_TP;
+	} else {
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+	}
+
+	ecmd->speed = SPEED_10000;
+	ecmd->duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+				struct ethtool_drvinfo *drvinfo)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	strncpy(drvinfo->driver, qlge_driver_name, 32);
+	strncpy(drvinfo->version, qlge_driver_version, 32);
+	snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+		 (qdev->fw_rev_id & 0x00ff0000) >> 16,
+		 (qdev->fw_rev_id & 0x0000ff00) >> 8,
+		 (qdev->fw_rev_id & 0x000000ff));
+	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+	drvinfo->n_stats = ARRAY_SIZE(ql_stats_str_arr);
+	drvinfo->testinfo_len = QLGE_TEST_LEN;
+	drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+	drvinfo->eedump_len = 0;
+}
+
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	/* What we support. */
+	wol->supported = WAKE_MAGIC;
+	/* What we've currently got set. */
+	wol->wolopts = qdev->wol;
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+	qdev->wol = wol->wolopts;
+
+	QPRINTK(qdev, DRV, DEBUG, "Set wol option 0x%x on %s\n",
+			 qdev->wol, ndev->name);
+	if (!qdev->wol) {
+		u32 wol = 0;
+		u8 zero_mac_addr[6];
+
+		memset(zero_mac_addr, 0, sizeof(zero_mac_addr));
+		/* Disabling WOL outright does not work, so enable WOL
+		 * with an invalid (zeroed) MAC address instead.
+		 */
+		status = ql_mb_wol_set_magic(qdev, 0);
+		if (status) {
+			QPRINTK(qdev, IFDOWN, ERR,
+				"Failed to clear magic packet on %s\n",
+				qdev->ndev->name);
+			return status;
+		} else
+			QPRINTK(qdev, DRV, INFO,
+				"Cleared magic packet successfully on %s!\n",
+				qdev->ndev->name);
+
+		wol |= MB_WOL_MAGIC_PKT;
+		wol |= MB_WOL_MODE_ON;
+		status = ql_mb_wol_mode(qdev, wol);
+
+		QPRINTK(qdev, DRV, DEBUG, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "cleared successfully" : "clear failed",
+			wol, qdev->ndev->name);
+	}
+
+	return 0;
+}
+
+static int ql_phys_id(struct net_device *ndev, u32 data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 led_reg, i;
+	int status;
+
+	/* Save the current LED settings */
+	status = ql_mb_get_led_cfg(qdev);
+	if (status)
+		return status;
+	led_reg = qdev->led_config;
+
+	/* Start blinking the led */
+	if (!data || data > 300)
+		data = 300;
+
+	for (i = 0; i < (data * 10); i++)
+		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+
+	/* Restore LED settings */
+	status = ql_mb_set_led_cfg(qdev, led_reg);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+static int ql_setup_loopback_test(struct ql_adapter *qdev)
+{
+	int status = 0;
+
+	status = ql_mb_get_port_cfg(qdev);
+	if (status)
+		return status;
+	qdev->link_config |= CFG_LOOPBACK_PCS;
+	if (netif_carrier_ok(qdev->ndev))
+		netif_carrier_off(qdev->ndev);
+
+	status = ql_mb_set_port_cfg(qdev);
+	return status;
+}
+
+void ql_loopback_cleanup(struct ql_adapter *qdev)
+{
+	qdev->link_config &= ~CFG_LOOPBACK_PCS;
+	ql_mb_set_port_cfg(qdev);
+	if (!netif_carrier_ok(qdev->ndev))
+		netif_carrier_on(qdev->ndev);
+}
+
+static void ql_create_lbtest_frame(struct sk_buff *skb,
+					unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
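+/* Verify the 0xBE/0xAF marker bytes that ql_create_lbtest_frame() wrote
+ * into the second half of the frame; returns 0 on a match.
+ */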
+int ql_check_lbtest_frame(struct sk_buff *skb,
+					unsigned int frame_size)
+{
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+			(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+void ql_check_receive_frame(struct sk_buff *skb)
+{
+	unsigned long time;
+	int good_cnt = 0, ret_val = 0;
+
+	time = jiffies;
+	good_cnt = 0;
+	do {
+		ret_val = ql_check_lbtest_frame(skb, 256);
+		if (!ret_val)
+			good_cnt++;
+	} while (good_cnt < 64 && jiffies < (time + 20));
+
+	if (good_cnt != 64)
+		ret_val = 2;
+
+	if (jiffies >= (time + 20))
+		ret_val = 3;
+}
+
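+/* Transmit 64 copies of the loopback test frame, then wait briefly
+ * to allow the frames to loop back through the chip.
+ */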
+static int ql_run_loopback_test(struct ql_adapter *qdev)
+{
+	int i, ret_val = 0;
+	struct sk_buff *skb;
+	unsigned int size = 256;
+
+	for (i = 0; i < 64; i++) {
+		skb = alloc_skb(size, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 1;
+			goto err_nomem;
+		}
+		skb_put(skb, size);
+		ql_create_lbtest_frame(skb, size);
+		qlge_send(skb, qdev->ndev);
+	}
+	msleep(200);
+
+err_nomem:
+	return ret_val;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+	*data = ql_setup_loopback_test(qdev);
+	if (*data)
+		goto out;
+
+	*data = ql_run_loopback_test(qdev);
+	ql_loopback_cleanup(qdev);
+out:
+	return *data;
+}
+
+static int ql_self_test_count(struct net_device *ndev)
+{
+	return QLGE_TEST_LEN;
+}
+
+static void ql_self_test(struct net_device *ndev,
+				struct ethtool_test *eth_test, u64 *data)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		set_bit(QL_TESTING, &qdev->flags);
+		if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+			/* Offline tests */
+			if (ql_loopback_test(qdev, &data[0]))
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+
+			clear_bit(QL_TESTING, &qdev->flags);
+		} else {
+			/* Online tests aren't run; pass by default. */
+			data[0] = 0;
+			clear_bit(QL_TESTING, &qdev->flags);
+		}
+	} else {
+		QPRINTK(qdev, DRV, ERR,
+			"%s is down, loopback test will fail.\n", ndev->name);
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+	msleep_interruptible(4 * 1000);
+}
+
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+	return sizeof(struct ql_mpi_coredump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+			struct ethtool_regs *regs, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ql_get_dump(qdev, p);
+	qdev->core_is_dumped = 0;
+	regs->len = sizeof(struct ql_mpi_coredump);
+}
+
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(dev);
+
+	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+	/* This chip coalesces as follows:
+	 * If a packet arrives, hold off interrupts until
+	 * cqicb->irq_delay expires, but if no other packets arrive don't
+	 * wait longer than cqicb->pkt_delay.  But ethtool doesn't use a
+	 * timer to coalesce on a frame basis.  So, we have to take ethtool's
+	 * max_coalesced_frames value and convert it to a delay in microseconds.
+	 * We do this by assuming a basic throughput of 1,000,000 frames per
+	 * second @ (1024 bytes).  This means one frame per usec, so it's a
+	 * simple one-to-one ratio.
+	 */
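+	/* Illustration of the conversion above: with one frame per usec,
+	 * an ethtool request of rx_max_coalesced_frames = 5 is programmed
+	 * into the CQICB as a pkt_delay of 5 usec.
+	 */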
+	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+	return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/* Validate user parameters. */
+	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+		return -EINVAL;
+	/* Don't wait more than 10 usec. */
+	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+		return -EINVAL;
+	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+
+	/* Verify a change took place before updating the hardware. */
+	if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
+		qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
+		qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
+		qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
+		return 0;
+
+	qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
+	qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
+	qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
+	qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
+
+	return ql_update_ring_coalescing(qdev);
+}
+
+static u32 ql_get_rx_csum(struct net_device *netdev)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	return qdev->rx_csum;
+}
+
+static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+	qdev->rx_csum = data;
+	return 0;
+}
+
+static u32 ql_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int ql_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+	if (data)
+		netdev->features |= NETIF_F_IP_CSUM;
+	else
+		netdev->features &= ~NETIF_F_IP_CSUM;
+	return 0;
+}
+
+static int ql_set_tso(struct net_device *ndev, uint32_t data)
+{
+
+	if (data) {
+		ndev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		ndev->features |= NETIF_F_TSO6;
+#endif
+	} else {
+		ndev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		ndev->features &= ~NETIF_F_TSO6;
+#endif
+	}
+	return 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	qdev->msg_enable = value;
+}
+
+struct ethtool_ops qlge_ethtool_ops = {
+	.get_settings		 = ql_get_settings,
+	.get_drvinfo		 = ql_get_drvinfo,
+	.get_wol		 = ql_get_wol,
+	.set_wol		 = ql_set_wol,
+	.get_regs_len		 = ql_get_regs_len,
+	.get_regs		 = ql_get_regs,
+	.get_msglevel		 = ql_get_msglevel,
+	.set_msglevel		 = ql_set_msglevel,
+	.get_link		 = ethtool_op_get_link,
+	.phys_id		 = ql_phys_id,
+	.self_test_count	 = ql_self_test_count,
+	.self_test		 = ql_self_test,
+	.get_rx_csum		 = ql_get_rx_csum,
+	.set_rx_csum		 = ql_set_rx_csum,
+	.get_tx_csum		 = ql_get_tx_csum,
+	.set_tx_csum		 = ql_set_tx_csum,
+	.get_sg			 = ethtool_op_get_sg,
+	.set_sg			 = ethtool_op_set_sg,
+	.get_tso		 = ethtool_op_get_tso,
+	.set_tso		 = ql_set_tso,
+	.get_coalesce		 = ql_get_coalesce,
+	.set_coalesce		 = ql_set_coalesce,
+	.get_stats_count	 = ql_get_stats_count,
+	.get_strings		 = ql_get_strings,
+	.get_ethtool_stats	 = ql_get_ethtool_stats,
+};
+
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 0000000..00134c3
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,4496 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c)  2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author:	Linux qlge network device driver by
+ *			Ron Mercer <ron.mercer@qlogic.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <net/ip6_checksum.h>
+
+#include "qlge.h"
+
+char qlge_driver_name[] = DRV_NAME;
+const char qlge_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
+MODULE_DESCRIPTION(DRV_STRING " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
+static const u32 default_msg =
+	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+	NETIF_MSG_IFDOWN |
+	NETIF_MSG_IFUP |
+	NETIF_MSG_RX_ERR |
+	NETIF_MSG_TX_ERR |
+	NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+
+static int debug = 0x00007fff;	/* defaults above */
+module_param(debug, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+#define MSIX_IRQ 0
+#define MSI_IRQ 1
+#define LEG_IRQ 2
+static int qlge_irq_type = MSIX_IRQ;
+module_param(qlge_irq_type, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+
+int qlge_mpi_coredump = 1;
+module_param(qlge_mpi_coredump, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+		"Option to enable allocation of memory for an MPI "
+		"firmware dump. Default is 1 - allocate memory.");
+
+static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+		{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
+		{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
+
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+			u16 index);
+
+/* This hardware semaphore provides exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCoE firmware and the FC driver.
+ */
+static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	u32 sem_bits = 0;
+
+	switch (sem_mask) {
+	case SEM_XGMAC0_MASK:
+		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+		break;
+	case SEM_XGMAC1_MASK:
+		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+		break;
+	case SEM_ICB_MASK:
+		sem_bits = SEM_SET << SEM_ICB_SHIFT;
+		break;
+	case SEM_MAC_ADDR_MASK:
+		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+		break;
+	case SEM_FLASH_MASK:
+		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+		break;
+	case SEM_PROBE_MASK:
+		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+		break;
+	case SEM_RT_IDX_MASK:
+		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+		break;
+	case SEM_PROC_REG_MASK:
+		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+		break;
+	default:
+		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
+		return -EINVAL;
+	}
+
+	ql_write32(qdev, SEM, sem_bits | sem_mask);
+	return !(ql_read32(qdev, SEM) & sem_bits);
+}
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	unsigned int wait_count = 30;
+	do {
+		if (!ql_sem_trylock(qdev, sem_mask))
+			return 0;
+		udelay(100);
+	} while (--wait_count);
+	return -ETIMEDOUT;
+}
+
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+	ql_write32(qdev, SEM, sem_mask);
+	ql_read32(qdev, SEM);	/* flush */
+}
+
+/* This function waits for a specific bit to come ready
+ * in a given register.  It is used mostly by the initialization
+ * process, but is also used from kernel thread context such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+{
+	u32 temp;
+	int count = UDELAY_COUNT;
+
+	while (count) {
+		temp = ql_read32(qdev, reg);
+
+		/* check for errors */
+		if (temp & err_bit) {
+			QPRINTK(qdev, PROBE, ALERT,
+				"register 0x%.08x access error, value = "
+				"0x%.08x, (temp & err_bit) = 0x%.08x.\n",
+				reg, temp, (temp & err_bit));
+			return -EIO;
+		} else if (temp & bit)
+			return 0;
+		udelay(UDELAY_DELAY);
+		count--;
+	}
+	QPRINTK(qdev, PROBE, ALERT,
+		"Timed out waiting for reg %x to come ready.\n", reg);
+	return -ETIMEDOUT;
+}
+
+/* The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+{
+	int count = UDELAY_COUNT;
+	u32 temp;
+
+	while (count) {
+		temp = ql_read32(qdev, CFG);
+		if (temp & CFG_LE)
+			return -EIO;
+		if (!(temp & bit))
+			return 0;
+		udelay(UDELAY_DELAY);
+		count--;
+	}
+	return -ETIMEDOUT;
+}
+
+
+/* Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+		 u16 q_id)
+{
+	u64 map;
+	int status = 0;
+	int direction;
+	u32 mask;
+	u32 value;
+	direction =
+		(bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
+		PCI_DMA_FROMDEVICE;
+
+	map = pci_map_single(qdev->pdev, ptr, size, direction);
+	if (pci_dma_mapping_error(map)) {
+		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+		return -ENOMEM;
+	}
+
+	status = ql_wait_cfg(qdev, bit);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Timed out waiting for CFG to come ready.\n");
+		goto exit;
+	}
+
+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		goto exit;
+	ql_write32(qdev, ICB_L, (u32) map);
+	ql_write32(qdev, ICB_H, (u32) (map >> 32));
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
+
+	mask = CFG_Q_MASK | (bit << 16);
+	value = bit | (q_id << CFG_Q_SHIFT);
+	ql_write32(qdev, CFG, (mask | value));
+
+	/*
+	 * Wait for the bit to clear after signaling hw.
+	 */
+	status = ql_wait_cfg(qdev, bit);
+exit:
+	pci_unmap_single(qdev->pdev, map, size, direction);
+	return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+	int status;
+	unsigned long hw_flags = 0;
+
+	if (!netif_carrier_ok(qdev->ndev)) {
+		QPRINTK(qdev, LINK, ERR, "%s: Link is Up.\n",
+					 qdev->ndev->name);
+		netif_carrier_on(qdev->ndev);
+		netif_wake_queue(qdev->ndev);
+		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Unable to get hardware semaphore on %s\n",
+				qdev->ndev->name);
+			return;
+		}
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+		if (ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->dev_addr,
+				MAC_ADDR_TYPE_CAM_MAC, qdev->port*MAX_CQ)) {
+			QPRINTK(qdev, IFUP, ERR,
+					"Failed to restore mac address.\n");
+		}
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	}
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+	char zero_mac_addr[6];
+	int status;
+	unsigned long hw_flags = 0;
+
+	if (netif_carrier_ok(qdev->ndev)) {
+		QPRINTK(qdev, LINK, ERR, "%s: Link is Down.\n",
+					 qdev->ndev->name);
+		netif_carrier_off(qdev->ndev);
+		netif_stop_queue(qdev->ndev);
+		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Unable to get hardware semaphore on %s\n",
+				 qdev->ndev->name);
+			return;
+		}
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+		memset(zero_mac_addr, 0, sizeof(zero_mac_addr));
+
+		if (ql_set_mac_addr_reg(qdev, (u8 *) zero_mac_addr,
+				MAC_ADDR_TYPE_CAM_MAC, qdev->port*MAX_CQ)) {
+			QPRINTK(qdev, IFUP, ERR,
+					"Failed to clear mac address.\n");
+		}
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	}
+}
+
+/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+			u32 *value)
+{
+	u32 offset = 0;
+	int status;
+
+	switch (type) {
+	case MAC_ADDR_TYPE_MULTI_MAC:
+	case MAC_ADDR_TYPE_CAM_MAC:
+		{
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				(index << MAC_ADDR_IDX_SHIFT) | /* index */
+				MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+			if (status)
+				goto exit;
+			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				(index << MAC_ADDR_IDX_SHIFT) | /* index */
+				MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+			if (status)
+				goto exit;
+			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			if (type == MAC_ADDR_TYPE_CAM_MAC) {
+				status =
+					ql_wait_reg_rdy(qdev,
+					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+				if (status)
+					goto exit;
+				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+					(index << MAC_ADDR_IDX_SHIFT) |
+					MAC_ADDR_ADR | MAC_ADDR_RS | type);
+				status =
+					ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+							MAC_ADDR_MR, 0);
+				if (status)
+					goto exit;
+				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
+			}
+			break;
+		}
+	case MAC_ADDR_TYPE_VLAN:
+	case MAC_ADDR_TYPE_MULTI_FLTR:
+	default:
+		QPRINTK(qdev, IFUP, CRIT,
+			"Address type %d not yet supported.\n", type);
+		status = -EPERM;
+	}
+exit:
+	return status;
+}
+
+/* Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+			u16 index)
+{
+	u32 offset = 0;
+	int status = 0;
+
+	switch (type) {
+	case MAC_ADDR_TYPE_MULTI_MAC:
+	case MAC_ADDR_TYPE_CAM_MAC:
+		{
+			u32 cam_output;
+			u32 upper = (addr[0] << 8) | addr[1];
+			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+					(addr[4] << 8) | (addr[5]);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"Adding %s address %pM"
+				" at index %d in the CAM.\n",
+				((type ==
+				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
+				 "UNICAST"), addr, index);
+
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				(index << MAC_ADDR_IDX_SHIFT) | /* index */
+				type);	/* type */
+			ql_write32(qdev, MAC_ADDR_DATA, lower);
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+				(index << MAC_ADDR_IDX_SHIFT) | /* index */
+				type);	/* type */
+			ql_write32(qdev, MAC_ADDR_DATA, upper);
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
+				(index << MAC_ADDR_IDX_SHIFT) |	/* index */
+				type);	/* type */
+			/* This field should also include the queue id
+			 * and possibly the function id.  Right now we hardcode
+			 * the route field to NIC core.
+			 */
+			if (type == MAC_ADDR_TYPE_CAM_MAC) {
+				cam_output = (CAM_OUT_ROUTE_NIC |
+					(qdev->port << CAM_OUT_FUNC_SHIFT) |
+					(qdev->
+					rss_ring_first_cq_id <<
+					CAM_OUT_CQ_ID_SHIFT));
+				if (qdev->vlgrp)
+					cam_output |= CAM_OUT_RV;
+				/* route to NIC core */
+				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+			}
+			break;
+		}
+	case MAC_ADDR_TYPE_VLAN:
+		{
+			u32 enable_bit = *((u32 *) &addr[0]);
+			/* For VLAN, the addr actually holds a single bit
+			 * (MAC_ADDR_E, bit 27) that either enables or
+			 * disables the VLAN ID we are addressing.
+			 */
+			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
+				(enable_bit ? "Adding" : "Removing"),
+				index, (enable_bit ? "to" : "from"));
+
+			status =
+				ql_wait_reg_rdy(qdev,
+				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
+				(index << MAC_ADDR_IDX_SHIFT) |	/* index */
+				type |	/* type */
+					enable_bit);	/* enable/disable */
+			break;
+		}
+	case MAC_ADDR_TYPE_MULTI_FLTR:
+	default:
+		QPRINTK(qdev, IFUP, CRIT,
+			"Address type %d not yet supported.\n", type);
+		status = -EPERM;
+	}
+exit:
+	return status;
+}
+
+/* Get a specific frame routing value from the CAM.
+ * Used for debug and reg dump.
+ */
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+{
+	int status = 0;
+
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+	if (status)
+		goto exit;
+
+	ql_write32(qdev, RT_IDX,
+		RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
+	if (status)
+		goto exit;
+	*value = ql_read32(qdev, RT_DATA);
+exit:
+	return status;
+}
+
+/* The NIC function for this chip has 16 routing indexes.  Each one can be used
+ * to route different frame types to various inbound queues.  We send broadcast/
+ * multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
+static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
+				int enable)
+{
+	int status = -EINVAL; /* Return error if no mask match. */
+	u32 value = 0;
+
+	QPRINTK(qdev, IFUP, DEBUG,
+		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
+		(enable ? "Adding" : "Removing"),
+		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
+		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
+		((index ==
+		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
+		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
+		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
+		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
+		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
+		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
+		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
+		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
+		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
+		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
+		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
+		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
+		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
+		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
+		(enable ? "to" : "from"));
+
+	switch (mask) {
+	case RT_IDX_CAM_HIT:
+		{
+			value = RT_IDX_DST_CAM_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_CAM_HIT_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_PROMISCUOUS_SLOT <<
+				RT_IDX_IDX_SHIFT);/* index */
+			break;
+		}
+	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
+		{
+			value = RT_IDX_DST_DFLT_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_ALL_ERR_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
+		{
+			value = RT_IDX_DST_DFLT_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_BCAST_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
+		{
+			value = RT_IDX_DST_CAM_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_ALLMULTI_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
+		{
+			value = RT_IDX_DST_CAM_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_MCAST_MATCH_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
+		{
+			value = RT_IDX_DST_RSS | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(RT_IDX_RSS_MATCH_SLOT <<
+				RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	case 0:		/* Clear the E-bit on an entry. */
+		{
+			value = RT_IDX_DST_DFLT_Q | /* dest */
+				RT_IDX_TYPE_NICQ | /* type */
+				(index << RT_IDX_IDX_SHIFT); /* index */
+			break;
+		}
+	default:
+		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
+			mask);
+		status = -EPERM;
+		goto exit;
+	}
+
+	if (value) {
+		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+		if (status)
+			goto exit;
+		value |= (enable ? RT_IDX_E : 0);
+		ql_write32(qdev, RT_IDX, value);
+		ql_write32(qdev, RT_DATA, enable ? mask : 0);
+	}
+exit:
+	return status;
+}
+
+static void ql_enable_interrupts(struct ql_adapter *qdev)
+{
+	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+}
+
+static void ql_disable_interrupts(struct ql_adapter *qdev)
+{
+	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+}
+
+/* If we're running with multiple MSI-X vectors then we enable on the fly.
+ * Otherwise, we may have multiple outstanding workers and don't want to
+ * enable until the last one finishes. In this case, the irq_cnt gets
+ * incremented every time we queue a worker and decremented every time
+ * a worker finishes.  Once it hits zero we enable the interrupt.
+ */
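+/* For example, with two workers queued, irq_cnt is 2; the interrupt is
+ * re-enabled only after the second worker finishes and the count drops
+ * back to zero.
+ */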
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+	u32 var = 0;
+	unsigned long hw_flags = 0;
+	struct intr_context *ctx = qdev->intr_context + intr;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+		/* Always enable if we're using MSI-X multi interrupts and
+		 * this is not the default (zeroeth) interrupt.
+		 */
+		ql_write32(qdev, INTR_EN,
+				ctx->intr_en_mask);
+		return var;
+	}
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (atomic_dec_and_test(&ctx->irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+				ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return var;
+}
+
+static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+	u32 var = 0;
+	struct intr_context *ctx;
+
+	/* HW disables for us if we're using MSI-X multi interrupts and
+	 * this is not the default (zeroeth) interrupt.
+	 */
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+		return 0;
+
+	ctx = qdev->intr_context + intr;
+	spin_lock(&qdev->hw_lock);
+	if (!atomic_read(&ctx->irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+		ctx->intr_dis_mask);
+		var = ql_read32(qdev, STS);
+	}
+	atomic_inc(&ctx->irq_cnt);
+	spin_unlock(&qdev->hw_lock);
+	return var;
+}
+
+static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+{
+	int i;
+	for (i = 0; i < qdev->intr_count; i++) {
+		/* The enable call does an atomic_dec_and_test
+		 * and enables only if the result is zero.
+		 * So we precharge it here.
+		 */
+		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+			i == 0))
+			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+		ql_enable_completion_interrupt(qdev, i);
+	}
+
+}
+
+static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+{
+	int status, i;
+	u16 csum = 0;
+	__le16 *flash = (__le16 *)&qdev->flash;
+
+	status = strncmp((char *)&qdev->flash, str, 4);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+		return	status;
+	}
+
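+	/* A valid flash image checksums to zero when summed as 16-bit words. */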
+	for (i = 0; i < size; i++)
+		csum += le16_to_cpu(*flash++);
+
+	if (csum)
+		QPRINTK(qdev, IFUP, ERR,
+			"Invalid flash checksum, csum = 0x%.04x.\n", csum);
+
+	return csum;
+}
+
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* This data is stored on flash as an array of
+	 * __le32.  Since ql_read32() returns cpu endian
+	 * we need to swap it back.
+	 */
+	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
+exit:
+	return status;
+}
+
+static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+{
+	u32 i, size;
+	int status;
+	__le32 *p = (__le32 *)&qdev->flash;
+	u32 offset;
+	u8 mac_addr[6];
+
+	/* Get flash offset for function and adjust
+	 * for dword access.
+	 */
+	if (!qdev->port)
+		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
+	else
+		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
+
+	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+		return -ETIMEDOUT;
+
+	size = sizeof(struct flash_params_8000) / sizeof(u32);
+	for (i = 0; i < size; i++, p++) {
+		status = ql_read_flash_word(qdev, i+offset, p);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+			goto exit;
+		}
+	}
+
+	status = ql_validate_flash(qdev,
+			sizeof(struct flash_params_8000) / sizeof(u16),
+			"8000");
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	/* Extract either manufacturer or BOFM modified
+	 * MAC address.
+	 */
+	if (qdev->flash.flash_params_8000.data_type1 == 2)
+		memcpy(mac_addr,
+			qdev->flash.flash_params_8000.mac_addr1,
+			qdev->ndev->addr_len);
+	else
+		memcpy(mac_addr,
+			qdev->flash.flash_params_8000.mac_addr,
+			qdev->ndev->addr_len);
+
+	if (!is_valid_ether_addr(mac_addr)) {
+		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	memcpy(qdev->ndev->dev_addr,
+		mac_addr,
+		qdev->ndev->addr_len);
+
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+{
+	int i;
+	int status;
+	__le32 *p = (__le32 *)&qdev->flash;
+	u32 offset = 0;
+	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+	/* Second function's parameters follow the first
+	 * function's.
+	 */
+	if (qdev->port)
+		offset = size;
+
+	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+		return -ETIMEDOUT;
+
+	for (i = 0; i < size; i++, p++) {
+		status = ql_read_flash_word(qdev, i+offset, p);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+			goto exit;
+		}
+
+	}
+
+	status = ql_validate_flash(qdev,
+			sizeof(struct flash_params_8012) / sizeof(u16),
+			"8012");
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+		status = -EINVAL;
+		goto exit;
+	}
+
+	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+		status = -EINVAL;
+		goto exit;
+	}
+
+	memcpy(qdev->ndev->dev_addr,
+		qdev->flash.flash_params_8012.mac_addr,
+		qdev->ndev->addr_len);
+
+exit:
+	ql_sem_unlock(qdev, SEM_FLASH_MASK);
+	return status;
+}
+
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		return status;
+	/* write the data to the data reg */
+	ql_write32(qdev, XGMAC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, XGMAC_ADDR, reg);
+	return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair.  Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev,
+			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, XGMAC_DATA);
+exit:
+	return status;
+}
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+	int status = 0;
+	u32 hi = 0;
+	u32 lo = 0;
+
+	status = ql_read_xgmac_reg(qdev, reg, &lo);
+	if (status)
+		goto exit;
+
+	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+	if (status)
+		goto exit;
+
+	*data = (u64) lo | ((u64) hi << 32);
+
+exit:
+	return status;
+}
+
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+	int status;
+	status = ql_mb_about_fw(qdev);
+	if (status)
+		goto exit;
+	status = ql_mb_get_fw_state(qdev);
+	if (status)
+		goto exit;
+	/* Wake up a worker to get/set the TX/RX frame sizes. */
+	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+	return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 data;
+
+	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+		/* Another function has the semaphore, so
+		 * wait for the port init bit to come ready.
+		 */
+		QPRINTK(qdev, LINK, INFO,
+			"Another function has the semaphore, so wait for "
+			"the port init bit to come ready.\n");
+		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+		if (status) {
+			QPRINTK(qdev, LINK, CRIT,
+				"Port initialize timed out.\n");
+		}
+		return status;
+	}
+
+	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore.\n");
+	/* Set the core reset. */
+	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+	if (status)
+		goto end;
+	data |= GLOBAL_CFG_RESET;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Clear the core reset and turn on jumbo for receiver. */
+	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
+	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
+	data |= GLOBAL_CFG_TX_STAT_EN;
+	data |= GLOBAL_CFG_RX_STAT_EN;
+	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable the transmitter and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
+	data |= TX_CFG_EN;	/* Enable the transmitter. */
+	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Enable the receiver and clear its reset. */
+	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+	if (status)
+		goto end;
+	data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
+	data |= RX_CFG_EN;	/* Enable the receiver. */
+	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+	if (status)
+		goto end;
+
+	/* Turn on jumbo. */
+	status =
+		ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO |
+					 (0x2580 << 16));
+	if (status)
+		goto end;
+	status =
+		ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+	if (status)
+		goto end;
+
+	/* Signal to the world that the port is enabled.	*/
+	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+	return status;
+}
+
+/* Get the next large buffer. */
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+	rx_ring->lbq_curr_idx++;
+	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+		rx_ring->lbq_curr_idx = 0;
+	rx_ring->lbq_free_cnt++;
+	return lbq_desc;
+}
+
+/* Get the next small buffer. */
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+{
+	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
+	rx_ring->sbq_curr_idx++;
+	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
+		rx_ring->sbq_curr_idx = 0;
+	rx_ring->sbq_free_cnt++;
+	return sbq_desc;
+}
+
+/* Update an rx ring index. */
+static void ql_update_cq(struct rx_ring *rx_ring)
+{
+	rx_ring->cnsmr_idx++;
+	rx_ring->curr_entry++;
+	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
+		rx_ring->cnsmr_idx = 0;
+		rx_ring->curr_entry = rx_ring->cq_base;
+	}
+	prefetch(rx_ring->curr_entry);
+}
+
+static void ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+}
+
+/* Process (refill) a large buffer queue. */
+static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	u32 clean_idx = rx_ring->lbq_clean_idx;
+	u32 start_idx = clean_idx;
+	struct bq_desc *lbq_desc;
+	u64 map;
+	int i;
+
+	while (rx_ring->lbq_free_cnt > 16) {
+		for (i = 0; i < 16; i++) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"lbq: try cleaning clean_idx = %d.\n",
+				clean_idx);
+			lbq_desc = &rx_ring->lbq[clean_idx];
+			if (lbq_desc->p.lbq_page == NULL) {
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"lbq: getting new page for index %d.\n",
+					lbq_desc->index);
+				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
+				if (lbq_desc->p.lbq_page == NULL) {
+					rx_ring->lbq_clean_idx = clean_idx;
+					QPRINTK(qdev, DRV, ERR,
+						"Couldn't get a page.\n");
+					return;
+				}
+				map = pci_map_page(qdev->pdev,
+						lbq_desc->p.lbq_page, 0,
+						rx_ring->lbq_buf_map_size,
+						PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(map)) {
+					rx_ring->lbq_clean_idx = clean_idx;
+					put_page(lbq_desc->p.lbq_page);
+					lbq_desc->p.lbq_page = NULL;
+					QPRINTK(qdev, RX_STATUS, ERR,
+						"PCI mapping failed.\n");
+					return;
+				}
+				pci_unmap_addr_set(lbq_desc, mapaddr, map);
+				pci_unmap_len_set(lbq_desc, maplen,
+						rx_ring->lbq_buf_map_size);
+				*lbq_desc->addr = cpu_to_le64(map);
+				lbq_desc->pg_addr =
+					page_address(lbq_desc->p.lbq_page);
+			}
+			clean_idx++;
+			if (clean_idx == rx_ring->lbq_len)
+				clean_idx = 0;
+		}
+
+		rx_ring->lbq_clean_idx = clean_idx;
+		rx_ring->lbq_prod_idx += 16;
+		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
+			rx_ring->lbq_prod_idx = 0;
+		rx_ring->lbq_free_cnt -= 16;
+	}
+
+	if (start_idx != clean_idx) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"lbq: updating prod idx = %d.\n",
+			rx_ring->lbq_prod_idx);
+		ql_write_db_reg(rx_ring->lbq_prod_idx,
+				rx_ring->lbq_prod_idx_db_reg);
+	}
+}
+
+/* Process (refill) a small buffer queue. */
+static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	u32 clean_idx = rx_ring->sbq_clean_idx;
+	u32 start_idx = clean_idx;
+	struct bq_desc *sbq_desc;
+	u64 map;
+	int i;
+
+	while (rx_ring->sbq_free_cnt > 16) {
+		for (i = 0; i < 16; i++) {
+			sbq_desc = &rx_ring->sbq[clean_idx];
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"sbq: try cleaning clean_idx = %d.\n",
+				clean_idx);
+			if (sbq_desc->p.skb == NULL) {
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"sbq: getting new skb for index %d.\n",
+					sbq_desc->index);
+				sbq_desc->p.skb =
+					netdev_alloc_skb(qdev->ndev,
+							rx_ring->sbq_buf_size);
+				if (sbq_desc->p.skb == NULL) {
+					QPRINTK(qdev, PROBE, ERR,
+						"Couldn't get an skb.\n");
+					rx_ring->sbq_clean_idx = clean_idx;
+					return;
+				}
+				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
+				map = pci_map_single(qdev->pdev,
+							sbq_desc->p.skb->data,
+							rx_ring->sbq_buf_size,
+							PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(map)) {
+					QPRINTK(qdev, IFUP, ERR,
+						"PCI mapping failed.\n");
+					rx_ring->sbq_clean_idx = clean_idx;
+					dev_kfree_skb_any(sbq_desc->p.skb);
+					sbq_desc->p.skb = NULL;
+					return;
+				}
+				pci_unmap_addr_set(sbq_desc, mapaddr, map);
+				pci_unmap_len_set(sbq_desc, maplen,
+						  rx_ring->sbq_buf_size);
+				*sbq_desc->addr = cpu_to_le64(map);
+			}
+
+			clean_idx++;
+			if (clean_idx == rx_ring->sbq_len)
+				clean_idx = 0;
+		}
+		rx_ring->sbq_clean_idx = clean_idx;
+		rx_ring->sbq_prod_idx += 16;
+		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
+			rx_ring->sbq_prod_idx = 0;
+		rx_ring->sbq_free_cnt -= 16;
+	}
+
+	if (start_idx != clean_idx) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"sbq: updating prod idx = %d.\n",
+			rx_ring->sbq_prod_idx);
+		ql_write_db_reg(rx_ring->sbq_prod_idx,
+				rx_ring->sbq_prod_idx_db_reg);
+	}
+}
+
+static void ql_update_buffer_queues(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring)
+{
+	ql_update_sbq(qdev, rx_ring);
+	ql_update_lbq(qdev, rx_ring);
+}
+
+/* Unmaps tx buffers.  Can be called from send() if a pci mapping
+ * fails at some stage, or from the interrupt when a tx completes.
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+			  struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+	int i;
+	for (i = 0; i < mapped; i++) {
+		if (i == 0 || (i == 7 && mapped > 7)) {
+			/*
+			 * Unmap the skb->data area, or the
+			 * external sglist (AKA the Outbound
+			 * Address List (OAL)).
+			 * If it's the zeroeth element, then it's
+			 * the skb->data area.  If it's the 7th
+			 * element and there are more than 6 frags,
+			 * then it's an OAL.
+			 */
+			if (i == 7) {
+				QPRINTK(qdev, TX_DONE, DEBUG,
+					"unmapping OAL area.\n");
+			}
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(&tx_ring_desc->map[i],
+							mapaddr),
+					 pci_unmap_len(&tx_ring_desc->map[i],
+							maplen),
+					 PCI_DMA_TODEVICE);
+		} else {
+			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
+				i);
+			pci_unmap_page(qdev->pdev,
+					pci_unmap_addr(&tx_ring_desc->map[i],
+							mapaddr),
+					pci_unmap_len(&tx_ring_desc->map[i],
+							maplen),
+					PCI_DMA_TODEVICE);
+		}
+	}
+
+}
+
+/* Map the buffers for this transmit.  This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+			struct ob_mac_iocb_req *mac_iocb_ptr,
+			struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+	int len = skb_headlen(skb);
+	dma_addr_t map;
+	int frag_idx, err, map_idx = 0;
+	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+	int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+	if (frag_cnt)
+		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+	/*
+	 * Map the skb buffer first.
+	 */
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+	err = pci_dma_mapping_error(map);
+	if (err) {
+		QPRINTK(qdev, TX_QUEUED, ERR,
+			"PCI mapping failed with error: %d\n", err);
+		return NETDEV_TX_BUSY;
+	}
+
+	tbd->len = cpu_to_le32(len);
+	tbd->addr = cpu_to_le64(map);
+	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+	map_idx++;
+
+	/*
+	 * This loop fills the remainder of the 8 address descriptors
+	 * in the IOCB.  If there are more than 7 fragments, then the
+	 * eighth address desc will point to an external list (OAL).
+	 * When this happens, the remainder of the frags will be stored
+	 * in this list.
+	 */
+	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+		tbd++;
+		if (frag_idx == 6 && frag_cnt > 7) {
+			/* Let's tack on an sglist.
+			 * Our control block will now
+			 * look like this:
+			 * iocb->seg[0] = skb->data
+			 * iocb->seg[1] = frag[0]
+			 * iocb->seg[2] = frag[1]
+			 * iocb->seg[3] = frag[2]
+			 * iocb->seg[4] = frag[3]
+			 * iocb->seg[5] = frag[4]
+			 * iocb->seg[6] = frag[5]
+			 * iocb->seg[7] = ptr to OAL (external sglist)
+			 * oal->seg[0] = frag[6]
+			 * oal->seg[1] = frag[7]
+			 * oal->seg[2] = frag[8]
+			 * oal->seg[3] = frag[9]
+			 * oal->seg[4] = frag[10]
+			 *	etc...
+			 */
+			/* Tack on the OAL in the eighth segment of IOCB. */
+			map = pci_map_single(qdev->pdev, tx_ring_desc->oal,
+						sizeof(struct oal),
+						PCI_DMA_TODEVICE);
+			err = pci_dma_mapping_error(map);
+			if (err) {
+				QPRINTK(qdev, TX_QUEUED, ERR,
+					"PCI mapping outbound address list "
+					"with error: %d\n", err);
+				goto map_error;
+			}
+
+			tbd->addr = cpu_to_le64(map);
+			/*
+			 * The length is the number of fragments
+			 * that remain to be mapped times the length
+			 * of our sglist (OAL).
+			 */
+			tbd->len =
+				cpu_to_le32((sizeof(struct tx_buf_desc) *
+					 (frag_cnt - frag_idx)) | TX_DESC_C);
+			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+						map);
+			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+					  sizeof(struct oal));
+			tbd = (struct tx_buf_desc *)tx_ring_desc->oal;
+			map_idx++;
+		}
+
+		map =
+			pci_map_page(qdev->pdev, frag->page,
+				 frag->page_offset, frag->size,
+				 PCI_DMA_TODEVICE);
+
+		err = pci_dma_mapping_error(map);
+		if (err) {
+			QPRINTK(qdev, TX_QUEUED, ERR,
+				"PCI mapping frags failed with error: %d.\n",
+				err);
+			goto map_error;
+		}
+
+		tbd->addr = cpu_to_le64(map);
+		tbd->len = cpu_to_le32(frag->size);
+		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+				  frag->size);
+
+	}
+	/* Save the number of segments we've mapped. */
+	tx_ring_desc->map_cnt = map_idx;
+	/* Terminate the last segment. */
+	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+	return NETDEV_TX_OK;
+
+map_error:
+	/*
+	 * If the first frag mapping failed, then i will be zero.
+	 * This causes the unmap of the skb->data area.  Otherwise
+	 * we pass in the number of frags that mapped successfully
+	 * so they can be unmapped.
+	 */
+	ql_unmap_send(qdev, tx_ring_desc, map_idx);
+	return NETDEV_TX_BUSY;
+}
+
+static void ql_realign_skb(struct sk_buff *skb, int len)
+{
+#if 0
+	void *temp_addr = skb->data;
+
+	/* Undo the skb_reserve(skb,32) we did before
+	 * giving to hardware, and realign data on
+	 * a 2-byte boundary.
+	 */
+	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+	memcpy(skb->data, temp_addr, len);
+#endif
+}
+
+/*
+ * This function builds an skb for the given inbound
+ * completion.  It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct bq_desc *lbq_desc;
+	struct bq_desc *sbq_desc;
+	struct sk_buff *skb = NULL;
+	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+
+	/*
+	 * Handle the header buffer if present.
+	 */
+	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+		ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Header of %d bytes in small buffer.\n", hdr_len);
+		/*
+		 * Headers fit nicely into a small buffer.
+		 */
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				pci_unmap_addr(sbq_desc, mapaddr),
+				pci_unmap_len(sbq_desc, maplen),
+				PCI_DMA_FROMDEVICE);
+		skb = sbq_desc->p.skb;
+		ql_realign_skb(skb, hdr_len);
+		skb_put(skb, hdr_len);
+		sbq_desc->p.skb = NULL;
+	}
+
+	/*
+	 * Handle the data buffer(s).
+	 */
+	if (unlikely(!length)) {	/* Is there data too? */
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"No Data buffer in this packet.\n");
+		return skb;
+	}
+
+	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Headers in small, data of %d bytes in small, "
+				"combine them.\n", length);
+			/*
+			 * Data is less than small buffer size so it's
+			 * stuffed in a small buffer.
+			 * For this case we append the data
+			 * from the "data" small buffer to the "header" small
+			 * buffer.
+			 */
+			sbq_desc = ql_get_curr_sbuf(rx_ring);
+			pci_dma_sync_single_for_cpu(qdev->pdev,
+							pci_unmap_addr
+							(sbq_desc, mapaddr),
+							pci_unmap_len
+							(sbq_desc, maplen),
+							PCI_DMA_FROMDEVICE);
+			memcpy(skb_put(skb, length),
+				sbq_desc->p.skb->data, length);
+			pci_dma_sync_single_for_device(qdev->pdev,
+							pci_unmap_addr
+							(sbq_desc,
+							mapaddr),
+							pci_unmap_len
+							(sbq_desc,
+							maplen),
+							PCI_DMA_FROMDEVICE);
+		} else {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes in a single small buffer.\n", length);
+			sbq_desc = ql_get_curr_sbuf(rx_ring);
+			skb = sbq_desc->p.skb;
+			ql_realign_skb(skb, length);
+			skb_put(skb, length);
+			pci_unmap_single(qdev->pdev,
+					 pci_unmap_addr(sbq_desc,
+							mapaddr),
+					 pci_unmap_len(sbq_desc,
+							maplen),
+					 PCI_DMA_FROMDEVICE);
+			sbq_desc->p.skb = NULL;
+		}
+	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Header in small, %d bytes in large. "
+				"Chain large to small!\n", length);
+			/*
+			 * The data is in a single large buffer.  We
+			 * chain it to the header buffer's skb and let
+			 * it rip.
+			 */
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			pci_unmap_page(qdev->pdev,
+					pci_unmap_addr(lbq_desc,
+							mapaddr),
+					pci_unmap_len(lbq_desc, maplen),
+					PCI_DMA_FROMDEVICE);
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Chaining page to skb.\n");
+			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
+						0, length);
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+			lbq_desc->p.lbq_page = NULL;
+		} else {
+			/*
+			 * The headers and data are in a single large buffer.
+			 * We copy it to a new skb and let it go. This can
+			 * happen with jumbo mtu on a non-TCP/UDP frame.
+			 */
+			void *vaddr;
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			skb = netdev_alloc_skb(qdev->ndev, length);
+			if (skb == NULL) {
+				QPRINTK(qdev, PROBE, ERR,
+					"No skb available, drop the packet.\n");
+				return NULL;
+			}
+			pci_unmap_page(qdev->pdev,
+					pci_unmap_addr(lbq_desc,
+							mapaddr),
+					pci_unmap_len(lbq_desc, maplen),
+					PCI_DMA_FROMDEVICE);
+			prefetch(lbq_desc->pg_addr);
+			skb_reserve(skb, NET_IP_ALIGN);
+			vaddr = kmap_atomic(lbq_desc->p.lbq_page,
+						KM_SKB_DATA_SOFTIRQ);
+			memcpy(skb_put(skb, ETH_HLEN), vaddr, ETH_HLEN);
+			kunmap_atomic(vaddr,
+					KM_SKB_DATA_SOFTIRQ);
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes of headers and data in large. "
+				"Chain page to new skb and pull tail.\n", length);
+			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
+						ETH_HLEN, length-ETH_HLEN);
+			skb->len += length-ETH_HLEN;
+			skb->data_len += length-ETH_HLEN;
+			skb->truesize += length-ETH_HLEN;
+			lbq_desc->p.lbq_page = NULL;
+		}
+	} else {
+		/*
+		 * The data is in a chain of large buffers
+		 * pointed to by a small buffer.  We loop
+		 * through and chain them to our small header
+		 * buffer's skb.
+		 * frags:  There are 18 max frags and our small
+		 *	 buffer will hold 32 of them. The thing is,
+		 *	 we'll use 3 max for our 9000 byte jumbo
+		 *	 frames.  If the MTU goes up we could
+		 *	  eventually be in trouble.
+		 */
+		int size, offset, i = 0;
+		__le64 *bq, bq_array[8];
+		sbq_desc = ql_get_curr_sbuf(rx_ring);
+		pci_unmap_single(qdev->pdev,
+				 pci_unmap_addr(sbq_desc, mapaddr),
+				 pci_unmap_len(sbq_desc, maplen),
+				 PCI_DMA_FROMDEVICE);
+		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+			/*
+			 * This is a non-TCP/UDP IP frame, so
+			 * the headers aren't split into a small
+			 * buffer.  We have to use the small buffer
+			 * that contains our sg list as our skb to
+			 * send upstairs. Copy the sg list here to
+			 * a local buffer and use it to find the
+			 * pages to chain.
+			 */
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"%d bytes of headers & data in chain of large.\n",
+				length);
+			skb = sbq_desc->p.skb;
+			bq = &bq_array[0];
+			memcpy(bq, skb->data, sizeof(bq_array));
+			sbq_desc->p.skb = NULL;
+			skb_reserve(skb, NET_IP_ALIGN);
+		} else {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Headers in small, %d bytes of data in chain "
+				"of large.\n", length);
+			bq = (__le64 *)sbq_desc->p.skb->data;
+		}
+		while (length > 0) {
+			lbq_desc = ql_get_curr_lbuf(rx_ring);
+			pci_unmap_page(qdev->pdev,
+					pci_unmap_addr(lbq_desc,
+							mapaddr),
+					pci_unmap_len(lbq_desc,
+							maplen),
+					PCI_DMA_FROMDEVICE);
+			if (i == 0) {
+				/* If this is the first large buffer then
+				 * we copy a piece to the skb-data section
+				 * so eth_type_trans() will have something
+				 * to read.
+				 */
+				void *vaddr;
+				vaddr = kmap_atomic(lbq_desc->p.lbq_page,
+							KM_SKB_DATA_SOFTIRQ);
+				memcpy(skb_put(skb, ETH_HLEN), vaddr, ETH_HLEN);
+				kunmap_atomic(vaddr,
+					KM_SKB_DATA_SOFTIRQ);
+				size = rx_ring->lbq_buf_map_size - ETH_HLEN;
+				offset = ETH_HLEN;
+				length -= ETH_HLEN;
+			} else {
+				size = (length < rx_ring->lbq_buf_map_size) ?
+					 length : rx_ring->lbq_buf_map_size;
+				offset = 0;
+			}
+
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Adding page %d to skb for %d bytes.\n",
+				i, size);
+			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
+						offset, size);
+			skb->len += size;
+			skb->data_len += size;
+			skb->truesize += size;
+			length -= size;
+			lbq_desc->p.lbq_page = NULL;
+			i++;
+		}
+
+	}
+	return skb;
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring,
+					struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+	struct net_device *ndev = qdev->ndev;
+	struct sk_buff *skb = NULL;
+
+	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+	if (unlikely(!skb)) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"No skb available, drop packet.\n");
+		return;
+	}
+
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+					ib_mac_rsp->flags2);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* loopback self test for ethtool */
+	if (test_bit(QL_TESTING, &qdev->flags)) {
+		ql_check_receive_frame(skb);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* The max framesize filter on this chip is set higher than
+	 * MTU since FCoE uses 2k frames.
+	 */
+	if (skb->len > ndev->mtu + ETH_HLEN) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	prefetch(skb->data);
+	skb->dev = ndev;
+	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+	}
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* If rx checksum is on, and there are no
+	 * csum or frame errors.
+	 */
+	if (qdev->rx_csum &&
+		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+		/* TCP frame. */
+		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+					"TCP checksum done!\n");
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && \
+				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+			/* Unfragmented ipv4 UDP frame. */
+			struct iphdr *iph = (struct iphdr *) skb->data;
+			if (!(iph->frag_off &
+				cpu_to_be16(IP_MF|IP_OFFSET))) {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+						"UDP checksum done!\n");
+			}
+		}
+	}
+	qdev->stats.rx_packets++;
+	qdev->stats.rx_bytes += skb->len;
+
+	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Passing a VLAN packet upstream.\n");
+		vlan_hwaccel_rx(skb, qdev->vlgrp,
+				le16_to_cpu(ib_mac_rsp->vlan_id));
+	} else {
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"Passing a normal packet upstream.\n");
+		netif_rx(skb);
+	}
+}
+
+/* Process an outbound completion from an rx ring. */
+static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+					struct ob_mac_iocb_rsp *mac_rsp)
+{
+	struct tx_ring *tx_ring;
+	struct tx_ring_desc *tx_ring_desc;
+
+	QL_DUMP_OB_MAC_RSP(mac_rsp);
+	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+	qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
+	qdev->stats.tx_packets++;
+	dev_kfree_skb_any(tx_ring_desc->skb);
+	tx_ring_desc->skb = NULL;
+
+	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
+					OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_P |
+					OB_MAC_IOCB_RSP_B))) {
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Total descriptor length did not match "
+				"transfer length.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Frame too short to be legal, not sent.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"Frame too long, but sent anyway.\n");
+		}
+		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
+			QPRINTK(qdev, TX_DONE, WARNING,
+				"PCI backplane error. Frame not sent.\n");
+		}
+	}
+	atomic_inc(&tx_ring->tx_count);
+}
+
+/* Fire up a handler to reset the MPI processor. */
+void ql_queue_fw_error(struct ql_adapter *qdev)
+{
+	ql_link_off(qdev);
+	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
+}
+
+void ql_queue_asic_error(struct ql_adapter *qdev)
+{
+	ql_link_off(qdev);
+	ql_disable_interrupts(qdev);
+	/* Clear adapter up bit to signal the recovery
+	 * process that it shouldn't kill the reset worker
+	 * thread
+	 */
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
+					struct ib_ae_iocb_rsp *ib_ae_rsp)
+{
+	switch (ib_ae_rsp->event) {
+	case MGMT_ERR_EVENT:
+		QPRINTK(qdev, RX_ERR, ERR,
+			"Management Processor Fatal Error.\n");
+		ql_queue_fw_error(qdev);
+		return;
+
+	case CAM_LOOKUP_ERR_EVENT:
+		QPRINTK(qdev, LINK, ERR,
+			"Multiple CAM hits lookup occurred.\n");
+		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
+		ql_queue_asic_error(qdev);
+		return;
+
+	case SOFT_ECC_ERROR_EVENT:
+		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+		ql_queue_asic_error(qdev);
+		break;
+
+	case PCI_ERR_ANON_BUF_RD:
+		QPRINTK(qdev, RX_ERR, ERR,
+			"PCI error occurred when reading anonymous buffers "
+			"from rx_ring %d.\n", ib_ae_rsp->q_id);
+		ql_queue_asic_error(qdev);
+		break;
+
+	default:
+		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
+			ib_ae_rsp->event);
+		ql_queue_asic_error(qdev);
+		break;
+	}
+}
+
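+/* Service outbound (tx) completions posted to this rx ring and
+ * restart the tx queue if it was stopped and has drained enough.
+ */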
+static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+	struct ql_adapter *qdev = rx_ring->qdev;
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	struct ob_mac_iocb_rsp *net_rsp = NULL;
+	int count = 0;
+
+	/* While there are entries in the completion queue. */
+	while (prod != rx_ring->cnsmr_idx) {
+
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
+			prod, rx_ring->cnsmr_idx);
+
+		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+		rmb();
+		switch (net_rsp->opcode) {
+
+		case OPCODE_OB_MAC_TSO_IOCB:
+		case OPCODE_OB_MAC_IOCB:
+			ql_process_mac_tx_intr(qdev, net_rsp);
+			break;
+		default:
+			QPRINTK(qdev, RX_STATUS, DEBUG,
+				"Hit default case, not handled! "
+				"Dropping the packet, opcode = %x.\n",
+				net_rsp->opcode);
+		}
+		ql_update_cq(rx_ring);
+		count++;
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	}
+	ql_write_cq_idx(rx_ring);
+	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
+		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+		if (atomic_read(&tx_ring->queue_stopped) &&
+			(atomic_read(&tx_ring->tx_count) >
+					(tx_ring->wq_len / 4)))
+			/*
+			 * The queue got stopped because the tx_ring was full.
+			 * Wake it up, because it's now at least 25% empty.
+			 */
+			netif_wake_queue(qdev->ndev);
+	}
+
+	return count;
+}
+
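+/* Service up to "budget" inbound completions, then replenish the
+ * rx buffer queues.
+ */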
+static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+	struct ql_adapter *qdev = rx_ring->qdev;
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+	struct ql_net_rsp_iocb *net_rsp;
+	int count = 0;
+
+	/* While there are entries in the completion queue. */
+	while (prod != rx_ring->cnsmr_idx) {
+
+		QPRINTK(qdev, RX_STATUS, DEBUG,
+			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
+			prod, rx_ring->cnsmr_idx);
+
+		net_rsp = rx_ring->curr_entry;
+		rmb();
+		switch (net_rsp->opcode) {
+		case OPCODE_IB_MAC_IOCB:
+			ql_process_mac_rx_intr(qdev, rx_ring,
+						(struct ib_mac_iocb_rsp *)
+						net_rsp);
+			break;
+
+		case OPCODE_IB_AE_IOCB:
+			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
+						net_rsp);
+			break;
+		default:
+			{
+				QPRINTK(qdev, RX_STATUS, DEBUG,
+					"Hit default case, not handled! "
+					"Dropping the packet, opcode = %x.\n",
+					net_rsp->opcode);
+			}
+		}
+		ql_update_cq(rx_ring);
+		count++;
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+		if (count == budget)
+			break;
+	}
+	ql_update_buffer_queues(qdev, rx_ring);
+	ql_write_cq_idx(rx_ring);
+	return count;
+}
+
+static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	qdev->vlgrp = grp;
+	if (grp) {
+		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+				NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+	} else {
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Turning off VLAN in NIC_RCV_CFG.\n");
+		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+	}
+}
+
+static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 enable_bit = MAC_ADDR_E;
+	int status;
+	unsigned long hw_flags = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (ql_set_mac_addr_reg
+		(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	u32 enable_bit = 0;
+	unsigned long hw_flags = 0;
+	int status;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (ql_set_mac_addr_reg
+		(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
+static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	ql_clean_outbound_rx_ring(rx_ring);
+	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
+	return IRQ_HANDLED;
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
+static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+
+	netif_rx_schedule(rx_ring->dummy_netdev);
+	return IRQ_HANDLED;
+}
+
+/* This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple vector environment.
+ * In an MSI/Legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+static irqreturn_t qlge_isr(int irq, void *dev_id)
+{
+	struct rx_ring *rx_ring = dev_id;
+	struct ql_adapter *qdev = rx_ring->qdev;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+	u32 var;
+	int i;
+	int work_done = 0;
+
+	spin_lock(&qdev->hw_lock);
+	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+		spin_unlock(&qdev->hw_lock);
+		return IRQ_NONE;
+	}
+	spin_unlock(&qdev->hw_lock);
+
+	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+
+	/*
+	 * Check for fatal error.
+	 */
+	if (var & STS_FE) {
+		ql_queue_asic_error(qdev);
+		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+		var = ql_read32(qdev, ERR_STS);
+		QPRINTK(qdev, INTR, ERR,
+			"Resetting chip. Error Status Register = 0x%x\n", var);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Check MPI processor activity.
+	 */
+	if (var & STS_PI) {
+		/*
+		 * We've got an async event or mailbox completion.
+		 * Handle it and clear the source of the interrupt.
+		 */
+		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		queue_delayed_work(qdev->workqueue,
+					&qdev->mpi_work, 0);
+		work_done++;
+	}
+
+	/*
+	 * Check the default queue and wake handler if active.
+	 */
+	rx_ring = &qdev->rx_ring[0];
+	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
+		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		/* Default inbound completion, schedule NAPI processing */
+		netif_rx_schedule(rx_ring->dummy_netdev);
+		work_done++;
+	}
+
+	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+		/*
+		 * Start the DPC for each active queue.
+		 */
+		for (i = 1; i < qdev->rx_ring_count; i++) {
+			rx_ring = &qdev->rx_ring[i];
+			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+				rx_ring->cnsmr_idx) {
+				QPRINTK(qdev, INTR, INFO,
+					"Waking handler for rx_ring[%d].\n", i);
+				ql_disable_completion_interrupt(qdev,
+								intr_context->
+								intr);
+				if (i < qdev->rss_ring_first_cq_id) {
+					/* No NAPI for outbound completions */
+					ql_clean_outbound_rx_ring(rx_ring);
+					ql_enable_completion_interrupt(
+						rx_ring->qdev, rx_ring->irq);
+				} else {
+
+					/* Inbound completion,
+					 * schedule NAPI processing
+					 */
+					netif_rx_schedule(
+							rx_ring->dummy_netdev);
+				}
+
+				work_done++;
+			}
+		}
+	}
+	ql_enable_completion_interrupt(qdev, intr_context->intr);
+	return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
+	if (skb_is_gso(skb)) {
+		int err;
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+		mac_iocb_ptr->total_hdrs_len =
+			cpu_to_le16(skb_transport_offset(skb) +
+					 tcp_hdrlen(skb));
+		mac_iocb_ptr->net_trans_offset =
+			cpu_to_le16(skb_network_offset(skb) |
+				skb_transport_offset(skb)
+				<< OB_MAC_TRANSPORT_HDR_SHIFT);
+		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
+		if (likely(skb->protocol == htons(ETH_P_IP))) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->check = 0;
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						&ipv6_hdr(skb)->daddr,
+						0, IPPROTO_TCP, 0);
+		}
+		return 1;
+	}
+	return 0;
+}
+
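+/* Set up the IOCB fields for hardware TCP or UDP checksum offload
+ * on a non-TSO frame.
+ */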
+static void ql_hw_csum_setup(struct sk_buff *skb,
+				struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+	int len;
+	struct iphdr *iph = ip_hdr(skb);
+	__sum16 *check;
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+	mac_iocb_ptr->net_trans_offset =
+		cpu_to_le16(skb_network_offset(skb) |
+		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		check = &(tcp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+		mac_iocb_ptr->total_hdrs_len =
+			cpu_to_le16(skb_transport_offset(skb) +
+				(tcp_hdr(skb)->doff << 2));
+	} else {
+		check = &(udp_hdr(skb)->check);
+		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+		mac_iocb_ptr->total_hdrs_len =
+			cpu_to_le16(skb_transport_offset(skb) +
+				sizeof(struct udphdr));
+	}
+	*check = ~csum_tcpudp_magic(iph->saddr,
+					iph->daddr, len, iph->protocol, 0);
+}
+
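+/* Main transmit entry point.  Builds a MAC IOCB for the skb, maps
+ * its buffers, and rings the tx doorbell.
+ */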
+int qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int tso;
+	struct tx_ring *tx_ring;
+	u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+
+	tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+		QPRINTK(qdev, TX_QUEUED, INFO,
+			"shutting down tx queue %d due to lack of resources.\n",
+			tx_ring_idx);
+		netif_stop_queue(ndev);
+		atomic_inc(&tx_ring->queue_stopped);
+		return NETDEV_TX_BUSY;
+	}
+	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+	mac_iocb_ptr = tx_ring_desc->queue_entry;
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+
+	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+	mac_iocb_ptr->tid = tx_ring_desc->index;
+	/* We use the upper 32-bits to store the tx queue for this IO.
+	 * When we get the completion we can use it to establish the context.
+	 */
+	mac_iocb_ptr->txq_idx = tx_ring_idx;
+	tx_ring_desc->skb = skb;
+
+	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+
+	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
+		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
+			vlan_tx_tag_get(skb));
+		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
+		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+	}
+	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+		ql_hw_csum_setup(skb,
+				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+	}
+	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc)
+				!= NETDEV_TX_OK) {
+		QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
+		return NETDEV_TX_BUSY;
+	}
+	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
+	tx_ring->prod_idx++;
+	if (tx_ring->prod_idx == tx_ring->wq_len)
+		tx_ring->prod_idx = 0;
+	wmb();
+
+	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+	ndev->trans_start = jiffies;
+	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
+		tx_ring->prod_idx, skb->len);
+
+	atomic_dec(&tx_ring->tx_count);
+	return NETDEV_TX_OK;
+}
+
+static void ql_free_shadow_space(struct ql_adapter *qdev)
+{
+	if (qdev->rx_ring_shadow_reg_area) {
+		pci_free_consistent(qdev->pdev,
+					PAGE_SIZE,
+					qdev->rx_ring_shadow_reg_area,
+					qdev->rx_ring_shadow_reg_dma);
+		qdev->rx_ring_shadow_reg_area = NULL;
+	}
+	if (qdev->tx_ring_shadow_reg_area) {
+		pci_free_consistent(qdev->pdev,
+					PAGE_SIZE,
+					qdev->tx_ring_shadow_reg_area,
+					qdev->tx_ring_shadow_reg_dma);
+		qdev->tx_ring_shadow_reg_area = NULL;
+	}
+}
+
+static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+{
+	qdev->rx_ring_shadow_reg_area =
+		pci_alloc_consistent(qdev->pdev,
+				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
+	if (qdev->rx_ring_shadow_reg_area == NULL) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Allocation of RX shadow space failed.\n");
+		return -ENOMEM;
+	}
+	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
+	qdev->tx_ring_shadow_reg_area =
+		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
+				 &qdev->tx_ring_shadow_reg_dma);
+	if (qdev->tx_ring_shadow_reg_area == NULL) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Allocation of TX shadow space failed.\n");
+		goto err_wqp_sh_area;
+	}
+	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
+	return 0;
+
+err_wqp_sh_area:
+	pci_free_consistent(qdev->pdev,
+				PAGE_SIZE,
+				qdev->rx_ring_shadow_reg_area,
+				qdev->rx_ring_shadow_reg_dma);
+	return -ENOMEM;
+}
+
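+/* Initialize the tx ring's descriptor array and reset its counters. */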
+static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	int i;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+
+	mac_iocb_ptr = tx_ring->wq_base;
+	tx_ring_desc = tx_ring->q;
+	for (i = 0; i < tx_ring->wq_len; i++) {
+		tx_ring_desc->index = i;
+		tx_ring_desc->skb = NULL;
+		tx_ring_desc->queue_entry = mac_iocb_ptr;
+		mac_iocb_ptr++;
+		tx_ring_desc++;
+	}
+	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
+	atomic_set(&tx_ring->queue_stopped, 0);
+}
+
+static void ql_free_tx_resources(struct ql_adapter *qdev,
+				 struct tx_ring *tx_ring)
+{
+	int i;
+	struct tx_ring_desc *tx_ring_desc;
+	if (tx_ring->wq_base) {
+		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+					tx_ring->wq_base, tx_ring->wq_base_dma);
+		tx_ring->wq_base = NULL;
+	}
+
+	tx_ring_desc = tx_ring->q;
+	for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
+		if (tx_ring_desc->oal) {
+			kfree(tx_ring_desc->oal);
+			tx_ring_desc->oal = NULL;
+			tx_ring_desc->map = NULL;
+		}
+	}
+	kfree(tx_ring->q);
+	tx_ring->q = NULL;
+}
+
+static int ql_alloc_tx_resources(struct ql_adapter *qdev,
+				 struct tx_ring *tx_ring)
+{
+	struct tx_ring_desc *tx_ring_desc;
+	int i;
+	tx_ring->wq_base =
+		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
+				 &tx_ring->wq_base_dma);
+
+	if ((tx_ring->wq_base == NULL)
+		|| tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
+		if (tx_ring->wq_base) {
+			pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+				tx_ring->wq_base, tx_ring->wq_base_dma);
+			tx_ring->wq_base = NULL;
+		}
+		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+		return -ENOMEM;
+	}
+	tx_ring->q =
+		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc),
+				GFP_KERNEL);
+	if (tx_ring->q == NULL)
+		goto err;
+	else {
+		memset(tx_ring->q, 0, tx_ring->wq_len *
+				sizeof(struct tx_ring_desc));
+		tx_ring_desc = tx_ring->q;
+		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
+			/* Allocate a large enough structure to hold
+			 * oal and map
+			 */
+			tx_ring_desc->oal = kmalloc(sizeof(struct oal) +
+						(sizeof(struct map_list) *
+						(MAX_SKB_FRAGS + 1)),
+						GFP_KERNEL);
+			if (tx_ring_desc->oal) {
+				tx_ring_desc->map = (struct map_list *)
+						(((u8 *)tx_ring_desc->oal) +
+						sizeof(struct oal));
+			} else
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+				tx_ring->wq_base, tx_ring->wq_base_dma);
+	tx_ring_desc = tx_ring->q;
+	for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
+		if (tx_ring_desc->oal) {
+			kfree(tx_ring_desc->oal);
+			tx_ring_desc->oal = NULL;
+			tx_ring_desc->map = NULL;
+		}
+	}
+	if (tx_ring->q) {
+		kfree(tx_ring->q);
+		tx_ring->q = NULL;
+	}
+	return -ENOMEM;
+}
+
+static void ql_free_lbq_buffers(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		if (lbq_desc && lbq_desc->p.lbq_page) {
+			pci_unmap_page(qdev->pdev,
+					pci_unmap_addr(lbq_desc, mapaddr),
+					pci_unmap_len(lbq_desc, maplen),
+					PCI_DMA_FROMDEVICE);
+			put_page(lbq_desc->p.lbq_page);
+			lbq_desc->p.lbq_page = NULL;
+		}
+	}
+}
+
+static void ql_free_sbq_buffers(struct ql_adapter *qdev,
+					struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *sbq_desc;
+
+	for (i = 0; i < rx_ring->sbq_len; i++) {
+		sbq_desc = &rx_ring->sbq[i];
+		if (sbq_desc && sbq_desc->p.skb) {
+			pci_unmap_single(qdev->pdev,
+					pci_unmap_addr(sbq_desc, mapaddr),
+					pci_unmap_len(sbq_desc, maplen),
+					PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(sbq_desc->p.skb);
+			sbq_desc->p.skb = NULL;
+		}
+	}
+}
+
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+	int i;
+	struct rx_ring *rx_ring;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		if (rx_ring->lbq)
+			ql_free_lbq_buffers(qdev, rx_ring);
+		if (rx_ring->sbq)
+			ql_free_sbq_buffers(qdev, rx_ring);
+	}
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+	struct rx_ring *rx_ring;
+	int i;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		if (rx_ring->type != TX_Q)
+			ql_update_buffer_queues(qdev, rx_ring);
+	}
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *lbq_desc;
+	__le64 *bq = rx_ring->lbq_base;
+
+	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+	for (i = 0; i < rx_ring->lbq_len; i++) {
+		lbq_desc = &rx_ring->lbq[i];
+		memset(lbq_desc, 0, sizeof(*lbq_desc));
+		lbq_desc->index = i;
+		lbq_desc->addr = bq;
+		bq++;
+	}
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
+				struct rx_ring *rx_ring)
+{
+	int i;
+	struct bq_desc *sbq_desc;
+	__le64 *bq = rx_ring->sbq_base;
+
+	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
+	for (i = 0; i < rx_ring->sbq_len; i++) {
+		sbq_desc = &rx_ring->sbq[i];
+		memset(sbq_desc, 0, sizeof(*sbq_desc));
+		sbq_desc->index = i;
+		sbq_desc->addr = bq;
+		bq++;
+	}
+}
+
+static void ql_free_rx_resources(struct ql_adapter *qdev,
+				 struct rx_ring *rx_ring)
+{
+	/* Free the small buffer queue. */
+	if (rx_ring->sbq_base) {
+		pci_free_consistent(qdev->pdev,
+				rx_ring->sbq_size,
+				rx_ring->sbq_base, rx_ring->sbq_base_dma);
+		rx_ring->sbq_base = NULL;
+	}
+
+	/* Free the small buffer queue control blocks. */
+	kfree(rx_ring->sbq);
+	rx_ring->sbq = NULL;
+
+	/* Free the large buffer queue. */
+	if (rx_ring->lbq_base) {
+		pci_free_consistent(qdev->pdev,
+				rx_ring->lbq_size,
+				rx_ring->lbq_base, rx_ring->lbq_base_dma);
+		rx_ring->lbq_base = NULL;
+	}
+
+	/* Free the large buffer queue control blocks. */
+	kfree(rx_ring->lbq);
+	rx_ring->lbq = NULL;
+
+	/* Free the rx queue. */
+	if (rx_ring->cq_base) {
+		pci_free_consistent(qdev->pdev,
+				rx_ring->cq_size,
+				rx_ring->cq_base, rx_ring->cq_base_dma);
+		rx_ring->cq_base = NULL;
+	}
+}
+
+/* Allocate queues and buffers for this completion queue based
+ * on the values in the parameter structure. */
+static int ql_alloc_rx_resources(struct ql_adapter *qdev,
+				 struct rx_ring *rx_ring)
+{
+
+	/*
+	 * Allocate the completion queue for this rx_ring.
+	 */
+	rx_ring->cq_base =
+		pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
+				 &rx_ring->cq_base_dma);
+
+	if (rx_ring->cq_base == NULL) {
+		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	if (rx_ring->sbq_len) {
+		/*
+		 * Allocate small buffer queue.
+		 */
+		rx_ring->sbq_base =
+			pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
+					 &rx_ring->sbq_base_dma);
+
+		if (rx_ring->sbq_base == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer queue allocation failed.\n");
+			goto err_mem;
+		}
+
+		/*
+		 * Allocate small buffer queue control blocks.
+		 */
+		rx_ring->sbq =
+			kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
+				GFP_KERNEL);
+		if (rx_ring->sbq == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Small buffer queue control block allocation "
+				"failed.\n");
+			goto err_mem;
+		}
+
+		ql_init_sbq_ring(qdev, rx_ring);
+	}
+
+	if (rx_ring->lbq_len) {
+		/*
+		 * Allocate large buffer queue.
+		 */
+		rx_ring->lbq_base =
+			pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
+					 &rx_ring->lbq_base_dma);
+
+		if (rx_ring->lbq_base == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer queue allocation failed.\n");
+			goto err_mem;
+		}
+		/*
+		 * Allocate large buffer queue control blocks.
+		 */
+		rx_ring->lbq =
+			kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
+				GFP_KERNEL);
+		if (rx_ring->lbq == NULL) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Large buffer queue control block allocation "
+				"failed.\n");
+			goto err_mem;
+		}
+
+		ql_init_lbq_ring(qdev, rx_ring);
+	}
+
+	return 0;
+
+err_mem:
+	ql_free_rx_resources(qdev, rx_ring);
+	return -ENOMEM;
+}
+
+static void ql_tx_ring_clean(struct ql_adapter *qdev)
+{
+	struct tx_ring *tx_ring;
+	struct tx_ring_desc *tx_ring_desc;
+	int i, j;
+
+	/*
+	 * Loop through all queues and free
+	 * any resources.
+	 */
+	for (j = 0; j < qdev->tx_ring_count; j++) {
+		tx_ring = &qdev->tx_ring[j];
+		for (i = 0; i < tx_ring->wq_len; i++) {
+			tx_ring_desc = &tx_ring->q[i];
+			if (tx_ring_desc && tx_ring_desc->skb) {
+				QPRINTK(qdev, IFDOWN, ERR,
+					"Freeing lost SKB %p, from queue %d, "
+					"index %d.\n", tx_ring_desc->skb, j,
+					tx_ring_desc->index);
+				ql_unmap_send(qdev, tx_ring_desc,
+						tx_ring_desc->map_cnt);
+				dev_kfree_skb(tx_ring_desc->skb);
+				tx_ring_desc->skb = NULL;
+			}
+		}
+	}
+}
+
+static void ql_free_mem_resources(struct ql_adapter *qdev)
+{
+	int i;
+
+	for (i = 0; i < qdev->tx_ring_count; i++)
+		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+	for (i = 0; i < qdev->rx_ring_count; i++)
+		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
+	ql_free_shadow_space(qdev);
+}
+
+static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+{
+	int i;
+
+	/* Allocate space for our shadow registers and such. */
+	if (ql_alloc_shadow_space(qdev))
+		return -ENOMEM;
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+			QPRINTK(qdev, IFUP, ERR,
+				"RX resource allocation failed.\n");
+			goto err_mem;
+		}
+	}
+	/* Allocate tx queue resources */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+			QPRINTK(qdev, IFUP, ERR,
+				"TX resource allocation failed.\n");
+			goto err_mem;
+		}
+	}
+	return 0;
+
+err_mem:
+	ql_free_mem_resources(qdev);
+	return -ENOMEM;
+}
+
+/* Set up the rx ring control block and pass it to the chip.
+ * The control block is defined as
+ * "Completion Queue Initialization Control Block", or cqicb.
+ */
+static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+	struct cqicb *cqicb = &rx_ring->cqicb;
+	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
+		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
+		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+	void __iomem *doorbell_area =
+		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+	int err = 0;
+	u16 bq_len;
+	u64 tmp;
+	__le64 *base_indirect_ptr;
+	int page_entries;
+
+	/* Set up the shadow registers for this ring. */
+	rx_ring->prod_idx_sh_reg = shadow_reg;
+	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+	shadow_reg += sizeof(u64);
+	shadow_reg_dma += sizeof(u64);
+	rx_ring->lbq_base_indirect = shadow_reg;
+	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
+
+	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+
+	rx_ring->sbq_base_indirect = shadow_reg;
+	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+
+	/* PCI doorbell mem area + 0x00 for consumer index register */
+	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
+	rx_ring->cnsmr_idx = 0;
+	rx_ring->curr_entry = rx_ring->cq_base;
+
+	/* PCI doorbell mem area + 0x04 for valid register */
+	rx_ring->valid_db_reg = doorbell_area + 0x04;
+
+	/* PCI doorbell mem area + 0x18 for large buffer consumer */
+	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+
+	/* PCI doorbell mem area + 0x1c */
+	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+
+	memset((void *)cqicb, 0, sizeof(struct cqicb));
+	cqicb->msix_vect = rx_ring->irq;
+
+	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
+	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+
+	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
+
+	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
+
+	/*
+	 * Set up the control block load flags.
+	 */
+	cqicb->flags = FLAGS_LC |	/* Load queue base address */
+		FLAGS_LV;		/* Load MSI-X vector */
+
+	if (rx_ring->lbq_len) {
+		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
+		tmp = (u64)rx_ring->lbq_base_dma;
+		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+		page_entries = 0;
+		do {
+			*base_indirect_ptr = cpu_to_le64(tmp);
+			tmp += DB_PAGE_SIZE;
+			base_indirect_ptr++;
+			page_entries++;
+		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+
+		cqicb->lbq_addr =
+			cpu_to_le64(rx_ring->lbq_base_indirect_dma);
+		bq_len = (rx_ring->lbq_buf_map_size == 65536) ? 0 :
+			(u16) rx_ring->lbq_buf_map_size;
+		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
+		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
+			(u16) rx_ring->lbq_len;
+		cqicb->lbq_len = cpu_to_le16(bq_len);
+		rx_ring->lbq_prod_idx = 0;
+		rx_ring->lbq_curr_idx = 0;
+		rx_ring->lbq_clean_idx = 0;
+		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
+	}
+	if (rx_ring->sbq_len) {
+		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
+		tmp = (u64)rx_ring->sbq_base_dma;
+		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+		page_entries = 0;
+		do {
+			*base_indirect_ptr = cpu_to_le64(tmp);
+			tmp += DB_PAGE_SIZE;
+			base_indirect_ptr++;
+			page_entries++;
+		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+
+		cqicb->sbq_addr =
+			cpu_to_le64(rx_ring->sbq_base_indirect_dma);
+		cqicb->sbq_buf_size =
+			cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
+			(u16) rx_ring->sbq_len;
+		cqicb->sbq_len = cpu_to_le16(bq_len);
+		rx_ring->sbq_prod_idx = 0;
+		rx_ring->sbq_curr_idx = 0;
+		rx_ring->sbq_clean_idx = 0;
+		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
+	}
+	switch (rx_ring->type) {
+	case TX_Q:
+		cqicb->flags |= FLAGS_LI;	/* Load irq delay values */
+		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
+		break;
+	case DEFAULT_Q:
+		cqicb->irq_delay = 0;
+		cqicb->pkt_delay = 0;
+		break;
+	case RX_Q:
+		/* Inbound completion handling rx_rings run in
+		 * separate NAPI contexts.
+		 */
+		cqicb->flags |= FLAGS_LI;
+		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
+		break;
+	default:
+		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
+			rx_ring->type);
+	}
+	QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
+	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+				CFG_LCQ, rx_ring->cq_id);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+		return err;
+	}
+	return err;
+}
+
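+/* Set up the tx ring's work queue initialization control block
+ * (wqicb) and download it to the chip.
+ */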
+static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+	struct wqicb *wqicb = (struct wqicb *)tx_ring;
+	void __iomem *doorbell_area =
+		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
+		(tx_ring->wq_id * sizeof(u64));
+	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
+		(tx_ring->wq_id * sizeof(u64));
+	int err = 0;
+
+	/*
+	 * Assign doorbell registers for this tx_ring.
+	 */
+	/* TX PCI doorbell mem area for tx producer index */
+	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
+	tx_ring->prod_idx = 0;
+	/* TX PCI doorbell mem area + 0x04 */
+	tx_ring->valid_db_reg = doorbell_area + 0x04;
+
+	/*
+	 * Assign shadow registers for this tx_ring.
+	 */
+	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+
+	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+					Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
+	wqicb->rid = 0;
+	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
+
+	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
+
+	ql_init_tx_ring(qdev, tx_ring);
+
+	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+				(u16) tx_ring->wq_id);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+		return err;
+	}
+	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
+	return err;
+}
+
+static void ql_disable_msix(struct ql_adapter *qdev)
+{
+	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+		pci_disable_msix(qdev->pdev);
+		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
+		kfree(qdev->msi_x_entry);
+		qdev->msi_x_entry = NULL;
+	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+		pci_disable_msi(qdev->pdev);
+		clear_bit(QL_MSI_ENABLED, &qdev->flags);
+	}
+}
+
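+/* Try to enable MSI-X; fall back to MSI, then to legacy interrupts. */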
+static void ql_enable_msix(struct ql_adapter *qdev)
+{
+	int i;
+
+	qdev->intr_count = 1;
+	/* Get the MSIX vectors. */
+	if (qlge_irq_type == MSIX_IRQ) {
+		/* Try to alloc space for the msix struct,
+		 * if it fails then go to MSI/legacy.
+		 */
+		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
+						sizeof(struct msix_entry),
+						GFP_KERNEL);
+		if (!qdev->msi_x_entry) {
+			qlge_irq_type = MSI_IRQ;
+			goto msi;
+		}
+
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->msi_x_entry[i].entry = i;
+
+		if (!pci_enable_msix
+			(qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
+			set_bit(QL_MSIX_ENABLED, &qdev->flags);
+			qdev->intr_count = qdev->rx_ring_count;
+			QPRINTK(qdev, IFUP, INFO,
+				"MSI-X Enabled, got %d vectors.\n",
+				qdev->intr_count);
+			return;
+		} else {
+			kfree(qdev->msi_x_entry);
+			qdev->msi_x_entry = NULL;
+			QPRINTK(qdev, IFUP, WARNING,
+				"No MSI-X, trying MSI.\n");
+			qlge_irq_type = MSI_IRQ;
+		}
+	}
+msi:
+	if (qlge_irq_type == MSI_IRQ) {
+		if (!pci_enable_msi(qdev->pdev)) {
+			set_bit(QL_MSI_ENABLED, &qdev->flags);
+			QPRINTK(qdev, IFUP, INFO,
+				"Running with MSI interrupts.\n");
+			return;
+		}
+	}
+	qlge_irq_type = LEG_IRQ;
+	QPRINTK(qdev, IFUP, INFO, "Running with legacy interrupts.\n");
+}
+
+/*
+ * Here we build the intr_context structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_context structure is used to hook each vector
+ * to possibly different handlers.
+ */
+static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+{
+	int i = 0;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	ql_enable_msix(qdev);
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+		/* Each rx_ring has its
+		 * own intr_context since we have separate
+		 * vectors for each queue.
+		 * This is only true when MSI-X is enabled.
+		 */
+		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+			qdev->rx_ring[i].irq = i;
+			intr_context->intr = i;
+			intr_context->qdev = qdev;
+			/*
+			 * We set up each vector's enable/disable/read bits so
+			 * there are no bit/mask calculations in the critical
+			 * path.
+			 */
+			intr_context->intr_en_mask =
+				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+				INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
+				INTR_EN_IHD | i;
+			intr_context->intr_dis_mask =
+				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+				INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+				INTR_EN_IHD | i;
+			intr_context->intr_read_mask =
+				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+				INTR_EN_TYPE_READ | INTR_EN_IHD_MASK |
+				INTR_EN_IHD | i;
+
+			if (i == 0) {
+				/*
+				 * Default queue handles bcast/mcast plus
+				 * async events.  Needs buffers.
+				 */
+				intr_context->handler = qlge_isr;
+				sprintf(intr_context->name, "%s-default-queue",
+					qdev->ndev->name);
+			} else if (i < qdev->rss_ring_first_cq_id) {
+				/*
+				 * Outbound queue is for outbound
+				 * completions only.
+				 */
+				intr_context->handler = qlge_msix_tx_isr;
+				sprintf(intr_context->name, "%s-tx-%d",
+					qdev->ndev->name, i);
+			} else {
+				/*
+				 * Inbound queues handle unicast frames only.
+				 */
+				intr_context->handler = qlge_msix_rx_isr;
+				sprintf(intr_context->name, "%s-rx-%d",
+					qdev->ndev->name, i);
+			}
+		}
+	} else {
+		/*
+		 * All rx_rings use the same intr_context since
+		 * there is only one vector.
+		 */
+		intr_context->intr = 0;
+		intr_context->qdev = qdev;
+		/*
+		 * We set up each vector's enable/disable/read bits so
+		 * there are no bit/mask calculations in the critical path.
+		 */
+		intr_context->intr_en_mask =
+			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			INTR_EN_TYPE_ENABLE;
+		intr_context->intr_dis_mask =
+			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			INTR_EN_TYPE_DISABLE;
+		intr_context->intr_read_mask =
+			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+			INTR_EN_TYPE_READ;
+		/*
+		 * Single interrupt means one handler for all rings.
+		 */
+		intr_context->handler = qlge_isr;
+		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->rx_ring[i].irq = 0;
+	}
+}
+
+static void ql_free_irq(struct ql_adapter *qdev)
+{
+	int i;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+		if (intr_context->hooked) {
+			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+				free_irq(qdev->msi_x_entry[i].vector,
+					 &qdev->rx_ring[i]);
+				QPRINTK(qdev, IFDOWN, DEBUG,
+					"freeing msix interrupt %d.\n", i);
+			} else {
+				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
+				QPRINTK(qdev, IFDOWN, DEBUG,
+					"freeing msi interrupt %d.\n", i);
+			}
+		}
+	}
+	ql_disable_msix(qdev);
+}
+
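+/* Hook an interrupt handler to each vector resolved in
+ * ql_resolve_queues_to_irqs().
+ */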
+static int ql_request_irq(struct ql_adapter *qdev)
+{
+	int i;
+	int status = 0;
+	struct pci_dev *pdev = qdev->pdev;
+	struct intr_context *intr_context = &qdev->intr_context[0];
+
+	ql_resolve_queues_to_irqs(qdev);
+
+	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+		atomic_set(&intr_context->irq_cnt, 0);
+		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+			status = request_irq(qdev->msi_x_entry[i].vector,
+						intr_context->handler,
+						0,
+						intr_context->name,
+						&qdev->rx_ring[i]);
+			if (status) {
+				QPRINTK(qdev, IFUP, ERR,
+					"Failed request for MSIX interrupt %d.\n",
+					i);
+				goto err_irq;
+			} else {
+				QPRINTK(qdev, IFUP, DEBUG,
+					"Hooked intr %d, queue type %s%s%s, "
+					"with name %s.\n",
+					i, qdev->rx_ring[i].type ==
+					DEFAULT_Q ? "DEFAULT_Q" : "",
+					qdev->rx_ring[i].type ==
+					TX_Q ? "TX_Q" : "",
+					qdev->rx_ring[i].type ==
+					RX_Q ? "RX_Q" : "", intr_context->name);
+			}
+		} else {
+			QPRINTK(qdev, IFUP, DEBUG,
+				"trying msi or legacy interrupts.\n");
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: irq = %d.\n", __func__, pdev->irq);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: context->name = %s.\n", __func__,
+				intr_context->name);
+			QPRINTK(qdev, IFUP, DEBUG,
+				"%s: dev_id = 0x%p.\n", __func__,
+				&qdev->rx_ring[0]);
+			status =
+				request_irq(pdev->irq, qlge_isr,
+					test_bit(QL_MSI_ENABLED,
+						 &qdev->
+						 flags) ? 0 : IRQF_SHARED,
+					intr_context->name, &qdev->rx_ring[0]);
+			if (status)
+				goto err_irq;
+
+			QPRINTK(qdev, IFUP, ERR,
+				"Hooked intr %d, queue type %s%s%s, "
+				"with name %s.\n", i, qdev->rx_ring[0].type ==
+				DEFAULT_Q ? "DEFAULT_Q" : "",
+				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
+				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+				intr_context->name);
+		}
+		intr_context->hooked = 1;
+	}
+	return status;
+err_irq:
+	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
+	ql_free_irq(qdev);
+	return status;
+}
+
+static int ql_start_rss(struct ql_adapter *qdev)
+{
+	struct ricb *ricb = &qdev->ricb;
+	int status = 0;
+	int i;
+	u8 *hash_id = (u8 *) ricb->hash_cq_id;
+
+	memset((void *)ricb, 0, sizeof(*ricb));
+
+	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
+	ricb->flags =
+		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 |
+		RSS_RT4 | RSS_RT6);
+	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
+
+	/*
+	 * Fill out the Indirection Table.
+	 */
+	for (i = 0; i < 256; i++)
+		hash_id[i] = (i & (qdev->rss_ring_count - 1));
+	/*
+	 * Random values for the IPv6 and IPv4 Hash Keys.
+	 */
+	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
+	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
+
+	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
+	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+		return status;
+	}
+	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
+	return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+	int i;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	/* Clear all the entries in the routing table. */
+	for (i = 0; i < 16; i++) {
+		status = ql_set_routing_reg(qdev, i, 0, 0);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for CAM "
+				"packets.\n");
+			goto exit;
+		}
+	}
+
+	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for error packets.\n");
+		goto exit;
+	}
+	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for broadcast "
+			"packets.\n");
+		goto exit;
+	}
+	/* If we have more than one inbound queue, then turn on RSS in the
+	 * routing block.
+	 */
+	if (qdev->rss_ring_count > 1) {
+		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+					RT_IDX_RSS_MATCH, 1);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init routing register for MATCH "
+				"RSS packets.\n");
+			goto exit;
+		}
+	}
+
+	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+					RT_IDX_CAM_HIT, 1);
+	if (status)
+		QPRINTK(qdev, IFUP, ERR,
+			"Failed to init routing register for CAM packets.\n");
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+int ql_cam_route_initialize(struct ql_adapter *qdev)
+{
+	int status;
+	char zero_mac_addr[6];
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	if (ql_read32(qdev, STS) & qdev->port_link_up) {
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+			qdev->ndev->dev_addr[0], qdev->ndev->dev_addr[1],
+			qdev->ndev->dev_addr[2], qdev->ndev->dev_addr[3],
+			qdev->ndev->dev_addr[4], qdev->ndev->dev_addr[5]);
+		status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->dev_addr,
+				MAC_ADDR_TYPE_CAM_MAC, qdev->port * MAX_CQ);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init mac address.\n");
+			ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+			return status;
+		}
+	} else {
+		memset(zero_mac_addr, 0, sizeof(zero_mac_addr));
+		status = ql_set_mac_addr_reg(qdev, (u8 *) zero_mac_addr,
+				MAC_ADDR_TYPE_CAM_MAC, qdev->port * MAX_CQ);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to clear mac address.\n");
+			ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+			return status;
+		} else
+			QPRINTK(qdev, IFUP, DEBUG,
+				"Clearing MAC address on %s\n",
+				qdev->ndev->name);
+	}
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+	status = ql_route_initialize(qdev);
+	if (status)
+		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+
+	return status;
+}
+
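+/* Bring up the chip:  program error handling, start the rx and tx
+ * rings, configure RSS if needed, then initialize the port and the
+ * MAC/routing filters.
+ */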
+static int ql_adapter_initialize(struct ql_adapter *qdev)
+{
+	u32 value, mask;
+	int i;
+	int status = 0;
+
+	/*
+	 * Set up the System register to halt on errors.
+	 */
+	value = SYS_EFE | SYS_FAE;
+	mask = value << 16;
+	ql_write32(qdev, SYS, mask | value);
+
+	/* Set the default queue, and VLAN behavior. */
+	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV |
+		NIC_RCV_CFG_VLAN_MATCH_AND_NON;
+	mask = NIC_RCV_CFG_DFQ_MASK | NIC_RCV_CFG_VLAN_MASK |
+		(NIC_RCV_CFG_RV << 16);
+	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+
+	/* Set the MPI interrupt to enabled. */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+
+	/* Enable the function, set pagesize, enable error checking. */
+	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+		FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
+
+	/* Set/clear header splitting. */
+	mask = FSC_VM_PAGESIZE_MASK |
+		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+	ql_write32(qdev, FSC, mask | value);
+
+	/* Start up the rx queues. */
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to start rx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	/* If there is more than one inbound completion queue
+	 * then download a RICB to configure RSS.
+	 */
+	if (qdev->rss_ring_count > 1) {
+		status = ql_start_rss(qdev);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+			return status;
+		}
+	}
+
+	/* Start up the tx queues. */
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to start tx ring[%d].\n", i);
+			return status;
+		}
+	}
+
+	/* Initialize the port and set the max framesize. */
+	status = qdev->nic_ops->port_initialize(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+		return status;
+	}
+
+	/* Set up the MAC address and frame routing filter. */
+	status = ql_cam_route_initialize(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+				"Failed to init CAM/Routing tables.\n");
+		return status;
+	}
+	return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+	u32 value;
+	int max_wait_time = 3;
+	int status = 0;
+
+	if (pci_channel_offline(qdev->pdev))
+		return 0;
+
+	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+
+	max_wait_time = 3;
+	do {
+		value = ql_read32(qdev, RST_FO);
+		if ((value & RST_FO_FR) == 0)
+			break;
+		udelay(10);
+	} while ((--max_wait_time));
+
+	if (max_wait_time == 0) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"ETIMEDOUT!!! errored out of resetting the chip!\n");
+		status = -ETIMEDOUT;
+	}
+
+	return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+
+	QPRINTK(qdev, PROBE, INFO,
+		"Function #%d, Port #%d, Rev ID = %x.\n",
+		qdev->func, qdev->port,
+		qdev->chip_rev_id);
+	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
+}
+
+int ql_wol(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 wol = MB_WOL_DISABLE;
+
+	/* The CAM is still intact after a reset, but if we
+	 * are doing WOL, then we may need to program the
+	 * routing regs. We would also need to issue the mailbox
+	 * commands to instruct the MPI what to do per the ethtool
+	 * settings.
+	 */
+
+	if (qdev->wol & WAKE_ARP) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"ARP request WOL not supported yet.\n");
+		return status;
+	}
+
+	if (qdev->wol & WAKE_MAGIC) {
+		status = ql_mb_wol_set_magic(qdev, 1);
+		if (status) {
+			QPRINTK(qdev, IFDOWN, ERR,
+				"Failed to set magic packet on %s.\n",
+				qdev->ndev->name);
+			return status;
+		} else
+			QPRINTK(qdev, DRV, INFO,
+				"Enabled magic packet successfully on %s.\n",
+				qdev->ndev->name);
+
+		wol |= MB_WOL_MAGIC_PKT;
+	}
+
+	if (qdev->wol & WAKE_MAGICSECURE) {
+		QPRINTK(qdev, IFDOWN, ERR,
+			"Secure Magic packet WOL not supported yet.\n");
+		return status;
+	}
+
+	if (qdev->wol & WAKE_PHY)
+		wol |= (MB_WOL_LINK_UP | MB_WOL_LINK_DOWN);
+
+	if (qdev->wol & WAKE_UCAST)
+		wol |= MB_WOL_UCAST;
+
+	if (qdev->wol & WAKE_MCAST)
+		wol |= MB_WOL_MCAST;
+
+	if (qdev->wol & WAKE_BCAST)
+		wol |= MB_WOL_BCAST;
+
+	if (qdev->wol) {
+		/* Reroute all packets to Management Interface */
+		ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
+			(MGMT_RCV_CFG_RM << 16)));
+		wol |= MB_WOL_MODE_ON;
+		status = ql_mb_wol_mode(qdev, wol);
+		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "Successfully set" : "Failed", wol,
+			qdev->ndev->name);
+	}
+
+	return status;
+}
+
+static void ql_disable_napi(struct ql_adapter *qdev)
+{
+	int i;
+
+	for (i = 0; i < qdev->intr_count; i++)
+		netif_poll_disable(qdev->rx_ring[i].dummy_netdev);
+}
+static void ql_enable_napi(struct ql_adapter *qdev)
+{
+	int i;
+
+	for (i = 0; i < qdev->intr_count; i++)
+		netif_poll_enable(qdev->rx_ring[i].dummy_netdev);
+}
+
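+/* Bring the adapter down: drop the link, cancel the outstanding
+ * worker threads, quiesce NAPI and interrupts, clean the TX and RX
+ * rings, then soft reset the chip and apply any WOL settings.
+ */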
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+	int i, status = 0;
+	struct rx_ring *rx_ring;
+
+	ql_link_off(qdev);
+
+	/* Don't kill the reset worker thread if we
+	 * are in the process of recovery.
+	 */
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_work);
+	cancel_delayed_work_sync(&qdev->mpi_idc_work);
+	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+	/* The default queue at index 0 is always processed in
+	 * a workqueue.
+	 */
+
+	/* The rest of the rx_rings are processed in
+	 * a workqueue only if it's a single interrupt
+	 * environment (MSI/Legacy).
+	 */
+	for (i = 1; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		/* Only the RSS rings use NAPI on multi irq
+		 * environment.  Outbound completion processing
+		 * is done in interrupt context.
+		 */
+	}
+
+	ql_disable_napi(qdev);
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+
+	ql_disable_interrupts(qdev);
+
+	ql_tx_ring_clean(qdev);
+	ql_free_rx_buffers(qdev);
+	status = ql_adapter_reset(qdev);
+	if (status)
+		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
+			qdev->func);
+	ql_wol(qdev);
+	return status;
+}
+
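+/* Bring the adapter up: initialize the hardware, enable NAPI,
+ * post receive buffers and turn on interrupts.  The link is only
+ * reported up if the port is already initialized and up.
+ */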
+static int ql_adapter_up(struct ql_adapter *qdev)
+{
+	int err = 0;
+
+	err = ql_adapter_initialize(qdev);
+	if (err) {
+		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+		goto err_init;
+	}
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_enable_napi(qdev);
+	ql_alloc_rx_buffers(qdev);
+	ql_enable_all_completion_interrupts(qdev);
+	if ((ql_read32(qdev, STS) & qdev->port_init) &&
+			(ql_read32(qdev, STS) & qdev->port_link_up))
+		ql_link_on(qdev);
+	ql_enable_interrupts(qdev);
+	return 0;
+err_init:
+	ql_adapter_reset(qdev);
+	return err;
+}
+
+static void ql_release_adapter_resources(struct ql_adapter *qdev)
+{
+	ql_free_mem_resources(qdev);
+	ql_free_irq(qdev);
+}
+
+static int ql_get_adapter_resources(struct ql_adapter *qdev)
+{
+	int status = 0;
+
+	if (ql_alloc_mem_resources(qdev)) {
+		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+		return -ENOMEM;
+	}
+	status = ql_request_irq(qdev);
+	if (status)
+		goto err_irq;
+	return status;
+err_irq:
+	ql_free_mem_resources(qdev);
+	return status;
+}
+
+static int qlge_close(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/*
+	 * Wait for device to recover from a reset.
+	 * (Rarely happens, but possible.)
+	 */
+	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+		msleep(1);
+	ql_adapter_down(qdev);
+	ql_release_adapter_resources(qdev);
+	return 0;
+}
+
+static int ql_configure_rings(struct ql_adapter *qdev)
+{
+	int i;
+	struct rx_ring *rx_ring;
+	struct tx_ring *tx_ring;
+	int cpu_cnt = num_online_cpus();
+
+	/*
+	 * For each processor present we allocate one
+	 * rx_ring for outbound completions, and one
+	 * rx_ring for inbound completions.  Plus there is
+	 * always the one default queue.  For the CPU
+	 * counts we end up with the following rx_rings:
+	 * rx_ring count =
+	 *  one default queue +
+	 *  (CPU count * outbound completion rx_ring) +
+	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 * To keep it simple we limit the total number of
+	 * queues to < 32, so we truncate CPU to 8.
+	 * This limitation can be removed when requested.
+	 */
+
+	if (cpu_cnt > MAX_CPUS)
+		cpu_cnt = MAX_CPUS;
+
+	/*
+	 * rx_ring[0] is always the default queue.
+	 */
+	/* Allocate outbound completion ring for each CPU. */
+	qdev->tx_ring_count = cpu_cnt;
+	/* Allocate inbound completion (RSS) ring for each CPU. */
+	qdev->rss_ring_count = cpu_cnt;
+	/* cq_id for the first inbound ring handler. */
+	qdev->rss_ring_first_cq_id = cpu_cnt + 1;
+	/*
+	 * qdev->rx_ring_count:
+	 * Total number of rx_rings.  This includes the one
+	 * default queue, a number of outbound completion
+	 * handler rx_rings, and the number of inbound
+	 * completion handler rx_rings.
+	 */
+	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+
+	for (i = 0; i < qdev->tx_ring_count; i++) {
+		tx_ring = &qdev->tx_ring[i];
+		memset((void *)tx_ring, 0, sizeof(*tx_ring));
+		tx_ring->qdev = qdev;
+		tx_ring->wq_id = i;
+		tx_ring->wq_len = qdev->tx_ring_size;
+		tx_ring->wq_size =
+			tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+
+		/*
+		 * The completion queue ID for the tx rings start
+		 * immediately after the default Q ID, which is zero.
+		 */
+		tx_ring->cq_id = i + 1;
+	}
+
+	for (i = 0; i < qdev->rx_ring_count; i++) {
+		rx_ring = &qdev->rx_ring[i];
+		rx_ring->qdev = qdev;
+		rx_ring->cq_id = i;
+		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
+		if (i == 0) {	/* Default queue at index 0. */
+			/*
+			 * Default queue handles bcast/mcast plus
+			 * async events.  Needs buffers.
+			 */
+			rx_ring->cq_len = qdev->rx_ring_size;
+			rx_ring->cq_size = rx_ring->cq_len *
+					sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+			rx_ring->lbq_size =
+				rx_ring->lbq_len * sizeof(__le64);
+			rx_ring->lbq_buf_map_size =
+				(PAGE_SIZE > LARGE_BUFFER_SIZE) ?
+				LARGE_BUFFER_SIZE : PAGE_SIZE;
+			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+			rx_ring->sbq_size =
+				rx_ring->sbq_len * sizeof(__le64);
+			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE;
+			rx_ring->type = DEFAULT_Q;
+		} else if (i < qdev->rss_ring_first_cq_id) {
+			/*
+			 * Outbound queue handles outbound completions only.
+			 */
+			/* outbound cq is same size as tx_ring it services. */
+			rx_ring->cq_len = qdev->tx_ring_size;
+			rx_ring->cq_size = rx_ring->cq_len *
+					sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = 0;
+			rx_ring->lbq_size = 0;
+			rx_ring->lbq_buf_map_size = 0;
+			rx_ring->sbq_len = 0;
+			rx_ring->sbq_size = 0;
+			rx_ring->sbq_buf_size = 0;
+			rx_ring->type = TX_Q;
+		} else {	/* Inbound completions (RSS) queues */
+			/*
+			 * Inbound queues handle unicast frames only.
+			 */
+			rx_ring->cq_len = qdev->rx_ring_size;
+			rx_ring->cq_size = rx_ring->cq_len *
+					sizeof(struct ql_net_rsp_iocb);
+			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+			rx_ring->lbq_size =
+				rx_ring->lbq_len * sizeof(__le64);
+			rx_ring->lbq_buf_map_size =
+				(PAGE_SIZE > LARGE_BUFFER_SIZE) ?
+				LARGE_BUFFER_SIZE : PAGE_SIZE;
+			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+			rx_ring->sbq_size =
+				rx_ring->sbq_len * sizeof(__le64);
+			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE;
+			rx_ring->type = RX_Q;
+		}
+	}
+	return 0;
+}
+
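+/* net_device open entry point.  Size the rings for the current
+ * CPU count, allocate memory and IRQ resources, then bring the
+ * adapter up.
+ */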
+static int qlge_open(struct net_device *ndev)
+{
+	int err = 0;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	err = ql_configure_rings(qdev);
+	if (err)
+		return err;
+
+	err = ql_get_adapter_resources(qdev);
+	if (err)
+		goto error_up;
+
+	err = ql_adapter_up(qdev);
+	if (err)
+		goto error_up;
+
+	return err;
+
+error_up:
+	ql_release_adapter_resources(qdev);
+	return err;
+}
+
+static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (ndev->mtu == 1500 && new_mtu == 9000) {
+		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+		queue_delayed_work(qdev->workqueue,
+				&qdev->mpi_port_cfg_work, 0);
+	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
+		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
+	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
+			(ndev->mtu == 9000 && new_mtu == 9000)) {
+	} else
+		return -EINVAL;
+	ndev->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *qlge_get_stats(struct net_device
+						*ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	return &qdev->stats;
+}
+
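+/* Update the routing table and multicast CAM entries to reflect
+ * the net_device's current promiscuous, all-multi and multicast
+ * list settings.
+ */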
+static void qlge_set_multicast_list(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct dev_mc_list *mc_ptr;
+	unsigned long hw_flags = 0;
+	int i, status;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	/*
+	 * Set or clear promiscuous mode if a
+	 * transition is taking place.
+	 */
+	if (ndev->flags & IFF_PROMISC) {
+		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT,
+						RT_IDX_VALID, 1)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to set promiscuous mode.\n");
+			} else {
+				set_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+			if (ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT,
+						RT_IDX_VALID, 0)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to clear promiscuous mode.\n");
+			} else {
+				clear_bit(QL_PROMISCUOUS, &qdev->flags);
+			}
+		}
+	}
+
+	/*
+	 * Set or clear all multicast mode if a
+	 * transition is taking place.
+	 */
+	if ((ndev->flags & IFF_ALLMULTI) ||
+		(ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+				(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to set all-multi mode.\n");
+			} else {
+				set_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	} else {
+		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+			if (ql_set_routing_reg
+				(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to clear all-multi mode.\n");
+			} else {
+				clear_bit(QL_ALLMULTI, &qdev->flags);
+			}
+		}
+	}
+
+	if (ndev->mc_count) {
+		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+		if (status)
+			goto exit;
+		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
+			i++, mc_ptr = mc_ptr->next) {
+			status = ql_set_mac_addr_reg(qdev,
+					(u8 *) mc_ptr->dmi_addr,
+					MAC_ADDR_TYPE_MULTI_MAC, i);
+			if (status) {
+				QPRINTK(qdev, HW, ERR,
+					"Failed to load multicast address.\n");
+				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+				goto exit;
+			}
+		}
+		if (ql_set_routing_reg(qdev, RT_IDX_MCAST_MATCH_SLOT,
+					RT_IDX_MCAST_MATCH, 1)) {
+			QPRINTK(qdev, HW, ERR,
+				"Failed to set multicast match mode.\n");
+		} else {
+			set_bit(QL_ALLMULTI, &qdev->flags);
+		}
+		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	}
+exit:
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
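+/* Change the station MAC address.  Only allowed while the
+ * interface is down; the new address is written to the CAM.
+ */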
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct sockaddr *addr = p;
+	unsigned long hw_flags = 0;
+	int status;
+	if (netif_running(ndev)) {
+		QPRINTK(qdev, DRV, ERR,
+			"Interface already active, aborting "
+			"MAC address change request.\n");
+		return -EBUSY;
+	}
+
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		QPRINTK(qdev, DRV, ERR,
+			"Invalid Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+			addr->sa_data[0], addr->sa_data[1], addr->sa_data[2],
+			addr->sa_data[3], addr->sa_data[4], addr->sa_data[5]);
+		return -EADDRNOTAVAIL;
+	}
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Unable to get hardware semaphore on %s\n",
+			qdev->ndev->name);
+		return status;
+	}
+	QPRINTK(qdev, IFUP, DEBUG,
+		"Set Mac addr request %02x:%02x:%02x:%02x:%02x:%02x\n",
+		ndev->dev_addr[0],  ndev->dev_addr[1],  ndev->dev_addr[2],
+		ndev->dev_addr[3], ndev->dev_addr[4],  ndev->dev_addr[5]);
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+				MAC_ADDR_TYPE_CAM_MAC, qdev->port * MAX_CQ);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	if (status)
+		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+	return status;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	ql_queue_asic_error(qdev);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, asic_reset_work);
+	int status;
+
+	status = ql_adapter_down(qdev);
+	if (status)
+		goto error;
+	status = ql_adapter_up(qdev);
+	if (status)
+		goto error;
+	return;
+error:
+	QPRINTK(qdev, IFUP, ALERT,
+		"Driver up/down cycle failed, closing device\n");
+	rtnl_lock();
+	set_bit(QL_ADAPTER_UP, &qdev->flags);
+	dev_close(qdev->ndev);
+	rtnl_unlock();
+}
+
+static struct nic_operations qla8012_nic_ops = {
+	.get_flash		= ql_get_8012_flash_params,
+	.port_initialize	= ql_8012_port_initialize,
+};
+
+static struct nic_operations qla8000_nic_ops = {
+	.get_flash		= ql_get_8000_flash_params,
+	.port_initialize	= ql_8000_port_initialize,
+};
+
+/* Find the pcie function number for the other NIC
+ * on this chip.  Since both NIC functions share a
+ * common firmware we have the lowest enabled function
+ * do any common work.  Examples would be resetting
+ * after a fatal firmware error, or doing a firmware
+ * coredump.
+ */
+static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
+{
+	int status = 0;
+	u32 temp;
+	u32 nic_func1, nic_func2;
+
+	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
+			&temp);
+	if (status)
+		return status;
+
+	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
+			MPI_TEST_NIC_FUNC_MASK);
+	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
+			MPI_TEST_NIC_FUNC_MASK);
+
+	if (qdev->func == nic_func1)
+		qdev->alt_func = nic_func2;
+	else if (qdev->func == nic_func2)
+		qdev->alt_func = nic_func1;
+	else
+		status = -EIO;
+
+	return status;
+}
+
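+/* Read our function number from the status register and derive
+ * the port number, semaphore masks, mailbox addresses and NIC
+ * operations for this PCIe function.
+ */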
+static int ql_get_board_info(struct ql_adapter *qdev)
+{
+	int status;
+	qdev->func =
+		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+	if (qdev->func > 3)
+		return -EIO;
+
+	status = ql_get_alt_pcie_func(qdev);
+	if (status)
+		return status;
+
+	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
+	if (qdev->port) {
+		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
+		qdev->port_link_up = STS_PL1;
+		qdev->port_init = STS_PI1;
+		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
+		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
+	} else {
+		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
+		qdev->port_link_up = STS_PL0;
+		qdev->port_init = STS_PI0;
+		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
+		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
+	}
+	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+	qdev->device_id = qdev->pdev->device;
+	if (qdev->device_id == QLGE_DEVICE_ID_8012)
+		qdev->nic_ops = &qla8012_nic_ops;
+	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
+		qdev->nic_ops = &qla8000_nic_ops;
+	return status;
+}
+
+static void ql_release_all(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (qdev->workqueue) {
+		destroy_workqueue(qdev->workqueue);
+		qdev->workqueue = NULL;
+	}
+	if (qdev->q_workqueue) {
+		destroy_workqueue(qdev->q_workqueue);
+		qdev->q_workqueue = NULL;
+	}
+	if (qdev->reg_base)
+		iounmap(qdev->reg_base);
+	if (qdev->doorbell_area)
+		iounmap(qdev->doorbell_area);
+	vfree(qdev->mpi_coredump);
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
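+/* One-time PCI and software setup done at probe time: enable the
+ * device, tune PCIe error reporting and the read request size, map
+ * the register and doorbell BARs, read the flash, and set up the
+ * default ring, coalescing and worker thread parameters.
+ */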
+static int __devinit ql_init_device(struct pci_dev *pdev,
+				struct net_device *ndev, int cards_found)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int pos, pcie_dctl_reg, err = 0;
+	u16 val16;
+
+	memset((void *)qdev, 0, sizeof(*qdev));
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "PCI device enable failed.\n");
+		return err;
+	}
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (pos <= 0) {
+		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
+			"aborting.\n");
+		err = -EIO;
+		goto err_out;
+	} else {
+		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
+		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+		val16 |= (PCI_EXP_DEVCTL_CERE |
+			  PCI_EXP_DEVCTL_NFERE |
+			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
+		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+
+		/* PCIe -- adjust Maximum Read Request Size (4096). */
+		pcie_dctl_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+		if (pcie_dctl_reg) {
+			uint16_t pcie_dctl;
+			pcie_dctl_reg += 0x08;  /* PCI_EXP_DEVCTL */
+			pci_read_config_word(pdev, pcie_dctl_reg, &pcie_dctl);
+			pcie_dctl &= ~0x7000;	/* ~PCI_EXP_DEVCTL_READRQ */
+			pcie_dctl |= 0x5000;
+			pci_write_config_word(pdev, pcie_dctl_reg, pcie_dctl);
+		}
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "PCI region request failed.\n");
+		goto err_out;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		set_bit(QL_DMA64, &qdev->flags);
+		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (!err)
+			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	}
+
+	if (err) {
+		dev_err(&pdev->dev, "No usable DMA configuration.\n");
+		goto err_out;
+	}
+
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+	pci_set_drvdata(pdev, ndev);
+	qdev->reg_base =
+		ioremap_nocache(pci_resource_start(pdev, 1),
+				pci_resource_len(pdev, 1));
+	if (!qdev->reg_base) {
+		dev_err(&pdev->dev, "Register mapping failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
+	qdev->doorbell_area =
+		ioremap_nocache(pci_resource_start(pdev, 3),
+				pci_resource_len(pdev, 3));
+	if (!qdev->doorbell_area) {
+		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+	qdev->ndev = ndev;
+	qdev->pdev = pdev;
+	err = ql_get_board_info(qdev);
+	if (err) {
+		dev_err(&pdev->dev, "Register access failed.\n");
+		err = -EIO;
+		goto err_out;
+	}
+	qdev->msg_enable = netif_msg_init(debug, default_msg);
+	spin_lock_init(&qdev->hw_lock);
+	spin_lock_init(&qdev->stats_lock);
+
+	qdev->mpi_coredump = vmalloc(sizeof(struct ql_mpi_coredump));
+	if ((qdev->mpi_coredump == NULL) && qlge_mpi_coredump) {
+		dev_err(&pdev->dev, "Coredump alloc failed.\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/* make sure the EEPROM is good */
+	err = qdev->nic_ops->get_flash(qdev);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid FLASH.\n");
+		goto err_out;
+	}
+
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	/* Set up the default ring sizes. */
+	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
+	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+	/* Set up the coalescing parameters. */
+	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
+	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
+	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+
+	/*
+	 * Set up the operating parameters.
+	 */
+	qdev->rx_csum = 1;
+
+	qdev->q_workqueue = create_workqueue(ndev->name);
+	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
+	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
+	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+	init_completion(&qdev->ide_completion);
+	mutex_init(&qdev->mpi_mutex);
+
+	if (!cards_found) {
+		dev_info(&pdev->dev, "%s\n", DRV_STRING);
+		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
+			 DRV_NAME, DRV_VERSION);
+		dev_info(&pdev->dev, "Patch version: %s, Release date: %s.\n",
+			 DIS_VERSION, REL_DATE);
+	}
+	return 0;
+err_out:
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	return err;
+}
+
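+/* NAPI poll routine.  Runs on the dummy netdev attached to an
+ * rx_ring; it cleans up to min(*budget, quota) inbound completions
+ * and re-enables the ring's interrupt once the work is done.
+ */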
+static int ql_poll(struct net_device *ndev, int *budget)
+{
+	struct rx_ring *rx_ring = ndev->priv;
+	struct ql_adapter *qdev = rx_ring->qdev;
+	int work_to_do = min(*budget, ndev->quota);
+	int work_done;
+
+	QPRINTK(qdev, RX_STATUS, DEBUG,
+		"NAPI poll, netdev = %p, rx_ring = %p, cq_id = %d, "
+		"work_to_do = %d\n",
+		ndev, rx_ring, rx_ring->cq_id, work_to_do);
+
+	work_done = ql_clean_inbound_rx_ring(rx_ring, work_to_do);
+	*budget -= work_done;
+	ndev->quota -= work_done;
+
+	if (work_done >= work_to_do)
+		return 1;
+
+	netif_rx_complete(ndev);
+	ql_enable_completion_interrupt(qdev, rx_ring->irq);
+	return 0;
+}
+
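+/* Allocate a dummy netdev for each rx_ring so every ring gets its
+ * own poll context, and hook the ql_poll() routine to it.
+ */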
+static int ql_init_napi(struct ql_adapter *qdev)
+{
+	int rc = 0;
+	int i;
+	struct rx_ring *rx_ring = &qdev->rx_ring[0];
+
+	for (i = 0; i < MAX_RX_RINGS; i++, rx_ring++) {
+		if ((rx_ring->dummy_netdev =
+				alloc_netdev(0, "", ether_setup)) == NULL) {
+			rc = -ENOMEM;
+			goto init_napi_err;
+		} else {
+			rx_ring->dummy_netdev->priv = rx_ring;
+			rx_ring->dummy_netdev->weight = 64;
+			rx_ring->dummy_netdev->poll = ql_poll;
+			set_bit(__LINK_STATE_START,
+					&rx_ring->dummy_netdev->state);
+		}
+	}
+	return rc;
+
+init_napi_err:
+	/* Free allocated netdev blocks */
+	for (; i >= 0; i--, rx_ring--) {
+		if (rx_ring->dummy_netdev) {
+			free_netdev(rx_ring->dummy_netdev);
+			rx_ring->dummy_netdev = NULL;
+		}
+	}
+	return rc;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ql_netpoll(struct net_device *netdev)
+{
+	struct ql_adapter *qdev = netdev_priv(netdev);
+
+	/* Disable interrupts */
+	ql_disable_interrupts(qdev);
+
+	/* Invoke interrupt handler */
+	qlge_isr(qdev->pdev->irq, &qdev->rx_ring[0]);
+
+	/* Enable interrupts */
+	ql_enable_interrupts(qdev);
+
+	return;
+}
+
+#endif
+
+static int __devinit qlge_probe(struct pci_dev *pdev,
+				const struct pci_device_id *pci_entry)
+{
+	struct net_device *ndev = NULL;
+	struct ql_adapter *qdev = NULL;
+	static int cards_found;
+	int err = 0;
+
+	ndev = alloc_etherdev(sizeof(struct ql_adapter));
+	if (!ndev)
+		return -ENOMEM;
+
+	err = ql_init_device(pdev, ndev, cards_found);
+	if (err < 0) {
+		free_netdev(ndev);
+		return err;
+	}
+
+	qdev = netdev_priv(ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ndev->features = (0
+			  | NETIF_F_IP_CSUM
+			  | NETIF_F_SG
+			  | NETIF_F_TSO
+#ifdef NETIF_F_TSO6
+			  | NETIF_F_TSO6
+#endif
+#ifdef NETIF_F_TSO_ECN
+			  | NETIF_F_TSO_ECN
+#endif
+			  | NETIF_F_HW_VLAN_TX
+			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
+
+	if (test_bit(QL_DMA64, &qdev->flags))
+		ndev->features |= NETIF_F_HIGHDMA;
+
+	/*
+	 * Set up net_device structure.
+	 */
+	ndev->tx_queue_len = qdev->tx_ring_size;
+	ndev->irq = pdev->irq;
+
+	ndev->open			= qlge_open;
+	ndev->stop			= qlge_close;
+	ndev->hard_start_xmit		= qlge_send;
+	ndev->change_mtu		= qlge_change_mtu;
+	ndev->get_stats			= qlge_get_stats;
+	ndev->set_multicast_list	= qlge_set_multicast_list;
+	ndev->set_mac_address		= qlge_set_mac_address;
+	ndev->tx_timeout		= qlge_tx_timeout;
+	ndev->vlan_rx_register		= ql_vlan_rx_register;
+	ndev->vlan_rx_add_vid		= ql_vlan_rx_add_vid;
+	ndev->vlan_rx_kill_vid		= ql_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	ndev->poll_controller		= ql_netpoll;
+#endif
+	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+	ndev->watchdog_timeo = 10 * HZ;
+
+	err = register_netdev(ndev);
+	if (err) {
+		dev_err(&pdev->dev, "net device registration failed.\n");
+		ql_release_all(pdev);
+		pci_disable_device(pdev);
+		return err;
+	}
+
+	err = ql_init_napi(qdev);
+	if (err) {
+		dev_err(&pdev->dev, "Napi initialization failed.\n");
+		unregister_netdev(ndev);
+		ql_release_all(pdev);
+		pci_disable_device(pdev);
+		return err;
+	}
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	ql_display_dev_info(ndev);
+	cards_found++;
+	return 0;
+}
+
+static void ql_deinit_napi(struct ql_adapter *qdev)
+{
+	int i;
+	struct rx_ring *rx_ring;
+	rx_ring = &qdev->rx_ring[0];
+	for (i = 0; i < MAX_RX_RINGS; i++, rx_ring++) {
+		if (rx_ring->dummy_netdev) {
+			free_netdev(rx_ring->dummy_netdev);
+			rx_ring->dummy_netdev = NULL;
+		}
+	}
+}
+
+static void __devexit qlge_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	unregister_netdev(ndev);
+	ql_deinit_napi(qdev);
+	ql_release_all(pdev);
+	pci_disable_device(pdev);
+	free_netdev(ndev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+						enum pci_channel_state state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	int err = 0;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		netif_device_detach(ndev);
+		if (netif_running(ndev)) {
+			err = qlge_close(ndev);
+			if (err) {
+				dev_err(&pdev->dev,
+					"%s: Failed qlge close from "
+					"pci_channel_io_frozen.\n", __func__);
+				return PCI_ERS_RESULT_DISCONNECT;
+			}
+		}
+		pci_disable_device(pdev);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_err(&pdev->dev,
+			"%s: pci_channel_io_perm_failure.\n", __func__);
+		qlge_close(ndev);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the () routine.
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	printk(KERN_ERR "%s: Enter.\n", __func__);
+
+	if (pci_enable_device(pdev)) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* Make sure the EEPROM is good */
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	if (!is_valid_ether_addr(ndev->perm_addr)) {
+		QPRINTK(qdev, IFUP, ERR,
+			"%s: After reset, invalid MAC address.\n", __func__);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err = 0;
+	printk(KERN_ERR "%s: Enter.\n", __func__);
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(ndev)) {
+		err = qlge_open(ndev);
+		if (err) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Device initialization failed after reset.\n");
+			return;
+		}
+	}
+
+	netif_device_attach(ndev);
+}
+
+static struct pci_error_handlers qlge_err_handler = {
+	.error_detected = qlge_io_error_detected,
+	.slot_reset = qlge_io_slot_reset,
+	.resume = qlge_io_resume,
+};
+
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	netif_device_detach(ndev);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_down(qdev);
+		if (!err)
+			return err;
+	}
+
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int qlge_resume(struct pci_dev *pdev)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		QPRINTK(qdev, IFUP, ERR,
+			"Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(ndev)) {
+		err = ql_adapter_up(qdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(ndev);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+	qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+	.name = DRV_NAME,
+	.id_table = qlge_pci_tbl,
+	.probe = qlge_probe,
+	.remove = __devexit_p(qlge_remove),
+#ifdef CONFIG_PM
+	.suspend = qlge_suspend,
+	.resume = qlge_resume,
+#endif
+	.shutdown = qlge_shutdown,
+	.err_handler = &qlge_err_handler
+};
+
+static int __init qlge_init_module(void)
+{
+	return pci_register_driver(&qlge_driver);
+}
+
+static void __exit qlge_exit(void)
+{
+	pci_unregister_driver(&qlge_driver);
+}
+
+module_init(qlge_init_module);
+module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 0000000..4973c00
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,1196 @@
+#include "qlge.h"
+
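+/* Decode the completion status in mailbox 0 and log all of the
+ * mailbox output registers for a completed command.
+ */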
+static void ql_display_mb_sts(struct ql_adapter *qdev,
+					struct mbox_params *mbcp)
+{
+	int i;
+	static char *err_sts[] = {
+		"Command Complete",
+		"Command Not Supported",
+		"Host Interface Error",
+		"Checksum Error",
+		"Unused Completion Status",
+		"Test Failed",
+		"Command Parameter Error"};
+
+	QPRINTK(qdev, DRV, DEBUG, "%s.\n",
+		err_sts[mbcp->mbox_out[0] & 0x0000000f]);
+	for (i = 0; i < mbcp->out_count; i++)
+		QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
+				i, mbcp->mbox_out[i]);
+}
+
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+
+	/* Un-pause the RISC */
+	tmp = ql_read32(qdev, CSR);
+	if (!(tmp & CSR_RP))
+		return -EIO;
+
+	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+	return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+	int count = UDELAY_COUNT;
+
+	/* Pause the RISC */
+	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+	do {
+		tmp = ql_read32(qdev, CSR);
+		if (tmp & CSR_RP)
+			break;
+		mdelay(UDELAY_DELAY);
+		count--;
+	} while (count);
+	return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+	u32 tmp;
+	int count = UDELAY_COUNT;
+
+	/* Reset the RISC */
+	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+	do {
+		tmp = ql_read32(qdev, CSR);
+		if (tmp & CSR_RR) {
+			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+			break;
+		}
+		mdelay(UDELAY_DELAY);
+		count--;
+	} while (count);
+	return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+	int status;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* set up for reg read */
+	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* get the data */
+	*data = ql_read32(qdev, PROC_DATA);
+exit:
+	return status;
+}
+
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+	int status = 0;
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+	/* write the data to the data reg */
+	ql_write32(qdev, PROC_DATA, data);
+	/* trigger the write */
+	ql_write32(qdev, PROC_ADDR, reg);
+	/* wait for reg to come ready */
+	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+	if (status)
+		goto exit;
+exit:
+	return status;
+}
+
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+	int status;
+	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+	if (status)
+		printk(KERN_ERR
+			"Failed to force Auto-Load/"
+			"Auto-Init, status = 0x%.08x!\n",
+			status);
+	return status;
+}
+
+/* Determine if we are in charge of the firmware. If
+ * we are the lower of the 2 NIC pcie functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+static int ql_own_firmware(struct ql_adapter *qdev)
+{
+	u32 temp;
+
+	/* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+	 * core dump and firmware reset after an error.
+	 */
+	if (qdev->func < qdev->alt_func)
+		return 1;
+
+	/* If we are the higher of the 2 NIC functions
+	 * on the chip and the lower function is not
+	 * enabled, then we are responsible for
+	 * core dump and firmware reset after an error.
+	 */
+	temp = ql_read32(qdev, STS);
+	if (!(temp & (1 << (8 + qdev->alt_func))))
+		return 1;
+
+	return 0;
+
+}
+
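+/* Read out_count mailbox output registers into mbcp->mbox_out[]
+ * while holding the processor register semaphore.
+ */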
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status)
+		return -EBUSY;
+	for (i = 0; i < mbcp->out_count; i++) {
+		status =
+			ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
+					&mbcp->mbox_out[i]);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+			break;
+		}
+	}
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
+	return status;
+}
+
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
+ */
+static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+{
+	int count = 1000;	/* TODO: arbitrary for now. */
+	u32 value;
+
+	do {
+		value = ql_read32(qdev, STS);
+		if (value & STS_PI)
+			return 0;
+		mdelay(UDELAY_DELAY); /* 100ms */
+		if (!(count % 200))
+			QPRINTK(qdev, DRV, DEBUG,
+				"Waiting for mailbox command to complete.\n");
+	} while (--count);
+	return -ETIMEDOUT;
+}
+
+/* Execute a single mailbox command.
+ * Caller must hold PROC_ADDR semaphore.
+ */
+static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int i, status;
+
+	/*
+	 * Make sure there's nothing pending.
+	 * This shouldn't happen.
+	 */
+	if (ql_read32(qdev, CSR) & CSR_HRI) {
+		QPRINTK(qdev, DRV, ERR,
+			"%s: CSR_HRI bit set in CSR. Should never happen!\n",
+			__func__);
+		return -EIO;
+	}
+
+	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"%s: Couldn't get PROC_ADDR semaphore.\n", __func__);
+		return status;
+	}
+
+	/*
+	 * Fill the outbound mailboxes.
+	 */
+	for (i = 0; i < mbcp->in_count; i++) {
+		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
+						mbcp->mbox_in[i]);
+		if (status)
+			goto end;
+	}
+	/*
+	 * Wake up the MPI firmware.
+	 */
+	ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+end:
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+	return status;
+}
+
+/* We are being asked by firmware to accept
+ * a change to the port.  This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode. We wake up a worker
+ * to handle processing this since a mailbox command
+ * will need to be sent to ACK the request.
+ */
+static int ql_idc_req_aen(struct ql_adapter *qdev)
+{
+	int status;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+
+	QPRINTK(qdev, DRV, ERR, "Enter!\n");
+	/* Get the status data and start up a thread to
+	 * handle the request.
+	 */
+	mbcp = &qdev->idc_mbc;
+	mbcp->out_count = 4;
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Could not read MPI, resetting ASIC!\n");
+		ql_queue_asic_error(qdev);
+	} else	{
+		/* Begin polled mode early so
+		 * we don't get another interrupt
+		 * when we leave mpi_worker.
+		 */
+		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+		queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
+	}
+	return status;
+}
+
+/* Process an inter-device event completion.
+ * If good, signal the caller's completion.
+ */
+static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+{
+	int status;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+
+	mbcp->out_count = 4;
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Could not read MPI, resetting RISC!\n");
+		ql_queue_fw_error(qdev);
+	} else
+		/* Wake up the sleeping mpi_idc_work thread that is
+		 * waiting for this event.
+		 */
+
+		complete(&qdev->ide_completion);
+
+	return status;
+}
+
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 2;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"%s: Could not get mailbox status.\n", __func__);
+		return;
+	}
+
+	qdev->link_status = mbcp->mbox_out[1];
+	QPRINTK(qdev, DRV, ERR, "%s: Link Up.\n", qdev->ndev->name);
+
+	/* If we're coming back from an IDC event
+	 * then set up the CAM and frame routing.
+	 */
+	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
+		status = ql_cam_route_initialize(qdev);
+		if (status) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init CAM/Routing tables.\n");
+			return;
+		} else
+			clear_bit(QL_CAM_RT_SET, &qdev->flags);
+	}
+
+	/* Queue up a worker to check the frame
+	 * size information, and fix it if it's not
+	 * to our liking.
+	 */
+	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
+		QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
+		set_bit(QL_PORT_CFG, &qdev->flags);
+		/* Begin polled mode early so
+		 * we don't get another interrupt
+		 * when we leave mpi_worker dpc.
+		 */
+		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+		queue_delayed_work(qdev->workqueue,
+				&qdev->mpi_port_cfg_work, 0);
+	}
+
+	ql_link_on(qdev);
+}
+
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 3;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
+
+	QPRINTK(qdev, DRV, ERR, "%s: Link Down.\n", qdev->ndev->name);
+	ql_link_off(qdev);
+}
+
+static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 5;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
+	else
+		QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
+
+	return status;
+}
+
+static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 1;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
+	else
+		QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
+
+	return status;
+}
+
+static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 6;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status)
+		QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
+	else {
+		int i;
+		QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
+		for (i = 0; i < mbcp->out_count; i++)
+			QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
+					i, mbcp->mbox_out[i]);
+
+	}
+
+	return status;
+}
+
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+
+	mbcp->out_count = 2;
+
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+	} else {
+		QPRINTK(qdev, DRV, ERR, "Firmware Revision  = 0x%.08x.\n",
+			mbcp->mbox_out[1]);
+		qdev->fw_rev_id = mbcp->mbox_out[1];
+		status = ql_cam_route_initialize(qdev);
+		if (status)
+			QPRINTK(qdev, IFUP, ERR,
+				"Failed to init CAM/Routing tables.\n");
+
+	}
+}
+
+/* Process an async event and clear it unless it's an
+ * error condition.
+ *  This can get called iteratively from the mpi_work thread
+ *  when events arrive via an interrupt.
+ *  It also gets called when a mailbox command is polling for
+ *  its completion. */
+static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status;
+	int orig_count = mbcp->out_count;
+
+	/* Just get mailbox zero for now. */
+	mbcp->out_count = 1;
+	status = ql_get_mb_sts(qdev, mbcp);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Could not read MPI, resetting ASIC!\n");
+		ql_queue_asic_error(qdev);
+		goto end;
+	}
+
+	switch (mbcp->mbox_out[0]) {
+
+	/* This case is only active when we arrive here
+	 * as a result of issuing a mailbox command to
+	 * the firmware.
+	 */
+	case MB_CMD_STS_INTRMDT:
+	case MB_CMD_STS_GOOD:
+	case MB_CMD_STS_INVLD_CMD:
+	case MB_CMD_STS_XFC_ERR:
+	case MB_CMD_STS_CSUM_ERR:
+	case MB_CMD_STS_ERR:
+	case MB_CMD_STS_PARAM_ERR:
+		/* We can only get mailbox status if we're polling from an
+		 * unfinished command.  Get the rest of the status data and
+		 * return back to the caller.
+		 * We only end up here when we're polling for a mailbox
+		 * command completion.
+		 */
+		mbcp->out_count = orig_count;
+		status = ql_get_mb_sts(qdev, mbcp);
+		return status;
+
+	/* We are being asked by firmware to accept
+	 * a change to the port.  This is only
+	 * a change to max frame sizes (Tx/Rx), pause
+	 * parameters, or loopback mode.
+	 */
+	case AEN_IDC_REQ:
+		status = ql_idc_req_aen(qdev);
+		break;
+
+	/* Process an inbound IDC event.
+	 * This will happen when we're trying to
+	 * change tx/rx max frame size, change pause
+	 * parameters or loopback mode.
+	 */
+	case AEN_IDC_CMPLT:
+	case AEN_IDC_EXT:
+		status = ql_idc_cmplt_aen(qdev);
+		break;
+
+	case AEN_LINK_UP:
+		ql_link_up(qdev, mbcp);
+		break;
+
+	case AEN_LINK_DOWN:
+		ql_link_down(qdev, mbcp);
+		break;
+
+	case AEN_FW_INIT_DONE:
+		/* If we're in the process of executing the firmware,
+		 * then convert the status to normal mailbox status.
+		 */
+		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+			mbcp->out_count = orig_count;
+			status = ql_get_mb_sts(qdev, mbcp);
+			mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
+			return status;
+		}
+		ql_init_fw_done(qdev, mbcp);
+		break;
+
+	case AEN_AEN_SFP_IN:
+		ql_sfp_in(qdev, mbcp);
+		break;
+
+	case AEN_AEN_SFP_OUT:
+		ql_sfp_out(qdev, mbcp);
+		break;
+
+	case AEN_AEN_LOST:
+		ql_aen_lost(qdev, mbcp);
+		break;
+
+	/* This event can arrive at boot time or after an
+	 * MPI reset if the firmware failed to initialize.
+	 */
+	case AEN_FW_INIT_FAIL:
+		/* If we're in the process of executing the firmware,
+		 * then convert the status to normal mailbox status.
+		 */
+		if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+			mbcp->out_count = orig_count;
+			status = ql_get_mb_sts(qdev, mbcp);
+			mbcp->mbox_out[0] = MB_CMD_STS_ERR;
+			return status;
+		}
+		QPRINTK(qdev, DRV, ERR,
+			"Firmware initialization failed.\n");
+		status = -EIO;
+		ql_queue_fw_error(qdev);
+		break;
+
+	case AEN_SYS_ERR:
+		QPRINTK(qdev, DRV, ERR,
+			"System Error.\n");
+		ql_queue_fw_error(qdev);
+		status = -EIO;
+		break;
+
+	case AEN_DCBX_CHG:
+		/* Need to support AEN 8110 */
+		break;
+	default:
+		QPRINTK(qdev, DRV, ERR,
+			"Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+		/* Clear the MPI firmware status. */
+	}
+end:
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+	/* In some cases we didn't get the completion we wanted
+	 * (e.g. we received an AEN instead), so restore the original out_count.
+	 */
+	mbcp->out_count = orig_count;
+	return status;
+}
+
+/* Execute a single mailbox command.
+ * mbcp is a pointer to an array of u32.  Each
+ * element in the array contains the value for it's
+ * respective mailbox register.
+ */
+static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+	int status, count;
+
+	mutex_lock(&qdev->mpi_mutex);
+
+	/* Begin polled mode for MPI */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+	/* Load the mailbox registers and wake up MPI RISC. */
+	status = ql_exec_mb_cmd(qdev, mbcp);
+	if (status)
+		goto end;
+
+
+	/* If we're generating a system error, then there's nothing
+	 * to wait for.
+	 */
+	if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
+		goto end;
+
+	/* Wait for the command to complete. We loop
+	 * here because some AEN might arrive while
+	 * we're waiting for the mailbox command to
+	 * complete. If more than 5 arrive then we can
+	 * assume something is wrong. */
+	count = 5;
+	do {
+		/* Wait for the interrupt to come in. */
+		status = ql_wait_mbx_cmd_cmplt(qdev);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed ql_wait_mbx_cmd_cmplt(),"
+				" status = %d.\n",
+				status);
+			goto end;
+		}
+
+		/* Process the event.  If it's an AEN, it
+		 * will be handled in-line or a worker
+		 * will be spawned. If it's our completion
+		 * we will catch it below.
+		 */
+		status = ql_mpi_handler(qdev, mbcp);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Failed ql_mpi_handler(), status = %d.\n",
+				status);
+			goto end;
+		}
+
+		/* It's either the completion for our mailbox
+		 * command complete or an AEN.  If it's our
+		 * completion then get out.
+		 */
+		if (((mbcp->mbox_out[0] & 0x0000f000) ==
+					MB_CMD_STS_GOOD) ||
+			((mbcp->mbox_out[0] & 0x0000f000) ==
+					MB_CMD_STS_INTRMDT))
+			break;
+	} while (--count);
+
+	if (!count) {
+		QPRINTK(qdev, DRV, ERR,
+			"Timed out waiting for mailbox command to complete.\n");
+		status = -ETIMEDOUT;
+		goto end;
+	}
+
+	/* Now we can clear the interrupt condition
+	 * and look at our status.
+	 */
+	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+	if (((mbcp->mbox_out[0] & 0x0000f000) !=
+					MB_CMD_STS_GOOD) &&
+		((mbcp->mbox_out[0] & 0x0000f000) !=
+					MB_CMD_STS_INTRMDT)) {
+		ql_display_mb_sts(qdev, mbcp);
+		status = -EIO;
+	}
+end:
+	mutex_unlock(&qdev->mpi_mutex);
+	/* End polled mode for MPI */
+	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+	return status;
+}
+
+int ql_mb_sys_err(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 0;
+
+	mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	return status;
+}
+
+/* Get the firmware version from the MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_about_fw(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 3;
+
+	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed about firmware command\n");
+		status = -EIO;
+	}
+
+	/* Store the firmware version */
+	qdev->fw_rev_id = mbcp->mbox_out[1];
+
+	return status;
+}
+
+/* Get functional state for MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_get_fw_state(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 2;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed Get Firmware State.\n");
+		status = -EIO;
+	}
+
+	/* If bit zero is set in mbx 1 then the firmware is
+	 * running, but not initialized.  This should never
+	 * happen.
+	 */
+	if (mbcp->mbox_out[1] & 1) {
+		QPRINTK(qdev, DRV, ERR,
+			"Firmware waiting for initialization.\n");
+		status = -EIO;
+	}
+
+	return status;
+}
+
+/* Send an ACK mailbox command to the firmware to
+ * let it continue with the change.
+ */
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 5;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
+	mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
+	mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
+	mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
+	mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed IDC ACK send.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+/* Get link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 3;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
+	mbcp->mbox_in[1] = qdev->link_config;
+	mbcp->mbox_in[2] = qdev->max_frame_size;
+
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
+		QPRINTK(qdev, DRV, ERR,
+			"Port Config sent, wait for IDC.\n");
+	} else	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed Set Port Configuration.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
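+/* Ask the MPI firmware to DMA a region of RISC RAM to the host
+ * buffer at req_dma.  The RAM address, size and DMA address are
+ * split across the inbound mailbox registers.
+ */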
+int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+	u32 size)
+{
+	int status = 0;
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 9;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+	mbcp->mbox_in[1] = LSW(addr);
+	mbcp->mbox_in[2] = MSW(req_dma);
+	mbcp->mbox_in[3] = LSW(req_dma);
+	mbcp->mbox_in[4] = MSW(size);
+	mbcp->mbox_in[5] = LSW(size);
+	mbcp->mbox_in[6] = MSW(MSD(req_dma));
+	mbcp->mbox_in[7] = LSW(MSD(req_dma));
+	mbcp->mbox_in[8] = MSW(addr);
+
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to dump RISC RAM.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
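+/* Map the caller's buffer for DMA, dump the requested RISC RAM
+ * area into it using the mailbox command above, then unmap it.
+ */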
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+		u32 ram_addr, int word_count)
+{
+	int status;
+	u64 map;
+
+	map = pci_map_single(qdev->pdev,
+				buf,
+				word_count * sizeof(u32),
+				PCI_DMA_FROMDEVICE);
+	status = pci_dma_mapping_error(map);
+	if (status)
+		goto err;
+
+	status = ql_mb_dump_ram(qdev, map, ram_addr, word_count);
+	pci_unmap_single(qdev->pdev, map, word_count * sizeof(u32),
+				PCI_DMA_FROMDEVICE);
+err:
+	return status;
+}
+
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status = 0;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 3;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed Get Port Configuration.\n");
+		status = -EIO;
+	} else	{
+		QPRINTK(qdev, DRV, ERR,
+			"Passed Get Port Configuration.\n");
+		qdev->link_config = mbcp->mbox_out[1];
+		qdev->max_frame_size = mbcp->mbox_out[2];
+	}
+	return status;
+}
+
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+	mbcp->mbox_in[1] = wol;
+
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set WOL mode.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
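+/* Enable or disable magic packet wake-up.  When enabling, the
+ * station MAC address is passed to the firmware one octet per
+ * mailbox register.
+ */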
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+	u8 *addr = qdev->ndev->dev_addr;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 8;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
+	if (enable_wol) {
+		mbcp->mbox_in[1] = (u32)addr[0];
+		mbcp->mbox_in[2] = (u32)addr[1];
+		mbcp->mbox_in[3] = (u32)addr[2];
+		mbcp->mbox_in[4] = (u32)addr[3];
+		mbcp->mbox_in[5] = (u32)addr[4];
+		mbcp->mbox_in[6] = (u32)addr[5];
+		mbcp->mbox_in[7] = 0;
+	} else {
+		mbcp->mbox_in[1] = 0;
+		mbcp->mbox_in[2] = 1;
+		mbcp->mbox_in[3] = 1;
+		mbcp->mbox_in[4] = 1;
+		mbcp->mbox_in[5] = 1;
+		mbcp->mbox_in[6] = 1;
+		mbcp->mbox_in[7] = 0;
+	}
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set WOL magic packet.\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 2;
+	mbcp->out_count = 1;
+
+	mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+	mbcp->mbox_in[1] = led_config;
+
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to set LED Configuration.\n");
+		status = -EIO;
+	}
+
+	return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int status;
+
+	memset(mbcp, 0, sizeof(struct mbox_params));
+
+	mbcp->in_count = 1;
+	mbcp->out_count = 2;
+
+	mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+	status = ql_mailbox_command(qdev, mbcp);
+	if (status)
+		return status;
+
+	if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+		QPRINTK(qdev, DRV, ERR,
+			"Failed to get LED Configuration.\n");
+		status = -EIO;
+	} else
+		qdev->led_config = mbcp->mbox_out[1];
+
+	return status;
+}
+
+/* API called in work thread context to set new TX/RX
+ * maximum frame size values to match MTU.
+ */
+static int ql_set_port_cfg(struct ql_adapter *qdev)
+{
+	return ql_mb_set_port_cfg(qdev);
+}
+
+/* The following routines are worker threads that process
+ * events that may sleep waiting for completion.
+ */
+
+/* This thread gets the maximum TX and RX frame size values
+ * from the firmware and, if necessary, changes them to match
+ * the MTU setting.
+ */
+void ql_mpi_port_cfg_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_port_cfg_work);
+	int status;
+
+	status = ql_mb_get_port_cfg(qdev);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Bug: Failed to get port config data.\n");
+		goto err;
+	}
+	if ((qdev->link_config & CFG_JUMBO_FRAME_SIZE) &&
+			qdev->max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE)
+		goto end;
+
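+	/* The firmware settings do not match what we want, so
+	 * enable jumbo frames and push the default maximum frame
+	 * size back to the firmware.
+	 */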
+	qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
+	qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
+	status = ql_set_port_cfg(qdev);
+	if (status) {
+		QPRINTK(qdev, DRV, ERR,
+			"Bug: Failed to set port config data.\n");
+		goto err;
+	}
+end:
+	clear_bit(QL_PORT_CFG, &qdev->flags);
+	return;
+err:
+	ql_queue_fw_error(qdev);
+	goto end;
+}
+
+/* Process an inter-device request.  This is issued by
+ * the firmware in response to another function requesting
+ * a change to the port.  We set a flag to indicate a change
+ * has been made and then send a mailbox command ACKing
+ * the change request.
+ */
+void ql_mpi_idc_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_idc_work);
+	int status;
+	struct mbox_params *mbcp = &qdev->idc_mbc;
+	u32 aen;
+
+	aen = mbcp->mbox_out[1] >> 16;
+
+	switch (aen) {
+	default:
+		QPRINTK(qdev, DRV, ERR,
+			"Bug: Unhandled IDC action.\n");
+		break;
+	case MB_CMD_PORT_RESET:
+	case MB_CMD_SET_PORT_CFG:
+	case MB_CMD_STOP_FW:
+		ql_link_off(qdev);
+		/* Signal the resulting link up AEN
+		 * that the frame routing and MAC addr
+		 * need to be set.
+		 */
+		set_bit(QL_CAM_RT_SET, &qdev->flags);
+		status = ql_mb_idc_ack(qdev);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Bug: No pending IDC!\n");
+		}
+		break;
+	/* These sub-commands, issued by another (FCoE)
+	 * function, request an operation on the shared
+	 * resource (MPI environment).  We currently don't
+	 * issue these, so we just ACK the request.
+	 * See IDC document FcoeIdcv0_5.doc
+	 */
+	case MB_CMD_IOP_DVR_START:
+	case MB_CMD_IOP_FLASH_ACC:
+	case MB_CMD_IOP_CORE_DUMP_MPI:
+	case MB_CMD_IOP_PREP_UPDATE_MPI:
+	case MB_CMD_IOP_COMP_UPDATE_MPI:
+	case MB_CMD_IOP_PREP_LINK_DOWN:
+	case MB_CMD_IOP_RESTART_MPI:
+		/* Do the minimum by acknowledging receipt
+		 * of these IDC notifications. The ACK below
+		 * sends back the same values we received
+		 * in mailbox regs 1-4.
+		 */
+		qdev->idc_mbc.mbox_out[1] = mbcp->mbox_out[1];
+		qdev->idc_mbc.mbox_out[2] = mbcp->mbox_out[2];
+		qdev->idc_mbc.mbox_out[3] = mbcp->mbox_out[3];
+		qdev->idc_mbc.mbox_out[4] = mbcp->mbox_out[4];
+		if ((aen == MB_CMD_IOP_RESTART_MPI) ||
+				(aen == MB_CMD_IOP_PREP_LINK_DOWN)) {
+			/* Drop the link, reload the routing
+			 * table when link comes up.
+			 */
+			ql_link_off(qdev);
+			set_bit(QL_CAM_RT_SET, &qdev->flags);
+		}
+		status = ql_mb_idc_ack(qdev);
+		if (status) {
+			QPRINTK(qdev, DRV, ERR,
+				"Bug: No pending IDC!\n");
+		}
+		break;
+	}
+}
+
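+/* Worker thread that services outstanding firmware (MPI)
+ * events.  It handles mailbox output while the STS_PI status
+ * bit is set and then re-enables the completion interrupt.
+ */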
+void ql_mpi_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_work);
+	struct mbox_params mbc;
+	struct mbox_params *mbcp = &mbc;
+	int err = 0;
+
+	mutex_lock(&qdev->mpi_mutex);
+	while (ql_read32(qdev, STS) & STS_PI) {
+		memset(mbcp, 0, sizeof(struct mbox_params));
+		mbcp->out_count = 1;
+		err = ql_mpi_handler(qdev, mbcp);
+		if (err)
+			break;
+	}
+
+	mutex_unlock(&qdev->mpi_mutex);
+	ql_enable_completion_interrupt(qdev, 0);
+}
+
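+/* Worker thread that handles a firmware reset.  It cancels any
+ * outstanding MPI work, captures a core dump if this function
+ * owns the firmware, and then soft resets the MPI RISC.
+ */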
+void ql_mpi_reset_work(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_work);
+	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+	cancel_delayed_work_sync(&qdev->mpi_idc_work);
+
+	/* If we're not the dominant NIC function,
+	 * then there is nothing to do.
+	 */
+	if (!ql_own_firmware(qdev))
+		return;
+
+	/* If the dump completed successfully, we
+	 * signal ethtool that there is information
+	 * available.
+	 */
+	if (!ql_core_dump(qdev, qdev->mpi_coredump))
+		qdev->core_is_dumped = 1;
+	ql_soft_reset_mpi_risc(qdev);
+}