From: Andy Gospodarek <gospo@redhat.com>
Date: Wed, 19 Dec 2007 10:14:16 -0500
Subject: [net] ixgbe: support for new Intel 10GbE Hardware
Message-id: 20071219151416.GK28834@gospo.usersys.redhat.com
O-Subject: [RHEL5.2 PATCH] ixgbe: add driver to support new Intel 10GbE Hardware
Bugzilla: 252005

This patch adds the ixgbe driver to RHEL5.  The driver was recently
added upstream (Sept 2007), and several partners will be shipping the
hardware it supports during the next update cycle.

Some testing has been done in-house, and other partners have reported
success with this driver as well.

This will resolve BZ 252005.

This patch also requires CONFIG_IXGBE=m to be added to config-generic in
CVS.

Acked-by: Jeff Garzik <jgarzik@redhat.com>

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 76ccb6d..8badc78 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2399,6 +2399,28 @@ config EHEA
 	  To compile the driver as a module, choose M here. The module
 	  will be called ehea.
 
+config IXGBE
+	tristate "Intel(R) 10GbE PCI Express adapters support"
+	depends on PCI
+	---help---
+	  This driver supports the Intel(R) 10GbE PCI Express family of
+	  adapters.  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/ixgbe.txt>.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module
+	  will be called ixgbe.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b03e9e4..4c49abf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_E1000E) += e1000e/
 obj-$(CONFIG_IGB) += igb/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
+obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
new file mode 100644
index 0000000..ccd83d9
--- /dev/null
+++ b/drivers/net/ixgbe/Makefile
@@ -0,0 +1,36 @@
+################################################################################
+#
+# Intel 10 Gigabit PCI Express Linux driver
+# Copyright(c) 1999 - 2007 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_IXGBE) += ixgbe.o
+
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+              ixgbe_82598.o ixgbe_phy.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
new file mode 100644
index 0000000..e141b4f
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -0,0 +1,255 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_H_
+#define _IXGBE_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_compat.h"
+
+
+#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
+
+#define PFX "ixgbe: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args)))
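+
+/*
+ * Usage sketch: DPRINTK(PROBE, INFO, "queues: %d\n", n) prints at
+ * KERN_INFO only when NETIF_MSG_PROBE is set in adapter->msg_enable.
+ */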
+
+/* TX/RX descriptor defines */
+#define IXGBE_DEFAULT_TXD		   1024
+#define IXGBE_MAX_TXD			   4096
+#define IXGBE_MIN_TXD			     64
+
+#define IXGBE_DEFAULT_RXD		   1024
+#define IXGBE_MAX_RXD			   4096
+#define IXGBE_MIN_RXD			     64
+
+#define IXGBE_DEFAULT_RXQ			   1
+#define IXGBE_MAX_RXQ				   1
+#define IXGBE_MIN_RXQ				   1
+
+#define IXGBE_DEFAULT_ITR_RX_USECS	    125  /*   8k irqs/sec */
+#define IXGBE_DEFAULT_ITR_TX_USECS	    250  /*   4k irqs/sec */
+#define IXGBE_MIN_ITR_USECS		    100  /*  10k irqs/sec */
+#define IXGBE_MAX_ITR_USECS		  10000  /* 100  irqs/sec */
+
+/* flow control */
+#define IXGBE_DEFAULT_FCRTL		0x10000
+#define IXGBE_MIN_FCRTL			      0
+#define IXGBE_MAX_FCRTL			0x7FF80
+#define IXGBE_DEFAULT_FCRTH		0x20000
+#define IXGBE_MIN_FCRTH			      0
+#define IXGBE_MAX_FCRTH			0x7FFF0
+#define IXGBE_DEFAULT_FCPAUSE		 0x6800  /* may be too long */
+#define IXGBE_MIN_FCPAUSE		      0
+#define IXGBE_MAX_FCPAUSE		 0xFFFF
+
+/* Supported Rx Buffer Sizes */
+#define IXGBE_RXBUFFER_64    64     /* Used for packet split */
+#define IXGBE_RXBUFFER_128   128    /* Used for packet split */
+#define IXGBE_RXBUFFER_256   256    /* Used for packet split */
+#define IXGBE_RXBUFFER_2048  2048
+
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+#define IXGBE_TX_QUEUE_WAKE 16
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
+#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbe_tx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+};
+
+struct ixgbe_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+	dma_addr_t page_dma;
+};
+
+struct ixgbe_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct ixgbe_ring {
+	struct ixgbe_adapter *adapter;	/* backlink */
+	void *desc;			/* descriptor ring memory */
+	dma_addr_t dma;			/* phys. address of descriptor ring */
+	unsigned int size;		/* length in bytes */
+	unsigned int count;		/* number of descriptors */
+	unsigned int next_to_use;
+	unsigned int next_to_clean;
+
+	union {
+		struct ixgbe_tx_buffer *tx_buffer_info;
+		struct ixgbe_rx_buffer *rx_buffer_info;
+	};
+
+	u16 head;
+	u16 tail;
+
+	/* To protect race between sender and clean_tx_irq */
+	spinlock_t tx_lock;
+
+	struct ixgbe_queue_stats stats;
+
+	u32 eims_value;
+	u16 itr_register;
+
+	char name[IFNAMSIZ + 5];
+	u16 work_limit;                /* max work per interrupt */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 0)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
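+/*
+ * E.g. 8000 ints/sec maps to 1000000000 / (8000 * 256) = 488 in the
+ * register, and 488 maps back to 1000000000 / (488 * 256) = 8004
+ * ints/sec -- equal up to integer truncation, hence one macro for both.
+ */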
+
+#define IXGBE_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
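+/*
+ * E.g. with count = 1024, next_to_use = 10 and next_to_clean = 5 this
+ * yields 1024 + 5 - 10 - 1 = 1018 unused descriptors; the -1 keeps one
+ * slot empty so a full ring can be told apart from an empty one.
+ */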
+
+#define IXGBE_RX_DESC_ADV(R, i)	    \
+	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i)	    \
+	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
+	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
+
+/* board specific private data structure */
+struct ixgbe_adapter {
+	struct timer_list watchdog_timer;
+	struct vlan_group *vlgrp;
+	u16 bd_number;
+	u16 rx_buf_len;
+	atomic_t irq_sem;
+	struct work_struct reset_task;
+
+	/* TX */
+	struct ixgbe_ring *tx_ring;	/* One per active queue */
+	u64 restart_queue;
+	u64 lsc_int;
+	u64 hw_tso_ctxt;
+	u64 hw_tso6_ctxt;
+	u32 tx_timeout_count;
+	bool detect_tx_hung;
+
+	/* RX */
+	struct ixgbe_ring *rx_ring;	/* One per active queue */
+	u64 hw_csum_tx_good;
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
+	u64 non_eop_descs;
+	int num_tx_queues;
+	int num_rx_queues;
+	struct msix_entry *msix_entries;
+
+	u64 rx_hdr_split;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+
+	u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
+#define IXGBE_FLAG_MSIX_ENABLED			(u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED		(u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL			(u32)(1 << 4)
+
+	/* Interrupt Throttle Rate */
+	u32 rx_eitr;
+	u32 tx_eitr;
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in ixgbe_hw.h */
+	struct ixgbe_hw hw;
+	u16 msg_enable;
+	struct ixgbe_hw_stats stats;
+	char lsc_name[IFNAMSIZ + 5];
+
+	unsigned long state;
+	u64 tx_busy;
+};
+
+enum ixgbe_state_t {
+	__IXGBE_TESTING,
+	__IXGBE_RESETTING,
+	__IXGBE_DOWN
+};
+
+enum ixgbe_boards {
+	board_82598,
+};
+
+extern struct ixgbe_info ixgbe_82598_info;
+
+extern char ixgbe_driver_name[];
+extern const char ixgbe_driver_version[];
+
+extern int ixgbe_up(struct ixgbe_adapter *adapter);
+extern void ixgbe_down(struct ixgbe_adapter *adapter);
+extern void ixgbe_reset(struct ixgbe_adapter *adapter);
+extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+				    struct ixgbe_ring *rxdr);
+extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
+				    struct ixgbe_ring *txdr);
+
+#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
new file mode 100644
index 0000000..6321b05
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -0,0 +1,526 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES   16
+
+static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
+					 bool *autoneg);
+static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
+						u32 *speed, bool *autoneg);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
+				      bool *link_up);
+static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
+					    bool autoneg,
+					    bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
+					       bool autoneg,
+					       bool autoneg_wait_to_complete);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+
+
+static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+{
+	hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+	hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+	hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES;
+
+	/* PHY ops are filled in by default properly for Fiber only */
+	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598;
+		hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598;
+		hw->mac.ops.get_link_settings =
+				&ixgbe_get_copper_link_settings_82598;
+
+		/* Call PHY identify routine to get the phy type */
+		ixgbe_identify_phy(hw);
+
+		switch (hw->phy.type) {
+		case ixgbe_phy_tn:
+			hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
+			hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
+			hw->phy.ops.setup_link_speed =
+					&ixgbe_setup_tnx_phy_link_speed;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_link_settings_82598 - Determines default link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the default link settings by reading the AUTOC register.
+ **/
+static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
+					 bool *autoneg)
+{
+	s32 status = 0;
+	s32 autoc_reg;
+
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	if (hw->mac.link_settings_loaded) {
+		autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
+		autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
+		autoc_reg |= hw->mac.link_attach_type;
+		autoc_reg |= hw->mac.link_mode_select;
+	}
+
+	switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		*autoneg = false;
+		break;
+
+	case IXGBE_AUTOC_LMS_1G_AN:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	case IXGBE_AUTOC_LMS_KX4_AN:
+	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+		if (autoc_reg & IXGBE_AUTOC_KX4_SUPP)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (autoc_reg & IXGBE_AUTOC_KX_SUPP)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		*autoneg = true;
+		break;
+
+	default:
+		status = IXGBE_ERR_LINK_SETUP;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_copper_link_settings_82598 - Determines default link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the default link settings by reading the PHY speed
+ *  ability register.
+ **/
+static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
+						u32 *speed, bool *autoneg)
+{
+	s32 status = IXGBE_ERR_LINK_SETUP;
+	u16 speed_ability;
+
+	*speed = 0;
+	*autoneg = true;
+
+	status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+				    IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				    &speed_ability);
+
+	if (status == 0) {
+		if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+		    *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+		    *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82598 - Determines media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+	enum ixgbe_media_type media_type;
+
+	/* Media type for I82598 is based on device ID */
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+	case IXGBE_DEV_ID_82598EB_CX4:
+		media_type = ixgbe_media_type_fiber;
+		break;
+	case IXGBE_DEV_ID_82598AT_DUAL_PORT:
+		media_type = ixgbe_media_type_copper;
+		break;
+	default:
+		media_type = ixgbe_media_type_unknown;
+		break;
+	}
+
+	return media_type;
+}
+
+/**
+ *  ixgbe_setup_mac_link_82598 - Configures MAC link settings
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.
+ **/
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
+{
+	u32 autoc_reg;
+	u32 links_reg;
+	u32 i;
+	s32 status = 0;
+
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	if (hw->mac.link_settings_loaded) {
+		autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
+		autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
+		autoc_reg |= hw->mac.link_attach_type;
+		autoc_reg |= hw->mac.link_mode_select;
+
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+		IXGBE_WRITE_FLUSH(hw);
+		msleep(50);
+	}
+
+	/* Restart link */
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+	/* Only poll for autoneg to complete if specified to do so */
+	if (hw->phy.autoneg_wait_to_complete) {
+		if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN ||
+		    hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+			links_reg = 0; /* Just in case Autoneg time = 0 */
+			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+					break;
+				msleep(100);
+			}
+			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+				hw_dbg(hw,
+				       "Autonegotiation did not complete.\n");
+			}
+		}
+	}
+
+	/*
+	 * We want to save off the original Flow Control configuration just in
+	 * case we get disconnected and then reconnected into a different hub
+	 * or switch with different Flow Control capabilities.
+	 */
+	hw->fc.type = hw->fc.original_type;
+	ixgbe_setup_fc(hw, 0);
+
+	/* Add delay to filter out noise during initial link setup */
+	msleep(50);
+
+	return status;
+}
+
+/**
+ *  ixgbe_check_mac_link_82598 - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true if link is up, false otherwise
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
+				      bool *link_up)
+{
+	u32 links_reg;
+
+	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+	if (links_reg & IXGBE_LINKS_UP)
+		*link_up = true;
+	else
+		*link_up = false;
+
+	if (links_reg & IXGBE_LINKS_SPEED)
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if auto-negotiation enabled
+ *  @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ **/
+static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+					    u32 speed, bool autoneg,
+					    bool autoneg_wait_to_complete)
+{
+	s32 status = 0;
+
+	/* If speed is 10G, then check for CX4 or XAUI. */
+	if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+	    (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4)))
+		hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
+	else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg))
+		hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+	else if (autoneg) {
+		/* BX mode - Autonegotiate 1G */
+		if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
+			hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
+		else /* KX/KX4 mode */
+			hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN;
+	} else {
+		status = IXGBE_ERR_LINK_SETUP;
+	}
+
+	if (status == 0) {
+		hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete;
+
+		hw->mac.link_settings_loaded = true;
+		/*
+		 * Setup and restart the link based on the new values in
+		 * ixgbe_hw.  This will write the AUTOC register based on the new
+		 * stored values
+		 */
+		hw->mac.ops.setup_link(hw);
+	}
+
+	return status;
+}
+
+
+/**
+ *  ixgbe_setup_copper_link_82598 - Setup copper link settings
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.  Restart
+ *  phy and wait for autonegotiate to finish.  Then synchronize the
+ *  MAC and PHY.
+ **/
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+
+	/* Restart autonegotiation on PHY */
+	if (hw->phy.ops.setup_link)
+		status = hw->phy.ops.setup_link(hw);
+
+	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+	hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+
+	/* Set up MAC */
+	hw->mac.ops.setup_link(hw);
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *  @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ *  Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
+					       bool autoneg,
+					       bool autoneg_wait_to_complete)
+{
+	s32 status = 0;
+
+	/* Setup the PHY according to input speed */
+	if (hw->phy.ops.setup_link_speed)
+		status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+						autoneg_wait_to_complete);
+
+	/* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
+	hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
+	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
+
+	/* Set up MAC */
+	hw->mac.ops.setup_link(hw);
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_hw_82598 - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ *  reset.
+ **/
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u32 ctrl;
+	u32 gheccr;
+	u32 i;
+	u32 autoc;
+	u8  analog_val;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	ixgbe_stop_adapter(hw);
+
+	/*
+	 * Power up the Atlas TX lanes if they are currently powered down.
+	 * Atlas TX lanes are powered down for MAC loopback tests, but
+	 * they are not automatically restored on reset.
+	 */
+	ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+		/* Enable TX Atlas so packets can be transmitted again */
+		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);
+
+		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val);
+
+		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val);
+
+		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val);
+		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val);
+	}
+
+	/* Reset PHY */
+	ixgbe_reset_phy(hw);
+
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests before reset
+	 */
+	if (ixgbe_disable_pcie_master(hw) != 0) {
+		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+	}
+
+	/*
+	 * Issue global reset to the MAC.  This needs to be a SW reset.
+	 * If link reset is used, it might reset the MAC when mng is using it
+	 */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		udelay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST))
+			break;
+	}
+	if (ctrl & IXGBE_CTRL_RST) {
+		status = IXGBE_ERR_RESET_FAILED;
+		hw_dbg(hw, "Reset polling failed to complete.\n");
+	}
+
+	msleep(50);
+
+	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+	/*
+	 * AUTOC register which stores link settings gets cleared
+	 * and reloaded from EEPROM after reset. We need to restore
+	 * our stored value from init in case SW changed the attach
+	 * type or speed.  If this is the first time and link settings
+	 * have not been stored, store default settings from AUTOC.
+	 */
+	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	if (hw->mac.link_settings_loaded) {
+		autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE);
+		autoc &= ~(IXGBE_AUTOC_LMS_MASK);
+		autoc |= hw->mac.link_attach_type;
+		autoc |= hw->mac.link_mode_select;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+	} else {
+		hw->mac.link_attach_type =
+					 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
+		hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
+		hw->mac.link_settings_loaded = true;
+	}
+
+	/* Store the permanent mac address */
+	ixgbe_get_mac_addr(hw, hw->mac.perm_addr);
+
+	return status;
+}
+
+static struct ixgbe_mac_operations mac_ops_82598 = {
+	.reset			= &ixgbe_reset_hw_82598,
+	.get_media_type		= &ixgbe_get_media_type_82598,
+	.setup_link		= &ixgbe_setup_mac_link_82598,
+	.check_link		= &ixgbe_check_mac_link_82598,
+	.setup_link_speed	= &ixgbe_setup_mac_link_speed_82598,
+	.get_link_settings	= &ixgbe_get_link_settings_82598,
+};
+
+struct ixgbe_info ixgbe_82598_info = {
+	.mac			= ixgbe_mac_82598EB,
+	.get_invariants		= &ixgbe_get_invariants_82598,
+	.mac_ops		= &mac_ops_82598,
+};
+
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
new file mode 100644
index 0000000..7fd6aeb
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -0,0 +1,1179 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+
+static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
+
+static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+
+/**
+ *  ixgbe_start_hw - Prepare hardware for TX/RX
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling in the bus info structure and media type,
+ *  clearing all on-chip counters, initializing the receive address registers,
+ *  multicast table, and VLAN filter table, calling the routine to set up link
+ *  and flow control settings, and leaving the transmit and receive units
+ *  disabled and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+	u32 ctrl_ext;
+
+	/* Set the media type */
+	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+	/* Identify the PHY */
+	ixgbe_identify_phy(hw);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table
+	 */
+	ixgbe_init_rx_addrs(hw);
+
+	/* Clear the VLAN filter table */
+	ixgbe_clear_vfta(hw);
+
+	/* Set up link */
+	hw->mac.ops.setup_link(hw);
+
+	/* Clear statistics registers */
+	ixgbe_clear_hw_cntrs(hw);
+
+	/* Set No Snoop Disable */
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_init_hw - Generic hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the hardware by resetting it and then starting it: fills the
+ *  bus info structure and media type, clears all on-chip counters, initializes
+ *  the receive address registers, multicast table, and VLAN filter table,
+ *  calls the routine to set up link and flow control settings, and leaves the
+ *  transmit and receive units disabled and uninitialized.
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+	/* Reset the hardware */
+	hw->mac.ops.reset(hw);
+
+	/* Start the HW */
+	ixgbe_start_hw(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_hw_cntrs - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware.
+ *  Statistics counters are clear on read.
+ **/
+static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+	u16 i = 0;
+
+	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+	IXGBE_READ_REG(hw, IXGBE_ERRBC);
+	IXGBE_READ_REG(hw, IXGBE_MSPDC);
+	for (i = 0; i < 8; i++)
+		IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+	IXGBE_READ_REG(hw, IXGBE_MLFC);
+	IXGBE_READ_REG(hw, IXGBE_MRFC);
+	IXGBE_READ_REG(hw, IXGBE_RLEC);
+	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+
+	for (i = 0; i < 8; i++) {
+		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+		IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+		IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+	}
+
+	IXGBE_READ_REG(hw, IXGBE_PRC64);
+	IXGBE_READ_REG(hw, IXGBE_PRC127);
+	IXGBE_READ_REG(hw, IXGBE_PRC255);
+	IXGBE_READ_REG(hw, IXGBE_PRC511);
+	IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	IXGBE_READ_REG(hw, IXGBE_PRC1522);
+	IXGBE_READ_REG(hw, IXGBE_GPRC);
+	IXGBE_READ_REG(hw, IXGBE_BPRC);
+	IXGBE_READ_REG(hw, IXGBE_MPRC);
+	IXGBE_READ_REG(hw, IXGBE_GPTC);
+	IXGBE_READ_REG(hw, IXGBE_GORCL);
+	IXGBE_READ_REG(hw, IXGBE_GORCH);
+	IXGBE_READ_REG(hw, IXGBE_GOTCL);
+	IXGBE_READ_REG(hw, IXGBE_GOTCH);
+	for (i = 0; i < 8; i++)
+		IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+	IXGBE_READ_REG(hw, IXGBE_RUC);
+	IXGBE_READ_REG(hw, IXGBE_RFC);
+	IXGBE_READ_REG(hw, IXGBE_ROC);
+	IXGBE_READ_REG(hw, IXGBE_RJC);
+	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+	IXGBE_READ_REG(hw, IXGBE_TORL);
+	IXGBE_READ_REG(hw, IXGBE_TORH);
+	IXGBE_READ_REG(hw, IXGBE_TPR);
+	IXGBE_READ_REG(hw, IXGBE_TPT);
+	IXGBE_READ_REG(hw, IXGBE_PTC64);
+	IXGBE_READ_REG(hw, IXGBE_PTC127);
+	IXGBE_READ_REG(hw, IXGBE_PTC255);
+	IXGBE_READ_REG(hw, IXGBE_PTC511);
+	IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	IXGBE_READ_REG(hw, IXGBE_PTC1522);
+	IXGBE_READ_REG(hw, IXGBE_MPTC);
+	IXGBE_READ_REG(hw, IXGBE_BPTC);
+	for (i = 0; i < 16; i++) {
+		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_mac_addr - Generic get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from the first Receive Address Register
+ *  (RAR0).  A reset of the adapter must be performed prior to calling this
+ *  function in order for the MAC address to have been loaded from the EEPROM
+ *  into RAR0.
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+	for (i = 0; i < 4; i++)
+		mac_addr[i] = (u8)(rar_low >> (i*8));
+
+	for (i = 0; i < 2; i++)
+		mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+	return 0;
+}
+
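+/**
+ *  ixgbe_read_part_num - Read the part number from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @part_num: 32-bit value built from the two PBA number words
+ **/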
+s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+	*part_num = (u32)(data << 16);
+
+	ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+	*part_num |= data;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_stop_adapter - Generic stop TX/RX units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+	u32 number_of_queues;
+	u32 reg_val;
+	u16 i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit */
+	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	reg_val &= ~(IXGBE_RXCTRL_RXEN);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
+	msleep(2);
+
+	/* Clear the interrupt mask to stop interrupts from being generated */
+	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts */
+	IXGBE_READ_REG(hw, IXGBE_EICR);
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	number_of_queues = hw->mac.num_tx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+		if (reg_val & IXGBE_TXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_led_on - Turns on the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/* To turn on the LED, set mode to ON. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_led_off - Turns off the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/* To turn off the LED, set mode to OFF. */
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+
+/**
+ *  ixgbe_init_eeprom - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->type = ixgbe_eeprom_none;
+
+		/*
+		 * Check for EEPROM present first.
+		 * If not present leave as none
+		 */
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		if (eec & IXGBE_EEC_PRES) {
+			eeprom->type = ixgbe_eeprom_spi;
+
+			/*
+			 * SPI EEPROM is assumed here.  This code would need to
+			 * change if a future EEPROM is not SPI.
+			 */
+			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+					    IXGBE_EEC_SIZE_SHIFT);
+			eeprom->word_size = 1 << (eeprom_size +
+						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		}
+
+		if (eec & IXGBE_EEC_ADDR_SIZE)
+			eeprom->address_bits = 16;
+		else
+			eeprom->address_bits = 8;
+		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
+			  "%d\n", eeprom->type, eeprom->word_size,
+			  eeprom->address_bits);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_read_eeprom - Read EEPROM word using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of the word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	u32 eerd;
+	s32 status;
+
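+	/*
+	 * EERD layout as used here: the word address sits above the control
+	 * bits, START is written to kick off the read, DONE is polled below,
+	 * and the data word comes back in the upper 16 bits of the register.
+	 */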
+	eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
+	       IXGBE_EEPROM_READ_REG_START;
+
+	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+	status = ixgbe_poll_eeprom_eerd_done(hw);
+
+	if (status == 0)
+		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+			IXGBE_EEPROM_READ_REG_DATA);
+	else
+		hw_dbg(hw, "Eeprom read timed out\n");
+
+	return status;
+}
+
+/**
+ *  ixgbe_poll_eeprom_eerd_done - Poll EERD status
+ *  @hw: pointer to hardware structure
+ *
+ *  Polls the status bit (bit 1) of the EERD to determine when the read is done.
+ **/
+static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 reg;
+	s32 status = IXGBE_ERR_EEPROM;
+
+	for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+		if (reg & IXGBE_EEPROM_READ_REG_DONE) {
+			status = 0;
+			break;
+		}
+		udelay(5);
+	}
+	return status;
+}
+
+/**
+ *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM;
+	u32 timeout;
+	u32 i;
+	u32 swsm;
+
+	/* Set timeout value based on size of EEPROM */
+	timeout = hw->eeprom.word_size + 1;
+
+	/* Get SMBI software semaphore between device drivers first */
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (!(swsm & IXGBE_SWSM_SMBI)) {
+			status = 0;
+			break;
+		}
+		msleep(1);
+	}
+
+	/* Now get the semaphore between SW/FW through the SWESMBI bit */
+	if (status == 0) {
+		for (i = 0; i < timeout; i++) {
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+			/* Set the SW EEPROM semaphore bit to request access */
+			swsm |= IXGBE_SWSM_SWESMBI;
+			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+			/*
+			 * If we set the bit successfully then we got the
+			 * semaphore.
+			 */
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+			if (swsm & IXGBE_SWSM_SWESMBI)
+				break;
+
+			udelay(50);
+		}
+
+		/*
+		 * Release semaphores and return error if SW EEPROM semaphore
+		 * was not granted because we don't have access to the EEPROM
+		 */
+		if (i >= timeout) {
+			hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
+				 "not granted.\n");
+			ixgbe_release_eeprom_semaphore(hw);
+			status = IXGBE_ERR_EEPROM;
+		}
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+	u32 swsm;
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
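+ *
+ *  The checksum word is chosen so that the covered EEPROM words sum to
+ *  IXGBE_EEPROM_SUM, hence the subtraction at the end of this routine.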
+ **/
+static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+{
+	u16 i;
+	u16 j;
+	u16 checksum = 0;
+	u16 length = 0;
+	u16 pointer = 0;
+	u16 word = 0;
+
+	/* Include 0x0-0x3F in the checksum */
+	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+		if (ixgbe_read_eeprom(hw, i, &word) != 0) {
+			hw_dbg(hw, "EEPROM read failed\n");
+			break;
+		}
+		checksum += word;
+	}
+
+	/* Include all data from pointers except for the fw pointer */
+	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+		ixgbe_read_eeprom(hw, i, &pointer);
+
+		/* Make sure the pointer seems valid */
+		if (pointer != 0xFFFF && pointer != 0) {
+			ixgbe_read_eeprom(hw, pointer, &length);
+
+			if (length != 0xFFFF && length != 0) {
+				for (j = pointer+1; j <= pointer+length; j++) {
+					ixgbe_read_eeprom(hw, j, &word);
+					checksum += word;
+				}
+			}
+		}
+	}
+
+	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+	return checksum;
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+	s32 status;
+	u16 checksum;
+	u16 read_checksum = 0;
+
+	/*
+	 * Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	status = ixgbe_read_eeprom(hw, 0, &checksum);
+
+	if (status == 0) {
+		checksum = ixgbe_calc_eeprom_checksum(hw);
+
+		ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+
+		/*
+		 * Verify read checksum from EEPROM is the same as
+		 * calculated checksum
+		 */
+		if (read_checksum != checksum)
+			status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+		/* If the user cares, return the calculated checksum */
+		if (checksum_val)
+			*checksum_val = checksum;
+	} else {
+		hw_dbg(hw, "EEPROM read failed\n");
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_validate_mac_addr - Validate MAC address
+ *  @mac_addr: pointer to MAC address.
+ *
+ *  Tests a MAC address to ensure it is a valid Individual Address
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+	s32 status = 0;
+
+	/* Make sure it is not a multicast address */
+	if (IXGBE_IS_MULTICAST(mac_addr))
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	/* Not a broadcast address */
+	else if (IXGBE_IS_BROADCAST(mac_addr))
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+	/* Reject the zero address */
+	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+		 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+		status = IXGBE_ERR_INVALID_MAC_ADDR;
+
+	return status;
+}
+
+/**
+ *  ixgbe_set_rar - Set RX address register
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @index: Receive address register to write
+ *  @vind: VMDq index to associate with the RAR
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
+		  u32 enable_addr)
+{
+	u32 rar_low, rar_high;
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order from
+	 * network order (big endian) to little endian
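+	 * (e.g. a hypothetical address 00:A0:C9:00:00:01 gives
+	 * rar_low = 0x00C9A000 and addr[4]/addr[5] = 0x0100 in rar_high).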
+	 */
+	rar_low = ((u32)addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+
+	rar_high = ((u32)addr[4] |
+		    ((u32)addr[5] << 8) |
+		    ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK));
+
+	if (enable_addr != 0)
+		rar_high |= IXGBE_RAH_AV;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_init_rx_addrs - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rx_addrs;
+
+	/*
+	 * If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+	    IXGBE_ERR_INVALID_MAC_ADDR) {
+		/* Get the MAC address from the RAR0 for later reference */
+		ixgbe_get_mac_addr(hw, hw->mac.addr);
+
+		hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+			  hw->mac.addr[0], hw->mac.addr[1],
+			  hw->mac.addr[2]);
+		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+			  hw->mac.addr[4], hw->mac.addr[5]);
+	} else {
+		/* Setup the receive address. */
+		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+		hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
+			  hw->mac.addr[0], hw->mac.addr[1],
+			  hw->mac.addr[2]);
+		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
+			  hw->mac.addr[4], hw->mac.addr[5]);
+
+		ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+	}
+
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	hw_dbg(hw, "Clearing RAR[1-15]\n");
+	for (i = 1; i < rar_entries; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mc_addr_in_rar_count = 0;
+	hw->addr_ctrl.mta_in_use = 0;
+	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+	hw_dbg(hw, " Clearing MTA\n");
+	for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits from a multicast address that determine which
+ *  bit-vector to set in the multicast table. The hardware uses the same
+ *  12 bits of incoming rx multicast addresses to decide which bit-vector
+ *  to check in the MTA. Which of the 4 possible 12-bit combinations the
+ *  hardware uses is selected by the MO field of MCSTCTRL. The MO field
+ *  is set during initialization to mc_filter_type.
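+ *
+ *  Worked example (hypothetical address 01:00:5E:00:00:01, filter type 0):
+ *  vector = (mc_addr[4] >> 4) | (mc_addr[5] << 4)
+ *         = (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. bit-vector 16.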
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:	  /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:	  /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:	  /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:	  /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:	 /* Invalid mc_filter_type */
+		hw_dbg(hw, "MC filter type param set incorrectly\n");
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ *  ixgbe_set_mta - Set bit-vector in multicast table
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address to set in the table
+ *
+ *  Sets the bit-vector in the multicast table.
+ **/
+static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+	u32 mta_reg;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = ixgbe_mta_vector(hw, mc_addr);
+	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits.  We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value.  The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register is determined by the lower 5 bits of
+	 * the value.
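+	 * E.g. a (hypothetical) vector of 0x010 lands in register 0, bit 16.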
+	 */
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+	mta_reg |= (1 << vector_bit);
+	IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+}
+
+/**
+ *  ixgbe_add_mc_addr - Adds a multicast address.
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: new multicast address
+ *
+ *  Adds it to unused receive address register or to the multicast table.
+ **/
+static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 rar_entries = hw->mac.num_rx_addrs;
+
+	hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
+		  mc_addr[0], mc_addr[1], mc_addr[2],
+		  mc_addr[3], mc_addr[4], mc_addr[5]);
+
+	/*
+	 * Place this multicast address in the RAR if there is room,
+	 * else put it in the MTA
+	 */
+	if (hw->addr_ctrl.rar_used_count < rar_entries) {
+		ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count,
+			      mc_addr, 0, IXGBE_RAH_AV);
+		hw_dbg(hw, "Added a multicast address to RAR[%d]\n",
+			  hw->addr_ctrl.rar_used_count);
+		hw->addr_ctrl.rar_used_count++;
+		hw->addr_ctrl.mc_addr_in_rar_count++;
+	} else {
+		ixgbe_set_mta(hw, mc_addr);
+	}
+
+	hw_dbg(hw, "ixgbe_add_mc_addr Complete\n");
+}
+
+/**
+ *  ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @pad: number of bytes between addresses in the list
+ *
+ *  The given list replaces any existing list. Clears the MC addrs from receive
+ *  address registers and the multicast table. Uses unused receive address
+ *  registers for the first multicast addresses, and hashes the rest into the
+ *  multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, u32 pad)
+{
+	u32 i;
+	u32 rar_entries = hw->mac.num_rx_addrs;
+
+	/*
+	 * Set the new number of MC addresses that we are being requested to
+	 * use.
+	 */
+	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+	hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
+	hw->addr_ctrl.mc_addr_in_rar_count = 0;
+	hw->addr_ctrl.mta_in_use = 0;
+
+	/* Zero out the other receive addresses. */
+	hw_dbg(hw, "Clearing RAR[1-15]\n");
+	for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+	}
+
+	/* Clear the MTA */
+	hw_dbg(hw, " Clearing MTA\n");
+	for (i = 0; i < IXGBE_MC_TBL_SIZE; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+	/* Add the new addresses */
+	for (i = 0; i < mc_addr_count; i++) {
+		hw_dbg(hw, " Adding the multicast addresses:\n");
+		ixgbe_add_mc_addr(hw, mc_addr_list +
+				  (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad)));
+	}
+
+	/* Enable mta */
+	if (hw->addr_ctrl.mta_in_use > 0)
+		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+	hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n");
+	return 0;
+}
+
+/**
+ *  ixgbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+	u32 offset;
+	u32 vlanbyte;
+
+	for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+		for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
+			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+					0);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_set_vfta - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+		   bool vlan_on)
+{
+	u32 VftaIndex;
+	u32 BitOffset;
+	u32 VftaReg;
+	u32 VftaByte;
+
+	/* Determine 32-bit word position in array */
+	VftaIndex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+	/* Determine the location of the (VMD) queue index */
+	VftaByte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+	BitOffset = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
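+
+	/*
+	 * Worked example (hypothetical): vlan 100 gives VftaIndex = 3,
+	 * VftaByte = 0 and BitOffset = 16 here; further down, the
+	 * VLAN-enable bit for vlan 100 becomes bit 4 of VFTA[3].
+	 */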
+
+	/* Set the nibble for VMD queue index */
+	VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
+	VftaReg &= (~(0x0F << BitOffset));
+	VftaReg |= (vind << BitOffset);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
+
+	/* Determine the location of the bit for this VLAN id */
+	BitOffset = vlan & 0x1F;	   /* lower five bits */
+
+	VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
+	if (vlan_on)
+		/* Turn on this VLAN id */
+		VftaReg |= (1 << BitOffset);
+	else
+		/* Turn off this VLAN id */
+		VftaReg &= ~(1 << BitOffset);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_setup_fc - Configure flow control settings
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Configures the flow control settings based on SW configuration.
+ *  This function is used for 802.3x flow control configuration only.
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+	u32 frctl_reg;
+	u32 rmcs_reg;
+
+	if (packetbuf_num < 0 || packetbuf_num > 7)
+		hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
+		       "is 0-7\n", packetbuf_num);
+
+	frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+	/*
+	 * We want to save off the original Flow Control configuration just in
+	 * case we get disconnected and then reconnected into a different hub
+	 * or switch with different Flow Control capabilities.
+	 */
+	hw->fc.type = hw->fc.original_type;
+
+	/*
+	 * The possible values of the "flow_control" parameter are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames but not
+	 *    send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but we do not
+	 *    support receiving pause frames)
+	 * 3: Both Rx and TX flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.type) {
+	case ixgbe_fc_none:
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * RX Flow control is enabled,
+		 * and TX Flow control is disabled.
+		 */
+		frctl_reg |= IXGBE_FCTRL_RFCE;
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * TX Flow control is enabled, and RX Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	case ixgbe_fc_full:
+		/*
+		 * Flow control (both RX and TX) is enabled by a software
+		 * over-ride.
+		 */
+		frctl_reg |= IXGBE_FCTRL_RFCE;
+		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+		break;
+	default:
+		/* We should never get here.  The value should be 0-3. */
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		break;
+	}
+
+	/* Enable 802.3x based flow control settings. */
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+	/*
+	 * We need to set up the Receive Threshold high and low water
+	 * marks as well as (optionally) enabling the transmission of
+	 * XON frames.
+	 */
+	if (hw->fc.type & ixgbe_fc_tx_pause) {
+		if (hw->fc.send_xon) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+					(hw->fc.low_water | IXGBE_FCRTL_XONE));
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
+					hw->fc.low_water);
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
+				hw->fc.high_water | IXGBE_FCRTH_FCEN);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+	return 0;
+}
+
+/**
+ *  ixgbe_disable_pcie_master - Disable PCI-express master access
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.  Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master
+ *  disable bit did not stop outstanding master requests within the
+ *  timeout, or 0 once master requests are disabled.
+ **/
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+	u32 ctrl;
+	s32 i;
+	s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	ctrl |= IXGBE_CTRL_GIO_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+
+	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
+			status = 0;
+			break;
+		}
+		udelay(100);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+	u32 gssr;
+	u32 swmask = mask;
+	u32 fwmask = mask << 5;
+	s32 timeout = 200;
+
+	while (timeout) {
+		if (ixgbe_get_eeprom_semaphore(hw))
+			return -IXGBE_ERR_SWFW_SYNC;
+
+		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+		if (!(gssr & (fwmask | swmask)))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask) or other software
+		 * thread currently using resource (swmask)
+		 */
+		ixgbe_release_eeprom_semaphore(hw);
+		msleep(5);
+		timeout--;
+	}
+
+	if (!timeout) {
+		hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
+		return -IXGBE_ERR_SWFW_SYNC;
+	}
+
+	gssr |= swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+	ixgbe_release_eeprom_semaphore(hw);
+	return 0;
+}
+
+/**
+ *  ixgbe_release_swfw_sync - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+	u32 gssr;
+	u32 swmask = mask;
+
+	ixgbe_get_eeprom_semaphore(hw);
+
+	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+	gssr &= ~swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+	ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ *  ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation from the analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32  atlas_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32  atlas_ctl;
+
+	atlas_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(10);
+
+	return 0;
+}
+
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
new file mode 100644
index 0000000..de6ddd5
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -0,0 +1,86 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num);
+
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
+		  u32 enable_addr);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+			      u32 mc_addr_count, u32 pad);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+
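+/*
+ * MMIO register accessors.  Registers are memory-mapped at hw->hw_addr;
+ * the array variants index in 4-byte steps.  Reading the STATUS register
+ * is used to flush posted writes to the device.
+ */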
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
+    writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
+    readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#ifdef DEBUG
+#define hw_dbg(hw, format, arg...) \
+	printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
+#else
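+/* Typed no-op that preserves printf-style format checking without DEBUG */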
+static inline int __attribute__ ((format (printf, 2, 3)))
+hw_dbg(struct ixgbe_hw *hw, const char *format, ...)
+{
+	return 0;
+}
+#endif
+
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_compat.h b/drivers/net/ixgbe/ixgbe_compat.h
new file mode 100644
index 0000000..b488c10
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_compat.h
@@ -0,0 +1,20 @@
+#ifndef __IXGBE_COMPAT_H__
+#define __IXGBE_COMPAT_H__
+
+#include <linux/if_vlan.h>
+
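+/*
+ * Compatibility helpers for older kernels (such as RHEL5's 2.6.18) that
+ * predate the upstream vlan_group_{get,set}_device() accessors and the
+ * ETH_FCS_LEN definition.
+ */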
+#define ETH_FCS_LEN               4
+
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+						       int vlan_id)
+{
+	return vg->vlan_devices[vlan_id];
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+					 struct net_device *dev)
+{
+	vg->vlan_devices[vlan_id] = dev;
+}
+
+#endif /* __IXGBE_COMPAT_H__ */
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
new file mode 100644
index 0000000..ccd7f11
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -0,0 +1,943 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbe */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+
+#include "ixgbe.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+struct ixgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
+		      offsetof(struct ixgbe_adapter, m)
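+/* Each entry pairs a field's size with its byte offset in struct
+ * ixgbe_adapter so ixgbe_get_ethtool_stats() can read it as u64 or u32. */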
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+	{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
+	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
+	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
+	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+	{"lsc_int", IXGBE_STAT(lsc_int)},
+	{"tx_busy", IXGBE_STAT(tx_busy)},
+	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
+	{"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
+	{"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
+	{"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
+	{"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
+	{"multicast", IXGBE_STAT(net_stats.multicast)},
+	{"broadcast", IXGBE_STAT(stats.bprc)},
+	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
+	{"collisions", IXGBE_STAT(net_stats.collisions)},
+	{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
+	{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
+	{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
+	{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
+	{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
+	{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
+	{"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
+	{"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
+	{"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
+	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
+	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
+	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
+	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
+	{"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
+	{"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
+	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
+	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
+	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
+	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
+	{"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
+	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
+	{"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
+	{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
+	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
+	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN \
+		((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
+		 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
+		 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
+#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
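+/* Total stats: the global table plus a (packets, bytes) pair per queue. */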
+
+static int ixgbe_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+	ecmd->port = PORT_FIBRE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	if (netif_carrier_ok(adapter->netdev)) {
+		ecmd->speed = SPEED_10000;
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = AUTONEG_DISABLE;
+	return 0;
+}
+
+static int ixgbe_set_settings(struct net_device *netdev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE ||
+	    ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
+		return -EINVAL;
+
+	if (netif_running(adapter->netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_reset(adapter);
+		ixgbe_up(adapter);
+	} else {
+		ixgbe_reset(adapter);
+	}
+
+	return 0;
+}
+
+static void ixgbe_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	pause->autoneg = AUTONEG_DISABLE;
+
+	if (hw->fc.type == ixgbe_fc_rx_pause) {
+		pause->rx_pause = 1;
+	} else if (hw->fc.type == ixgbe_fc_tx_pause) {
+		pause->tx_pause = 1;
+	} else if (hw->fc.type == ixgbe_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int ixgbe_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (pause->autoneg == AUTONEG_ENABLE)
+		return -EINVAL;
+
+	if (pause->rx_pause && pause->tx_pause)
+		hw->fc.type = ixgbe_fc_full;
+	else if (pause->rx_pause && !pause->tx_pause)
+		hw->fc.type = ixgbe_fc_rx_pause;
+	else if (!pause->rx_pause && pause->tx_pause)
+		hw->fc.type = ixgbe_fc_tx_pause;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		hw->fc.type = ixgbe_fc_none;
+
+	hw->fc.original_type = hw->fc.type;
+
+	if (netif_running(adapter->netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_up(adapter);
+	} else {
+		ixgbe_reset(adapter);
+	}
+
+	return 0;
+}
+
+static u32 ixgbe_get_rx_csum(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
+}
+
+static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	if (data)
+		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+	else
+		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	if (netif_running(netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_up(adapter);
+	} else {
+		ixgbe_reset(adapter);
+	}
+
+	return 0;
+}
+
+static u32 ixgbe_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int ixgbe_set_tso(struct net_device *netdev, u32 data)
+{
+	if (data) {
+		netdev->features |= NETIF_F_TSO;
+		netdev->features |= NETIF_F_TSO6;
+	} else {
+		netdev->features &= ~NETIF_F_TSO;
+		netdev->features &= ~NETIF_F_TSO6;
+	}
+	return 0;
+}
+
+static u32 ixgbe_get_msglevel(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int ixgbe_get_regs_len(struct net_device *netdev)
+{
+#define IXGBE_REGS_LEN  1128
+	return IXGBE_REGS_LEN * sizeof(u32);
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
+
+static void ixgbe_get_regs(struct net_device *netdev,
+			   struct ethtool_regs *regs, void *p)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u8 i;
+
+	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
+
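+	/* Dump format version 1, tagged with the PCI revision and device id */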
+	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+	/* General Registers */
+	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
+	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
+	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
+	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
+	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
+
+	/* NVM Register */
+	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
+	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
+	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
+	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
+	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
+	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
+	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
+	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+
+	/* Interrupt */
+	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR);
+	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
+	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
+	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
+	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
+	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
+	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
+	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
+	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
+	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
+	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
+	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+	/* Flow Control */
+	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
+	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
+	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
+	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
+	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+	for (i = 0; i < 8; i++)
+		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
+	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
+
+	/* Receive DMA */
+	for (i = 0; i < 64; i++)
+		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+	for (i = 0; i < 64; i++)
+		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+	for (i = 0; i < 64; i++)
+		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+	for (i = 0; i < 64; i++)
+		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+	for (i = 0; i < 64; i++)
+		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+	for (i = 0; i < 64; i++)
+		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+	for (i = 0; i < 16; i++)
+		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+	for (i = 0; i < 16; i++)
+		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	for (i = 0; i < 8; i++)
+		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
+
+	/* Receive */
+	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+	for (i = 0; i < 16; i++)
+		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+	for (i = 0; i < 16; i++)
+		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
+	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
+	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+	for (i = 0; i < 8; i++)
+		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
+	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
+
+	/* Transmit */
+	for (i = 0; i < 32; i++)
+		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
+	for (i = 0; i < 32; i++)
+		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
+	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+	for (i = 0; i < 16; i++)
+		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
+	for (i = 0; i < 8; i++)
+		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
+	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
+
+	/* Wake Up */
+	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
+	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
+	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
+	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
+	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
+	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
+	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
+	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
+	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
+
+	/* DCE */
+	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+	for (i = 0; i < 8; i++)
+		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+	for (i = 0; i < 8; i++)
+		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+
+	/* Statistics */
+	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
+	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
+	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
+	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
+	for (i = 0; i < 8; i++)
+		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
+	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
+	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
+	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
+	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
+	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
+	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
+	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
+	for (i = 0; i < 8; i++)
+		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
+	for (i = 0; i < 8; i++)
+		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
+	for (i = 0; i < 8; i++)
+		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
+	for (i = 0; i < 8; i++)
+		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
+	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
+	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
+	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
+	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
+	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
+	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
+	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
+	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
+	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
+	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
+	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
+	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+	for (i = 0; i < 8; i++)
+		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
+	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
+	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
+	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
+	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
+	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
+	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
+	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
+	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
+	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
+	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
+	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
+	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
+	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
+	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
+	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
+	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
+	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
+	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
+	for (i = 0; i < 16; i++)
+		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
+	for (i = 0; i < 16; i++)
+		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
+	for (i = 0; i < 16; i++)
+		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
+	for (i = 0; i < 16; i++)
+		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
+
+	/* MAC */
+	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
+	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
+	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
+	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
+	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
+	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
+	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
+	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
+	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
+	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
+	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
+	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
+	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
+	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
+	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
+	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
+	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
+	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
+	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
+	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
+	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
+	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
+	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
+	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+
+	/* Diagnostic */
+	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
+	for (i = 0; i < 8; i++)
+		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
+	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
+	regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0);
+	regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1);
+	regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
+	regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
+	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
+	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
+	for (i = 0; i < 8; i++)
+		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
+	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
+	regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0);
+	regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1);
+	regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
+	regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
+	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
+	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
+	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
+	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
+	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
+	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
+	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
+	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
+	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
+	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+	for (i = 0; i < 8; i++)
+		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
+	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
+	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
+	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
+	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
+	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
+	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
+	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
+	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
+	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
+}
+
+static int ixgbe_get_eeprom_len(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	return adapter->hw.eeprom.word_size * 2;
+}
+
+static int ixgbe_get_eeprom(struct net_device *netdev,
+			    struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	int first_word, last_word, eeprom_len;
+	int ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_len = last_word - first_word + 1;
+
+	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	for (i = 0; i < eeprom_len; i++) {
+		if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
+						 &eeprom_buff[i])))
+			break;
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < eeprom_len; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static void ixgbe_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	strncpy(drvinfo->driver, ixgbe_driver_name, 32);
+	strncpy(drvinfo->version, ixgbe_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = IXGBE_STATS_LEN;
+	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
+}
+
+static void ixgbe_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_ring *tx_ring = adapter->tx_ring;
+	struct ixgbe_ring *rx_ring = adapter->rx_ring;
+
+	ring->rx_max_pending = IXGBE_MAX_RXD;
+	ring->tx_max_pending = IXGBE_MAX_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbe_set_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_tx_buffer *old_buf;
+	struct ixgbe_rx_buffer *old_rx_buf;
+	void *old_desc;
+	int i, err;
+	u32 new_rx_count, new_tx_count, old_size;
+	dma_addr_t old_dma;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
+	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
+	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring->count) &&
+	    (new_rx_count == adapter->rx_ring->count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	if (netif_running(adapter->netdev))
+		ixgbe_down(adapter);
+
+	/*
+	 * We can't just free everything and then setup again,
+	 * because the ISRs in MSI-X mode get passed pointers
+	 * to the tx and rx ring structs.
+	 */
+	if (new_tx_count != adapter->tx_ring->count) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			/* Save existing descriptor ring */
+			old_buf = adapter->tx_ring[i].tx_buffer_info;
+			old_desc = adapter->tx_ring[i].desc;
+			old_size = adapter->tx_ring[i].size;
+			old_dma = adapter->tx_ring[i].dma;
+			/* Try to allocate a new one */
+			adapter->tx_ring[i].tx_buffer_info = NULL;
+			adapter->tx_ring[i].desc = NULL;
+			adapter->tx_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(adapter,
+						       &adapter->tx_ring[i]);
+			if (err) {
+				/* Restore the old one so at least
+				   the adapter still works, even if
+				   we failed the request */
+				adapter->tx_ring[i].tx_buffer_info = old_buf;
+				adapter->tx_ring[i].desc = old_desc;
+				adapter->tx_ring[i].size = old_size;
+				adapter->tx_ring[i].dma = old_dma;
+				goto err_setup;
+			}
+			/* Free the old buffer manually */
+			vfree(old_buf);
+			pci_free_consistent(adapter->pdev, old_size,
+					    old_desc, old_dma);
+		}
+	}
+
+	if (new_rx_count != adapter->rx_ring->count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+
+			old_rx_buf = adapter->rx_ring[i].rx_buffer_info;
+			old_desc = adapter->rx_ring[i].desc;
+			old_size = adapter->rx_ring[i].size;
+			old_dma = adapter->rx_ring[i].dma;
+
+			adapter->rx_ring[i].rx_buffer_info = NULL;
+			adapter->rx_ring[i].desc = NULL;
+			adapter->rx_ring[i].dma = 0;
+			adapter->rx_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(adapter,
+						       &adapter->rx_ring[i]);
+			if (err) {
+				adapter->rx_ring[i].rx_buffer_info = old_rx_buf;
+				adapter->rx_ring[i].desc = old_desc;
+				adapter->rx_ring[i].size = old_size;
+				adapter->rx_ring[i].dma = old_dma;
+				goto err_setup;
+			}
+
+			vfree(old_rx_buf);
+			pci_free_consistent(adapter->pdev, old_size, old_desc,
+					    old_dma);
+		}
+	}
+
+	err = 0;
+err_setup:
+	if (netif_running(adapter->netdev))
+		ixgbe_up(adapter);
+
+	return err;
+}
+
+static int ixgbe_get_stats_count(struct net_device *netdev)
+{
+	return IXGBE_STATS_LEN;
+}
+
+static void ixgbe_get_ethtool_stats(struct net_device *netdev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u64 *queue_stat;
+	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
+	int j, k;
+	int i;
+
+	ixgbe_update_stats(adapter);
+	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
+		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
+			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+}
+
+static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
+			      u8 *data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+/*		BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static void ixgbe_get_wol(struct net_device *netdev,
+			  struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+}
+
+static int ixgbe_nway_reset(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_reset(adapter);
+		ixgbe_up(adapter);
+	}
+
+	return 0;
+}
+
+static int ixgbe_phys_id(struct net_device *netdev, u32 data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
+	u32 i;
+
+	if (!data || data > 300)
+		data = 300;
+
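+	/* Each pass is one 400 ms on/off cycle (2.5 Hz) for 'data' seconds */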
+	for (i = 0; i < (data * 1000); i += 400) {
+		ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
+		msleep_interruptible(200);
+		ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
+		msleep_interruptible(200);
+	}
+
+	/* Restore LED settings */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);
+
+	return 0;
+}
+
+static int ixgbe_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->rx_eitr == 0)
+		ec->rx_coalesce_usecs = 0;
+	else
+		ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
+
+	if (adapter->tx_eitr == 0)
+		ec->tx_coalesce_usecs = 0;
+	else
+		ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
+
+	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+	return 0;
+}
+
+static int ixgbe_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
+	    ((ec->rx_coalesce_usecs > 0) &&
+	     (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
+		return -EINVAL;
+	if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
+	    ((ec->tx_coalesce_usecs > 0) &&
+	     (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
+		return -EINVAL;
+
+	/* convert to rate of irq's per second */
+	if (ec->rx_coalesce_usecs == 0)
+		adapter->rx_eitr = 0;
+	else
+		adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
+
+	if (ec->tx_coalesce_usecs == 0)
+		adapter->tx_eitr = 0;
+	else
+		adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
+
+	if (ec->tx_max_coalesced_frames_irq)
+		adapter->tx_ring[0].work_limit =
+					ec->tx_max_coalesced_frames_irq;
+
+	if (netif_running(netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_up(adapter);
+	}
+
+	return 0;
+}
+
+static struct ethtool_ops ixgbe_ethtool_ops = {
+	.get_settings           = ixgbe_get_settings,
+	.set_settings           = ixgbe_set_settings,
+	.get_drvinfo            = ixgbe_get_drvinfo,
+	.get_regs_len           = ixgbe_get_regs_len,
+	.get_regs               = ixgbe_get_regs,
+	.get_wol                = ixgbe_get_wol,
+	.nway_reset             = ixgbe_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_eeprom_len         = ixgbe_get_eeprom_len,
+	.get_eeprom             = ixgbe_get_eeprom,
+	.get_ringparam          = ixgbe_get_ringparam,
+	.set_ringparam          = ixgbe_set_ringparam,
+	.get_pauseparam         = ixgbe_get_pauseparam,
+	.set_pauseparam         = ixgbe_set_pauseparam,
+	.get_rx_csum            = ixgbe_get_rx_csum,
+	.set_rx_csum            = ixgbe_set_rx_csum,
+	.get_tx_csum            = ixgbe_get_tx_csum,
+	.set_tx_csum            = ixgbe_set_tx_csum,
+	.get_sg                 = ethtool_op_get_sg,
+	.set_sg                 = ethtool_op_set_sg,
+	.get_msglevel           = ixgbe_get_msglevel,
+	.set_msglevel           = ixgbe_set_msglevel,
+	.get_tso                = ethtool_op_get_tso,
+	.set_tso                = ixgbe_set_tso,
+	.get_strings            = ixgbe_get_strings,
+	.phys_id                = ixgbe_phys_id,
+	.get_stats_count        = ixgbe_get_stats_count,
+	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
+	.get_coalesce           = ixgbe_get_coalesce,
+	.set_coalesce           = ixgbe_set_coalesce,
+};
+
+void ixgbe_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
new file mode 100644
index 0000000..70775af
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -0,0 +1,2876 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+
+char ixgbe_driver_name[] = "ixgbe";
+static const char ixgbe_driver_string[] =
+	"Intel(R) 10 Gigabit PCI Express Network Driver";
+
+#define DRV_VERSION "1.1.18"
+const char ixgbe_driver_version[] = DRV_VERSION;
+static const char ixgbe_copyright[] =
+	 "Copyright (c) 1999-2007 Intel Corporation.";
+
+static const struct ixgbe_info *ixgbe_info_tbl[] = {
+	[board_82598]			= &ixgbe_82598_info,
+};
+
+/* ixgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbe_pci_tbl[] = {
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
+	 board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
+	 board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
+	 board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
+	 board_82598 },
+
+	/* required last entry */
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * @hw: pointer to hardware structure
+ *
+ * Used by the hardware layer to print debugging information.
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	struct net_device *netdev = adapter->netdev;
+	return netdev->name;
+}
+#endif
+
+static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
+			   u8 msix_vector)
+{
+	u32 ivar, index;
+
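+	/*
+	 * Each 32-bit IVAR register packs four 8-bit vector entries; the
+	 * upper bits of int_alloc_entry select the register and the low
+	 * two bits select the byte lane within it.
+	 */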
+	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+	index = (int_alloc_entry >> 2) & 0x1F;
+	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
+	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
+	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
+}
+
+static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
+					     struct ixgbe_tx_buffer
+					     *tx_buffer_info)
+{
+	if (tx_buffer_info->dma) {
+		pci_unmap_page(adapter->pdev,
+			       tx_buffer_info->dma,
+			       tx_buffer_info->length, PCI_DMA_TODEVICE);
+		tx_buffer_info->dma = 0;
+	}
+	if (tx_buffer_info->skb) {
+		dev_kfree_skb_any(tx_buffer_info->skb);
+		tx_buffer_info->skb = NULL;
+	}
+	/* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
+				       struct ixgbe_ring *tx_ring,
+				       unsigned int eop,
+				       union ixgbe_adv_tx_desc *eop_desc)
+{
+	/* Detect a transmit hang in hardware, this serializes the
+	 * check with the clearing of time_stamp and movement of i */
+	adapter->detect_tx_hung = false;
+	if (tx_ring->tx_buffer_info[eop].dma &&
+	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
+	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
+		/* detected Tx unit hang */
+		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+			"  TDH                  <%x>\n"
+			"  TDT                  <%x>\n"
+			"  next_to_use          <%x>\n"
+			"  next_to_clean        <%x>\n"
+			"tx_buffer_info[next_to_clean]\n"
+			"  time_stamp           <%lx>\n"
+			"  next_to_watch        <%x>\n"
+			"  jiffies              <%lx>\n"
+			"  next_to_watch.status <%x>\n",
+			readl(adapter->hw.hw_addr + tx_ring->head),
+			readl(adapter->hw.hw_addr + tx_ring->tail),
+			tx_ring->next_to_use,
+			tx_ring->next_to_clean,
+			tx_ring->tx_buffer_info[eop].time_stamp,
+			eop, jiffies, eop_desc->wb.status);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+				    struct ixgbe_ring *tx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	unsigned int i, eop;
+	bool cleaned = false;
+	int count = 0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->tx_buffer_info[i].next_to_watch;
+	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
+		for (cleaned = false; !cleaned;) {
+			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			cleaned = (i == eop);
+
+			tx_ring->stats.bytes += tx_buffer_info->length;
+			ixgbe_unmap_and_free_tx_resource(adapter,
+							 tx_buffer_info);
+			tx_desc->wb.status = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+
+		tx_ring->stats.packets++;
+
+		eop = tx_ring->tx_buffer_info[i].next_to_watch;
+		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+		/* weight of a sort for tx, avoid endless transmit cleanup */
+		if (count++ >= tx_ring->work_limit)
+			break;
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	spin_lock(&tx_ring->tx_lock);
+
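+	/* Wake the stack's queue once enough descriptors have been freed */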
+	if (cleaned && netif_carrier_ok(netdev) &&
+	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
+	    !test_bit(__IXGBE_DOWN, &adapter->state))
+		netif_wake_queue(netdev);
+
+	spin_unlock(&tx_ring->tx_lock);
+
+	if (adapter->detect_tx_hung)
+		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
+			netif_stop_queue(netdev);
+
+	if (count >= tx_ring->work_limit)
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+
+	return cleaned;
+}
+
+/**
+ * ixgbe_receive_skb - Send a completed packet up the stack
+ * @adapter: board private structure
+ * @skb: packet to send up
+ * @is_vlan: packet has a VLAN tag
+ * @tag: VLAN tag from descriptor
+ **/
+static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
+			      struct sk_buff *skb, bool is_vlan,
+			      u16 tag)
+{
+	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+		if (adapter->vlgrp && is_vlan)
+			vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
+		else
+			netif_receive_skb(skb);
+	} else {
+
+		if (adapter->vlgrp && is_vlan)
+			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+		else
+			netif_rx(skb);
+	}
+}
+
+static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
+					 u32 status_err,
+					 struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Ignore Checksum bit is set */
+	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
+		     !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+		return;
+	/* TCP/UDP checksum error bit is set */
+	if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_rx_error++;
+		return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ * @rx_ring: ring to replace buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+				       struct ixgbe_ring *rx_ring,
+				       int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *rx_buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+	while (cleaned_count--) {
+		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+		if (!rx_buffer_info->page &&
+				(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+			rx_buffer_info->page = alloc_page(GFP_ATOMIC);
+			if (!rx_buffer_info->page) {
+				adapter->alloc_rx_page_failed++;
+				goto no_buffers;
+			}
+			rx_buffer_info->page_dma =
+			    pci_map_page(pdev, rx_buffer_info->page,
+					 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+		}
+
+		if (!rx_buffer_info->skb) {
+			skb = netdev_alloc_skb(netdev, bufsz);
+
+			if (!skb) {
+				adapter->alloc_rx_buff_failed++;
+				goto no_buffers;
+			}
+
+			/*
+			 * Make buffer alignment 2 beyond a 16 byte boundary
+			 * this will result in a 16 byte aligned IP header after
+			 * the 14 byte MAC header is removed
+			 */
+			skb_reserve(skb, NET_IP_ALIGN);
+
+			rx_buffer_info->skb = skb;
+			rx_buffer_info->dma = pci_map_single(pdev, skb->data,
+							  bufsz,
+							  PCI_DMA_FROMDEVICE);
+		}
+		/* Refresh the desc even if buffer_addrs didn't change because
+		 * each write-back erases this info. */
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+			rx_desc->read.pkt_addr =
+			    cpu_to_le64(rx_buffer_info->page_dma);
+			rx_desc->read.hdr_addr =
+					cpu_to_le64(rx_buffer_info->dma);
+		} else {
+			rx_desc->read.pkt_addr =
+					cpu_to_le64(rx_buffer_info->dma);
+		}
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+	}
+no_buffers:
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
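+		/* Hardware tail points at the last initialized descriptor,
+		 * one behind next_to_use, so step i back with wraparound. */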
+		if (i-- == 0)
+			i = (rx_ring->count - 1);
+
+		/*
+		 * Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+	}
+}
+
+static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
+			       struct ixgbe_ring *rx_ring,
+			       int *work_done, int work_to_do)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
+	struct sk_buff *skb;
+	unsigned int i;
+	u32 upper_len, len, staterr;
+	u16 hdr_info, vlan_tag;
+	bool is_vlan, cleaned = false;
+	int cleaned_count = 0;
+
+	i = rx_ring->next_to_clean;
+	upper_len = 0;
+	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	rx_buffer_info = &rx_ring->rx_buffer_info[i];
+	is_vlan = (staterr & IXGBE_RXD_STAT_VP);
+	vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+	while (staterr & IXGBE_RXD_STAT_DD) {
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+			hdr_info =
+			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
+			len =
+			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
+			if (hdr_info & IXGBE_RXDADV_SPH)
+				adapter->rx_hdr_split++;
+			if (len > IXGBE_RX_HDR_SIZE)
+				len = IXGBE_RX_HDR_SIZE;
+			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+		} else
+			len = le16_to_cpu(rx_desc->wb.upper.length);
+
+		cleaned = true;
+		skb = rx_buffer_info->skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		rx_buffer_info->skb = NULL;
+
+		if (len && !skb_shinfo(skb)->nr_frags) {
+			pci_unmap_single(pdev, rx_buffer_info->dma,
+					 adapter->rx_buf_len + NET_IP_ALIGN,
+					 PCI_DMA_FROMDEVICE);
+			skb_put(skb, len);
+		}
+
+		if (upper_len) {
+			pci_unmap_page(pdev, rx_buffer_info->page_dma,
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			rx_buffer_info->page_dma = 0;
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+					   rx_buffer_info->page, 0, upper_len);
+			rx_buffer_info->page = NULL;
+
+			skb->len += upper_len;
+			skb->data_len += upper_len;
+			skb->truesize += upper_len;
+		}
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_buffer = &rx_ring->rx_buffer_info[i];
+
+		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		prefetch(next_rxd);
+
+		cleaned_count++;
+		if (staterr & IXGBE_RXD_STAT_EOP) {
+			rx_ring->stats.packets++;
+			rx_ring->stats.bytes += skb->len;
+		} else {
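+			/* Packet spans multiple descriptors: hand the
+			 * in-progress skb off to the next buffer slot and
+			 * keep collecting fragments. */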
+			rx_buffer_info->skb = next_buffer->skb;
+			rx_buffer_info->dma = next_buffer->dma;
+			next_buffer->skb = skb;
+			adapter->non_eop_descs++;
+			goto next_desc;
+		}
+
+		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		ixgbe_rx_checksum(adapter, staterr, skb);
+		skb->protocol = eth_type_trans(skb, netdev);
+		ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.upper.status_error = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		rx_buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+		is_vlan = (staterr & IXGBE_RXD_STAT_VP);
+		vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+	}
+
+	rx_ring->next_to_clean = i;
+	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+	if (cleaned_count)
+		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+	return cleaned;
+}
+
+#define IXGBE_MAX_INTR 10
+/**
+ * ixgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
+{
+	int i, vector = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
+			       IXGBE_MSIX_VECTOR(vector));
+		writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
+		       adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
+		vector++;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
+			       IXGBE_MSIX_VECTOR(vector));
+		writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
+		       adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
+		vector++;
+	}
+
+	vector = adapter->num_tx_queues + adapter->num_rx_queues;
+	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+		       IXGBE_MSIX_VECTOR(vector));
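+	/* throttle the "other causes" vector (link state changes, etc.)
+	 * at a fixed moderate rate */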
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
+}
+
+static irqreturn_t ixgbe_msix_lsc(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = data;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+	if (eicr & IXGBE_EICR_LSC) {
+		adapter->lsc_int++;
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies);
+	}
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data, struct pt_regs *regs)
+{
+	struct ixgbe_ring *txr = data;
+	struct ixgbe_adapter *adapter = txr->adapter;
+
+	ixgbe_clean_tx_irq(adapter, txr);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data, struct pt_regs *regs)
+{
+	struct ixgbe_ring *rxr = data;
+	struct ixgbe_adapter *adapter = rxr->adapter;
+
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
+	netif_rx_schedule(adapter->netdev);
+	return IRQ_HANDLED;
+}
+
+static int ixgbe_clean_rxonly(struct net_device *poll_dev, int *budget)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(poll_dev);
+	int work_to_do = min(*budget, poll_dev->quota);
+	int work_done = 0;
+	struct ixgbe_ring *rxr = adapter->rx_ring;
+
+	/* Keep link state information with original poll_dev */
+	if (!netif_carrier_ok(poll_dev))
+		goto quit_polling;
+
+	ixgbe_clean_rx_irq(adapter, rxr, &work_done, work_to_do);
+
+	/* If no Tx and not enough Rx work done, exit the polling mode */
+	if ((work_done < work_to_do) || !netif_running(poll_dev)) {
+quit_polling:
+		netif_rx_complete(poll_dev);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
+					rxr->eims_value);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ixgbe_setup_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbe_setup_msix allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i, int_vector = 0, err = 0;
+	int max_msix_count;
+
+	/* +1 for the LSC interrupt */
+	max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
+	adapter->msix_entries = kcalloc(max_msix_count,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries)
+		return -ENOMEM;
+
+	for (i = 0; i < max_msix_count; i++)
+		adapter->msix_entries[i].entry = i;
+
+	err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+			      max_msix_count);
+	if (err)
+		goto out;
+
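+	/*
+	 * Vector layout: one vector per Tx queue, then one per Rx queue,
+	 * and finally a single vector for link state changes.
+	 */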
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
+		err = request_irq(adapter->msix_entries[int_vector].vector,
+				  &ixgbe_msix_clean_tx,
+				  0,
+				  adapter->tx_ring[i].name,
+				  &(adapter->tx_ring[i]));
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"request_irq failed for MSIX interrupt "
+				"Error: %d\n", err);
+			goto release_irqs;
+		}
+		adapter->tx_ring[i].eims_value =
+		    (1 << IXGBE_MSIX_VECTOR(int_vector));
+		adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
+		int_vector++;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (strlen(netdev->name) < (IFNAMSIZ - 5))
+			sprintf(adapter->rx_ring[i].name,
+				"%s-rx%d", netdev->name, i);
+		else
+			memcpy(adapter->rx_ring[i].name,
+			       netdev->name, IFNAMSIZ);
+		err = request_irq(adapter->msix_entries[int_vector].vector,
+				  &ixgbe_msix_clean_rx, 0,
+				  adapter->rx_ring[i].name,
+				  &(adapter->rx_ring[i]));
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"request_irq failed for MSIX interrupt "
+				"Error: %d\n", err);
+			goto release_irqs;
+		}
+
+		adapter->rx_ring[i].eims_value =
+		    (1 << IXGBE_MSIX_VECTOR(int_vector));
+		adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
+		int_vector++;
+	}
+
+	sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
+	err = request_irq(adapter->msix_entries[int_vector].vector,
+			  &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
+	if (err) {
+		DPRINTK(PROBE, ERR,
+			"request_irq for msix_lsc failed: %d\n", err);
+		goto release_irqs;
+	}
+
+	/* FIXME: implement netif_napi_remove() instead */
+	netdev->poll = ixgbe_clean_rxonly;
+	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
+	return 0;
+
+release_irqs:
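+	/*
+	 * Unwind in reverse: vectors at or above num_tx_queues were
+	 * requested for Rx rings, the ones below it for Tx rings.
+	 */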
+	int_vector--;
+	for (; int_vector >= adapter->num_tx_queues; int_vector--)
+		free_irq(adapter->msix_entries[int_vector].vector,
+			 &(adapter->rx_ring[int_vector -
+					    adapter->num_tx_queues]));
+
+	for (; int_vector >= 0; int_vector--)
+		free_irq(adapter->msix_entries[int_vector].vector,
+			 &(adapter->tx_ring[int_vector]));
+out:
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+	return err;
+}
+
+/**
+ * ixgbe_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @regs: CPU registers structure
+ **/
+static irqreturn_t ixgbe_intr(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = data;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 eicr;
+
+	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+	if (!eicr)
+		return IRQ_NONE;	/* Not our interrupt */
+
+	if (eicr & IXGBE_EICR_LSC) {
+		adapter->lsc_int++;
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies);
+	}
+	if (netif_rx_schedule_prep(netdev)) {
+		/* Disable interrupts and register for poll. The flush of the
+		 * posted write is intentionally left out. */
+		atomic_inc(&adapter->irq_sem);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+		__netif_rx_schedule(netdev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ixgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
+{
+	struct net_device *netdev = adapter->netdev;
+	int flags, err;
+	irqreturn_t(*handler) (int, void *, struct pt_regs *) = &ixgbe_intr;
+
+	flags = IRQF_SHARED;
+
+	err = ixgbe_setup_msix(adapter);
+	if (!err)
+		goto request_done;
+
+	/*
+	 * If we can't do MSI-X, fall back to a single queue and try MSI.
+	 * There is no need to reallocate memory since we're decreasing the
+	 * number of queues; the extra rings simply go unused and are freed
+	 * correctly in ixgbe_remove.
+	 */
+	*num_rx_queues = 1;
+
+	/* do MSI */
+	err = pci_enable_msi(adapter->pdev);
+	if (!err) {
+		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+		flags &= ~IRQF_SHARED;
+		handler = &ixgbe_intr;
+	}
+
+	err = request_irq(adapter->pdev->irq, handler, flags,
+			  netdev->name, netdev);
+	if (err)
+		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
+
+request_done:
+	return err;
+}
+
+static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		int i;
+
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			free_irq(adapter->msix_entries[i].vector,
+				 &(adapter->tx_ring[i]));
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			free_irq(adapter->msix_entries[i +
+						adapter->num_tx_queues].vector,
+				&(adapter->rx_ring[i]));
+		i = adapter->num_rx_queues + adapter->num_tx_queues;
+		free_irq(adapter->msix_entries[i].vector, netdev);
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+		return;
+	}
+
+	free_irq(adapter->pdev->irq, netdev);
+	if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+	}
+}
+
+/**
+ * ixgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
+{
+	atomic_inc(&adapter->irq_sem);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
+{
+	if (atomic_dec_and_test(&adapter->irq_sem)) {
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
+					(IXGBE_EIMS_ENABLE_MASK &
+					 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
+				IXGBE_EIMS_ENABLE_MASK);
+		IXGBE_WRITE_FLUSH(&adapter->hw);
+	}
+}
+
+/**
+ * ixgbe_configure_msi_and_legacy - Initialize legacy (INTA) and MSI interrupts
+ * @adapter: board private structure
+ **/
+static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
+{
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (adapter->rx_eitr)
+		IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
+				EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
+
+	/* for re-triggering the interrupt in non-NAPI mode */
+	adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
+	adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
+
+	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
+}
+
+/**
+ * ixgbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+{
+	u64 tdba;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 i, tdlen;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tdba = adapter->tx_ring[i].dma;
+		tdlen = adapter->tx_ring[i].count *
+		    sizeof(union ixgbe_adv_tx_desc);
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+		adapter->tx_ring[i].head = IXGBE_TDH(i);
+		adapter->tx_ring[i].tail = IXGBE_TDT(i);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
+}
+
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT			2
+/**
+ * ixgbe_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+{
+	u64 rdba;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 rdlen, rxctrl, rxcsum;
+	u32 random[10];
+	u32 reta, mrqc;
+	int i;
+	u32 fctrl, hlreg0;
+	u32 srrctl;
+	u32 pages;
+
+	/* Decide whether to use packet split mode or not */
+	if (netdev->mtu > ETH_DATA_LEN)
+		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	else
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+	/* Set the RX buffer length according to the mode */
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
+	} else {
+		if (netdev->mtu <= ETH_DATA_LEN)
+			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		else
+			adapter->rx_buf_len = ALIGN(max_frame, 1024);
+	}
+
+	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	fctrl |= IXGBE_FCTRL_BAM;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+
+	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+	else
+		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+
+	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
+	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+		srrctl |= ((IXGBE_RX_HDR_SIZE <<
+			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+			   IXGBE_SRRCTL_BSIZEHDR_MASK);
+	} else {
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+			srrctl |=
+			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		else
+			srrctl |=
+			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	}
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
+
+	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+	/* disable receives while setting up the descriptors */
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rdba = adapter->rx_ring[i].dma;
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+		adapter->rx_ring[i].head = IXGBE_RDH(i);
+		adapter->rx_ring[i].tail = IXGBE_RDT(i);
+	}
+
+	if (adapter->num_rx_queues > 1) {
+		/* 40 random bytes are used as the key for the RSS hash */
+		get_random_bytes(&random[0], 40);
+
+		switch (adapter->num_rx_queues) {
+		case 8:
+		case 4:
+			/* Bits [3:0] of each byte refer to the Rx queue number */
+			reta = 0x00010203;
+			break;
+		case 2:
+			reta = 0x00010001;
+			break;
+		default:
+			reta = 0x00000000;
+			break;
+		}
+
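+		/*
+		 * Each 32-bit RETA register holds four redirection entries,
+		 * one per byte, so 32 writes fill the 128-entry table.  With
+		 * more than four queues the alternating registers map queues
+		 * 4-7 (0x04050607).
+		 */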
+		/* Fill out redirection table */
+		for (i = 0; i < 32; i++) {
+			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
+			if (adapter->num_rx_queues > 4) {
+				i++;
+				IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
+						      0x04050607);
+			}
+		}
+
+		/* Fill out hash function seeds */
+		for (i = 0; i < 10; i++)
+			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);
+
+		mrqc = IXGBE_MRQC_RSSEN
+		    /* Perform hash on these packet types */
+		    | IXGBE_MRQC_RSS_FIELD_IPV4
+		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+		    | IXGBE_MRQC_RSS_FIELD_IPV6
+		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+		/* Multiqueue and packet checksumming are mutually exclusive. */
+		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+		rxcsum |= IXGBE_RXCSUM_PCSD;
+		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+	} else {
+		/* Enable Receive Checksum Offload for TCP and UDP */
+		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+		if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
+			/* Enable IPv4 payload checksum for UDP fragments.
+			 * Must be used in conjunction with packet-split. */
+			rxcsum |= IXGBE_RXCSUM_IPPCSE;
+		} else {
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+	}
+	/* Enable Receives */
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+}
+
+static void ixgbe_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl;
+
+	ixgbe_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+	}
+
+	ixgbe_irq_enable(adapter);
+}
+
+static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* add VID to filter table */
+	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
+}
+
+static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	ixgbe_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+	ixgbe_irq_enable(adapter);
+
+	/* remove VID from filter table */
+	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
+}
+
+static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
+{
+	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+
+/**
+ * ixgbe_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void ixgbe_set_multi(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct dev_mc_list *mc_ptr;
+	u8 *mta_list;
+	u32 fctrl;
+	int i;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		fctrl |= IXGBE_FCTRL_MPE;
+		fctrl &= ~IXGBE_FCTRL_UPE;
+	} else {
+		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+	if (netdev->mc_count) {
+		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
+		if (!mta_list)
+			return;
+
+		/* Shared function expects packed array of only addresses. */
+		mc_ptr = netdev->mc_list;
+
+		for (i = 0; i < netdev->mc_count; i++) {
+			if (!mc_ptr)
+				break;
+			memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
+			       ETH_ALEN);
+			mc_ptr = mc_ptr->next;
+		}
+
+		ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
+		kfree(mta_list);
+	} else {
+		ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
+	}
+}
+
+static void ixgbe_configure(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	ixgbe_set_multi(netdev);
+
+	ixgbe_restore_vlan(adapter);
+
+	ixgbe_configure_tx(adapter);
+	ixgbe_configure_rx(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
+					   (adapter->rx_ring[i].count - 1));
+}
+
+static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+	u32 gpie = 0;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 txdctl, rxdctl, mhadd;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
+			      IXGBE_FLAG_MSI_ENABLED)) {
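+		/*
+		 * Legacy (INTx) interrupts need no GPIE setup.  For MSI and
+		 * MSI-X, enable extended interrupt auto-masking (EIAME) and
+		 * PBA support; MSI-X additionally sets the MSIX_MODE and
+		 * OCD bits.
+		 */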
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
+				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
+		} else {
+			/* MSI only */
+			gpie = (IXGBE_GPIE_EIAME |
+				IXGBE_GPIE_PBA_SUPPORT);
+		}
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
+		gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+	}
+
+	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+
+	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
+		mhadd &= ~IXGBE_MHADD_MFS_MASK;
+		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
+
+		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
+	}
+	/* enable all receives */
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		ixgbe_configure_msix(adapter);
+	else
+		ixgbe_configure_msi_and_legacy(adapter);
+
+	clear_bit(__IXGBE_DOWN, &adapter->state);
+	netif_poll_enable(netdev);
+	ixgbe_irq_enable(adapter);
+
+	/* bring the link up in the watchdog, this could race with our first
+	 * link up interrupt but shouldn't be a problem */
+	mod_timer(&adapter->watchdog_timer, jiffies);
+	return 0;
+}
+
+int ixgbe_up(struct ixgbe_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	ixgbe_configure(adapter);
+
+	return ixgbe_up_complete(adapter);
+}
+
+void ixgbe_reset(struct ixgbe_adapter *adapter)
+{
+	if (ixgbe_init_hw(&adapter->hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+
+	/* reprogram the RAR[0] in case user changed it. */
+	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 err, num_rx_queues = adapter->num_rx_queues;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
+				"suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(netdev)) {
+		err = ixgbe_request_irq(adapter, &num_rx_queues);
+		if (err)
+			return err;
+	}
+
+	ixgbe_reset(adapter);
+
+	if (netif_running(netdev))
+		ixgbe_up(adapter);
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+#endif
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
+				struct ixgbe_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct ixgbe_rx_buffer *rx_buffer_info;
+
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+		if (rx_buffer_info->dma) {
+			pci_unmap_single(pdev, rx_buffer_info->dma,
+					 adapter->rx_buf_len,
+					 PCI_DMA_FROMDEVICE);
+			rx_buffer_info->dma = 0;
+		}
+		if (rx_buffer_info->skb) {
+			dev_kfree_skb(rx_buffer_info->skb);
+			rx_buffer_info->skb = NULL;
+		}
+		if (!rx_buffer_info->page)
+			continue;
+		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+		rx_buffer_info->page_dma = 0;
+
+		put_page(rx_buffer_info->page);
+		rx_buffer_info->page = NULL;
+	}
+
+	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->head);
+	writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbe_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
+				struct ixgbe_ring *tx_ring)
+{
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+	}
+
+	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->head);
+	writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+void ixgbe_down(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 rxctrl;
+
+	/* signal that we are down to the interrupt handler */
+	set_bit(__IXGBE_DOWN, &adapter->state);
+
+	/* disable receives */
+	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
+			rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+	netif_tx_disable(netdev);
+
+	/* disable transmits in the hardware */
+
+	/* flush both disables */
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	msleep(10);
+
+	ixgbe_irq_disable(adapter);
+
+	netif_poll_disable(netdev);
+	del_timer_sync(&adapter->watchdog_timer);
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	ixgbe_reset(adapter);
+	ixgbe_clean_all_tx_rings(adapter);
+	ixgbe_clean_all_rx_rings(adapter);
+}
+
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_free_irq(adapter);
+	}
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static void ixgbe_shutdown(struct pci_dev *pdev)
+{
+	ixgbe_suspend(pdev, PMSG_SUSPEND);
+}
+
+/**
+ * ixgbe_clean - NAPI Rx polling callback
+ * @poll_dev: network interface device structure
+ * @budget: pointer to the remaining polling budget
+ **/
+static int ixgbe_clean(struct net_device *poll_dev, int *budget)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(poll_dev);
+	int work_to_do = min(*budget, poll_dev->quota);
+	int tx_cleaned = 0, work_done = 0;
+
+	/* Keep link state information with original netdev */
+	if (!netif_carrier_ok(adapter->netdev))
+		goto quit_polling;
+
+	/* In non-MSIX case, there is no multi-Tx/Rx queue */
+	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
+	ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
+			   work_to_do);
+
+	*budget -= work_done;
+	poll_dev->quota -= work_done;
+
+	/* If no Tx and not enough Rx work done, exit the polling mode */
+	if ((!tx_cleaned && (work_done == 0)) ||
+	    !netif_running(adapter->netdev)) {
+quit_polling:
+		netif_rx_complete(poll_dev);
+		ixgbe_irq_enable(adapter);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ixgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbe_tx_timeout(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->reset_task);
+}
+
+static void ixgbe_reset_task(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	adapter->tx_timeout_count++;
+
+	ixgbe_down(adapter);
+	ixgbe_up(adapter);
+}
+
+/**
+ * ixgbe_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one Tx ring and one Rx ring per queue at run-time since
+ * we don't know the number of queues at compile-time.
+ **/
+static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+				   sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+				   sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->rx_ring[i].adapter = adapter;
+		adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
+		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* default flow control settings */
+	hw->fc.original_type = ixgbe_fc_full;
+	hw->fc.type = ixgbe_fc_full;
+
+	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
+	if (hw->mac.ops.reset(hw)) {
+		dev_err(&pdev->dev, "HW Init failed\n");
+		return -EIO;
+	}
+	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
+					 false)) {
+		dev_err(&pdev->dev, "Link Speed setup failed\n");
+		return -EIO;
+	}
+
+	/* initialize eeprom parameters */
+	if (ixgbe_init_eeprom(hw)) {
+		dev_err(&pdev->dev, "EEPROM initialization failed\n");
+		return -EIO;
+	}
+
+	/* Set the default values */
+	adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
+	adapter->num_tx_queues = 1;
+	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	if (ixgbe_alloc_queues(adapter)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	atomic_set(&adapter->irq_sem, 1);
+	set_bit(__IXGBE_DOWN, &adapter->state);
+
+	return 0;
+}
+
+/**
+ * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
+			     struct ixgbe_ring *txdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
+	txdr->tx_buffer_info = vmalloc(size);
+	if (!txdr->tx_buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(txdr->tx_buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
+	txdr->size = ALIGN(txdr->size, 4096);
+
+	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	if (!txdr->desc) {
+		vfree(txdr->tx_buffer_info);
+		DPRINTK(PROBE, ERR,
+			"Memory allocation failed for the tx desc ring\n");
+		return -ENOMEM;
+	}
+
+	txdr->adapter = adapter;
+	txdr->next_to_use = 0;
+	txdr->next_to_clean = 0;
+	txdr->work_limit = txdr->count;
+	spin_lock_init(&txdr->tx_lock);
+
+	return 0;
+}
+
+/**
+ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+			     struct ixgbe_ring *rxdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
+	rxdr->rx_buffer_info = vmalloc(size);
+	if (!rxdr->rx_buffer_info) {
+		DPRINTK(PROBE, ERR,
+			"vmalloc allocation failed for the rx desc ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->rx_buffer_info, 0, size);
+
+	desc_len = sizeof(union ixgbe_adv_rx_desc);
+
+	/* Round up to nearest 4K */
+	rxdr->size = rxdr->count * desc_len;
+	rxdr->size = ALIGN(rxdr->size, 4096);
+
+	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+			"Memory allocation failed for the rx desc ring\n");
+		vfree(rxdr->rx_buffer_info);
+		return -ENOMEM;
+	}
+
+	rxdr->next_to_clean = 0;
+	rxdr->next_to_use = 0;
+	rxdr->adapter = adapter;
+
+	return 0;
+}
+
+/**
+ * ixgbe_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
+				    struct ixgbe_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ixgbe_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * ixgbe_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
+				    struct ixgbe_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ixgbe_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then one or more of the
+ * rings may already be populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then one or more of the
+ * rings may already be populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_change_mtu - Change the Maximum Transmission Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
+	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+		return -EINVAL;
+
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev)) {
+		ixgbe_down(adapter);
+		ixgbe_up(adapter);
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbe_open(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int err;
+	u32 ctrl_ext;
+	u32 num_rx_queues = adapter->num_rx_queues;
+
+	/* Let firmware know the driver has taken over */
+	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+
+try_intr_reinit:
+	/* allocate transmit descriptors */
+	err = ixgbe_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		num_rx_queues = 1;
+		adapter->num_rx_queues = num_rx_queues;
+	}
+
+	/* allocate receive descriptors */
+	err = ixgbe_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	ixgbe_configure(adapter);
+
+	err = ixgbe_request_irq(adapter, &num_rx_queues);
+	if (err)
+		goto err_req_irq;
+
+	/* ixgbe_request_irq might have reduced num_rx_queues */
+	if (num_rx_queues < adapter->num_rx_queues) {
+		/* We didn't get MSI-X, so we need to release everything,
+		 * set our Rx queue count to num_rx_queues, and redo the
+		 * whole init process.
+		 */
+		ixgbe_free_irq(adapter);
+		if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+			pci_disable_msi(adapter->pdev);
+			adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+		}
+		ixgbe_free_all_rx_resources(adapter);
+		ixgbe_free_all_tx_resources(adapter);
+		adapter->num_rx_queues = num_rx_queues;
+
+		/* Reset the hardware, and start over. */
+		ixgbe_reset(adapter);
+
+		goto try_intr_reinit;
+	}
+
+	err = ixgbe_up_complete(adapter);
+	if (err)
+		goto err_up;
+
+	return 0;
+
+err_up:
+	ixgbe_free_irq(adapter);
+err_req_irq:
+	ixgbe_free_all_rx_resources(adapter);
+err_setup_rx:
+	ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
+	ixgbe_reset(adapter);
+
+	return err;
+}
+
+/**
+ * ixgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbe_close(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl_ext;
+
+	ixgbe_down(adapter);
+	ixgbe_free_irq(adapter);
+
+	ixgbe_free_all_tx_resources(adapter);
+	ixgbe_free_all_rx_resources(adapter);
+
+	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+
+	return 0;
+}
+
+/**
+ * ixgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbe_update_stats(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 good_rx, missed_rx, bprc;
+
+	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
+	missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
+	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
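+	/* subtract missed frames from the good-packet count so frames
+	 * dropped for lack of receive buffers are not reported as received */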
+	adapter->stats.gprc += (good_rx - missed_rx);
+
+	adapter->stats.mpc[0] += missed_rx;
+	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+	adapter->stats.bprc += bprc;
+	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+	adapter->stats.mprc -= bprc;
+	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+	adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+	adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+	adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
+	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+
+	/* Fill out the OS statistics structure */
+	adapter->net_stats.rx_packets = adapter->stats.gprc;
+	adapter->net_stats.tx_packets = adapter->stats.gptc;
+	adapter->net_stats.rx_bytes = adapter->stats.gorc;
+	adapter->net_stats.tx_bytes = adapter->stats.gotc;
+	adapter->net_stats.multicast = adapter->stats.mprc;
+
+	/* Rx Errors */
+	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
+						adapter->stats.rlec;
+	adapter->net_stats.rx_dropped = 0;
+	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
+	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+	adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
+}
+
+/**
+ * ixgbe_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbe_watchdog(unsigned long data)
+{
+	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+	struct net_device *netdev = adapter->netdev;
+	bool link_up;
+	u32 link_speed = 0;
+
+	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
+
+	if (link_up) {
+		if (!netif_carrier_ok(netdev)) {
+			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
+#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
+#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
+			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
+				"Flow Control: %s\n",
+				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+				 "10 Gbps" :
+				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+				  "1 Gbps" : "unknown speed")),
+				((FLOW_RX && FLOW_TX) ? "RX/TX" :
+				 (FLOW_RX ? "RX" :
+				 (FLOW_TX ? "TX" : "None"))));
+
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		} else {
+			/* Force detection of hung controller */
+			adapter->detect_tx_hung = true;
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+
+	ixgbe_update_stats(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer,
+			  round_jiffies(jiffies + 2 * HZ));
+}
+
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
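+/*
+ * Worked example: a 9014-byte linear buffer needs TXD_USE_COUNT(9014) =
+ * (9014 >> 14) + 1 = 1 descriptor, while a 40000-byte one needs
+ * (40000 >> 14) + 1 = 3 descriptors of at most 16384 bytes each.
+ */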
+
+static int ixgbe_tso(struct ixgbe_adapter *adapter,
+			 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+			 u32 tx_flags, u8 *hdr_len)
+{
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	int err;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+	u32 mss_l4len_idx = 0, l4len;
+	*hdr_len = 0;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+		l4len = tcp_hdrlen(skb);
+		*hdr_len += l4len;
+
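+		/*
+		 * For TSO the hardware computes the TCP checksum of each
+		 * segment, so seed the checksum field with the pseudo-header
+		 * checksum (length zeroed) and clear the IP total length,
+		 * which the hardware fills in per segment.
+		 */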
+		if (skb->protocol == ntohs(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			adapter->hw_tso_ctxt++;
+		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+			adapter->hw_tso6_ctxt++;
+		}
+
+		i = tx_ring->next_to_use;
+
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		/* VLAN MACLEN IPLEN */
+		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+			vlan_macip_lens |=
+			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+		vlan_macip_lens |= ((skb_network_offset(skb)) <<
+				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		*hdr_len += skb_network_offset(skb);
+		vlan_macip_lens |=
+		    (skb_transport_header(skb) - skb_network_header(skb));
+		*hdr_len +=
+		    (skb_transport_header(skb) - skb_network_header(skb));
+		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+		context_desc->seqnum_seed = 0;
+
+		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+				    IXGBE_ADVTXD_DTYP_CTXT);
+
+		if (skb->protocol == ntohs(ETH_P_IP))
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+		/* MSS L4LEN IDX */
+		mss_l4len_idx |=
+		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+	return false;
+}
+
+static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *tx_ring,
+				   struct sk_buff *skb, u32 tx_flags)
+{
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL ||
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+		i = tx_ring->next_to_use;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+			vlan_macip_lens |=
+			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+		vlan_macip_lens |= (skb_network_offset(skb) <<
+				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			vlan_macip_lens |= (skb_transport_header(skb) -
+					    skb_network_header(skb));
+
+		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+		context_desc->seqnum_seed = 0;
+
+		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+				    IXGBE_ADVTXD_DTYP_CTXT);
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb->protocol == ntohs(ETH_P_IP))
+				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+
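+			/* this assumes skb->sk is valid here, which holds
+			 * for locally generated TCP/UDP traffic */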
+			if (skb->sk->sk_protocol == IPPROTO_TCP)
+				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		}
+
+		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+		context_desc->mss_l4len_idx = 0;
+
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+		adapter->hw_csum_tx_good++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+	return false;
+}
+
+static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
+			struct ixgbe_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first)
+{
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	unsigned int len = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int f;
+
+	len -= skb->data_len;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
+
+		tx_buffer_info->length = size;
+		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+						  skb->data + offset,
+						  size, PCI_DMA_TODEVICE);
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+		offset = frag->page_offset;
+
+		while (len) {
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
+
+			tx_buffer_info->length = size;
+			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+							frag->page,
+							offset,
+							size, PCI_DMA_TODEVICE);
+			tx_buffer_info->time_stamp = jiffies;
+			tx_buffer_info->next_to_watch = i;
+
+			len -= size;
+			offset += size;
+			count++;
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+	}
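+	/*
+	 * i now points one past the last descriptor used; step back so the
+	 * skb and next_to_watch are recorded on the final descriptor.
+	 */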
+	if (i == 0)
+		i = tx_ring->count - 1;
+	else
+		i = i - 1;
+	tx_ring->tx_buffer_info[i].skb = skb;
+	tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
+static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
+			       struct ixgbe_ring *tx_ring,
+			       int tx_flags, int count, u32 paylen, u8 hdr_len)
+{
+	union ixgbe_adv_tx_desc *tx_desc = NULL;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	u32 olinfo_status = 0, cmd_type_len = 0;
+	unsigned int i;
+	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+						IXGBE_ADVTXD_POPTS_SHIFT;
+
+		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+						IXGBE_ADVTXD_POPTS_SHIFT;
+
+	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+						IXGBE_ADVTXD_POPTS_SHIFT;
+
+	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	i = tx_ring->next_to_use;
+	while (count--) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+		tx_desc->read.cmd_type_len =
+			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_ring *tx_ring;
+	unsigned int len = skb->len;
+	unsigned int first;
+	unsigned int tx_flags = 0;
+	unsigned long flags = 0;
+	u8 hdr_len;
+	int tso;
+	unsigned int mss = 0;
+	int count = 0;
+	unsigned int f;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	len -= skb->data_len;
+
+	tx_ring = adapter->tx_ring;
+
+	if (skb->len <= 0) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	mss = skb_shinfo(skb)->gso_size;
+
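+	/* a TSO or checksum offload consumes one extra context descriptor */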
+	if (mss)
+		count++;
+	else if (skb->ip_summed == CHECKSUM_PARTIAL)
+		count++;
+
+	count += TXD_USE_COUNT(len);
+	for (f = 0; f < nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+	if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+		adapter->tx_busy++;
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		tx_flags |= IXGBE_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
+	}
+
+	if (skb->protocol == ntohs(ETH_P_IP))
+		tx_flags |= IXGBE_TX_FLAGS_IPV4;
+	first = tx_ring->next_to_use;
+	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (tso)
+		tx_flags |= IXGBE_TX_FLAGS_TSO;
+	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+		 (skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
+			   ixgbe_tx_map(adapter, tx_ring, skb, first),
+			   skb->len, hdr_len);
+
+	netdev->trans_start = jiffies;
+
+	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+	/* Make sure there is space in the ring for the next send. */
+	if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
+		netif_stop_queue(netdev);
+	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbe_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * ixgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_set_mac(struct net_device *netdev, void *p)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ixgbe_netpoll(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	disable_irq(adapter->pdev->irq);
+	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
+	ixgbe_intr(adapter->pdev->irq, netdev, NULL);
+	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+	enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * ixgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbe_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct ixgbe_adapter *adapter = NULL;
+	struct ixgbe_hw *hw;
+	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
+	unsigned long mmio_start, mmio_len;
+	static int cards_found;
+	int i, err, pci_using_dac;
+	u16 link_status, link_speed, link_width;
+	u32 part_num;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			if (err) {
+				dev_err(&pdev->dev, "No usable DMA "
+					"configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_dac = 0;
+	}
+
+	err = pci_request_regions(pdev, ixgbe_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+
+	hw->hw_addr = ioremap(mmio_start, mmio_len);
+	if (!hw->hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	/* BARs 1-5 are not mapped by this driver; nothing to do for them */
+	for (i = 1; i <= 5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+	}
+
+	netdev->open = &ixgbe_open;
+	netdev->stop = &ixgbe_close;
+	netdev->hard_start_xmit = &ixgbe_xmit_frame;
+	netdev->get_stats = &ixgbe_get_stats;
+	netdev->set_multicast_list = &ixgbe_set_multi;
+	netdev->set_mac_address = &ixgbe_set_mac;
+	netdev->change_mtu = &ixgbe_change_mtu;
+	ixgbe_set_ethtool_ops(netdev);
+	netdev->tx_timeout = &ixgbe_tx_timeout;
+	netdev->watchdog_timeo = 5 * HZ;
+	netdev->poll = &ixgbe_clean;
+	netdev->weight = 64;
+	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
+	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
+	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = ixgbe_netpoll;
+#endif
+	strcpy(netdev->name, pci_name(pdev));
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+
+	adapter->bd_number = cards_found;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	/* Setup hw api */
+	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+
+	err = ii->get_invariants(hw);
+	if (err)
+		goto err_hw_init;
+
+	/* setup the private structure */
+	err = ixgbe_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_HW_CSUM |
+			   NETIF_F_HW_VLAN_TX |
+			   NETIF_F_HW_VLAN_RX |
+			   NETIF_F_HW_VLAN_FILTER;
+
+	netdev->features |= NETIF_F_TSO;
+
+	netdev->features |= NETIF_F_TSO6;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	/* make sure the EEPROM is good */
+	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
+		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+
+	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &ixgbe_watchdog;
+	adapter->watchdog_timer.data = (unsigned long)adapter;
+
+	INIT_WORK(&adapter->reset_task,
+		(void (*)(void *)) ixgbe_reset_task, netdev);
+
+	/* initialize default flow control settings */
+	hw->fc.original_type = ixgbe_fc_full;
+	hw->fc.type = ixgbe_fc_full;
+	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
+	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+
+	/* Interrupt Throttle Rate */
+	adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
+	adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
+
+	/* print bus type/speed/width info */
+	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
+	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
+	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
+	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
+		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
+		 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
+		 "Unknown"),
+		((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
+		 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
+		 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
+		 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
+		 "Unknown"),
+		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+	ixgbe_read_part_num(hw, &part_num);
+	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+		 hw->mac.type, hw->phy.type,
+		 (part_num >> 8), (part_num & 0xff));
+
+	/* reset the hardware with the new settings */
+	ixgbe_start_hw(hw);
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
+	cards_found++;
+	return 0;
+
+err_register:
+err_hw_init:
+err_sw_init:
+err_eeprom:
+	iounmap(hw->hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * ixgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbe_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	set_bit(__IXGBE_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+
+	flush_scheduled_work();
+
+	unregister_netdev(netdev);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(adapter->hw.hw_addr);
+	pci_release_regions(pdev);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * ixgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev))
+		ixgbe_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (pci_enable_device(pdev)) {
+		DPRINTK(PROBE, ERR,
+			"Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	ixgbe_reset(adapter);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void ixgbe_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev)) {
+		if (ixgbe_up(adapter)) {
+			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+}
+
+static struct pci_error_handlers ixgbe_err_handler = {
+	.error_detected = ixgbe_io_error_detected,
+	.slot_reset = ixgbe_io_slot_reset,
+	.resume = ixgbe_io_resume,
+};
+
+static struct pci_driver ixgbe_driver = {
+	.name     = ixgbe_driver_name,
+	.id_table = ixgbe_pci_tbl,
+	.probe    = ixgbe_probe,
+	.remove   = __devexit_p(ixgbe_remove),
+#ifdef CONFIG_PM
+	.suspend  = ixgbe_suspend,
+	.resume   = ixgbe_resume,
+#endif
+	.shutdown = ixgbe_shutdown,
+	.err_handler = &ixgbe_err_handler
+};
+
+/**
+ * ixgbe_init_module - Driver Registration Routine
+ *
+ * ixgbe_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbe_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
+	       ixgbe_driver_string, ixgbe_driver_version);
+
+	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
+
+	ret = pci_register_driver(&ixgbe_driver);
+	return ret;
+}
+module_init(ixgbe_init_module);
+
+/**
+ * ixgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbe_exit_module(void)
+{
+	pci_unregister_driver(&ixgbe_driver);
+}
+module_exit(ixgbe_exit_module);
+
+/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
new file mode 100644
index 0000000..8002931
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -0,0 +1,494 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			       u32 device_type, u16 phy_data);
+
+/**
+ *  ixgbe_identify_phy - Get physical layer module
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	u32 phy_addr;
+
+	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+		if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+			hw->phy.addr = phy_addr;
+			ixgbe_get_phy_id(hw);
+			hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
+			status = 0;
+			break;
+		}
+	}
+	return status;
+}
+
+/**
+ *  ixgbe_validate_phy_addr - Determines whether a PHY address is valid
+ *  @hw: pointer to hardware structure
+ *  @phy_addr: PHY address to probe
+ **/
+static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+	u16 phy_id = 0;
+	bool valid = false;
+
+	hw->phy.addr = phy_addr;
+	ixgbe_read_phy_reg(hw,
+			   IXGBE_MDIO_PHY_ID_HIGH,
+			   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+			   &phy_id);
+
+	if (phy_id != 0xFFFF && phy_id != 0x0)
+		valid = true;
+
+	return valid;
+}
+
+/**
+ *  ixgbe_get_phy_id - Get the PHY ID
+ *  @hw: pointer to hardware structure
+ *
+ **/
+static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+	s32 status;
+	u16 phy_id_high = 0;
+	u16 phy_id_low = 0;
+
+	status = ixgbe_read_phy_reg(hw,
+				   IXGBE_MDIO_PHY_ID_HIGH,
+				   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+				   &phy_id_high);
+
+	if (status == 0) {
+		hw->phy.id = (u32)(phy_id_high << 16);
+		status = ixgbe_read_phy_reg(hw,
+					   IXGBE_MDIO_PHY_ID_LOW,
+					   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+					   &phy_id_low);
+		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+	}
+
+	return status;
+}
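
ixgbe_get_phy_id() splices two 16-bit MDIO registers into one 32-bit ID, keeping the low nibble of the second register as the silicon revision. A worked example using the TN1010 ID defined later in this patch (the register values are invented to match it):

    #include <stdint.h>
    #include <stdio.h>

    #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0

    int main(void)
    {
        uint16_t phy_id_high = 0x00A1;   /* MDIO device 1, register 2 */
        uint16_t phy_id_low = 0x9412;    /* MDIO device 1, register 3 */
        uint32_t id, rev;

        id = (uint32_t)phy_id_high << 16;
        id |= (uint32_t)(phy_id_low & IXGBE_PHY_REVISION_MASK);
        rev = (uint32_t)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);

        /* prints id=0x00A19410 rev=2 (0x00A19410 == TN1010_PHY_ID) */
        printf("id=0x%08X rev=%u\n", id, rev);
        return 0;
    }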
+
+/**
+ *  ixgbe_get_phy_type_from_id - Get the phy type
+ *  @phy_id: 32-bit PHY ID to map to a PHY type
+ *
+ **/
+static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+	enum ixgbe_phy_type phy_type;
+
+	switch (phy_id) {
+	case TN1010_PHY_ID:
+		phy_type = ixgbe_phy_tn;
+		break;
+	case QT2022_PHY_ID:
+		phy_type = ixgbe_phy_qt;
+		break;
+	default:
+		phy_type = ixgbe_phy_unknown;
+		break;
+	}
+
+	return phy_type;
+}
+
+/**
+ *  ixgbe_reset_phy - Performs a PHY reset
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+	/*
+	 * Perform soft PHY reset to the PHY_XS.
+	 * This will cause a soft reset to the PHY
+	 */
+	return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+				   IXGBE_MDIO_PHY_XS_DEV_TYPE,
+				   IXGBE_MDIO_PHY_XS_RESET);
+}
+
+/**
+ *  ixgbe_read_phy_reg - Reads a value from a specified PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: 5 bit device type
+ *  @phy_data: pointer to storage for the value read from the PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+		       u32 device_type, u16 *phy_data)
+{
+	u32 command;
+	u32 i;
+	u32 timeout = 10;
+	u32 data;
+	s32 status = 0;
+	u16 gssr;
+
+	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+		gssr = IXGBE_GSSR_PHY1_SM;
+	else
+		gssr = IXGBE_GSSR_PHY0_SM;
+
+	if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+		status = IXGBE_ERR_SWFW_SYNC;
+
+	if (status == 0) {
+		/* Setup and write the address cycle command */
+		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+		/*
+		 * Check every 10 usec to see if the address cycle completed.
+		 * The MDI Command bit will clear when the operation is
+		 * complete
+		 */
+		for (i = 0; i < timeout; i++) {
+			udelay(10);
+
+			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+				break;
+		}
+
+		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+			hw_dbg(hw, "PHY address command did not complete.\n");
+			status = IXGBE_ERR_PHY;
+		}
+
+		if (status == 0) {
+			/*
+			 * Address cycle complete, setup and write the read
+			 * command
+			 */
+			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+				   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+			/*
+			 * Check every 10 usec to see if the address cycle
+			 * completed. The MDI Command bit will clear when the
+			 * operation is complete
+			 */
+			for (i = 0; i < timeout; i++) {
+				udelay(10);
+
+				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+					break;
+			}
+
+			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+				hw_dbg(hw,
+				       "PHY read command didn't complete\n");
+				status = IXGBE_ERR_PHY;
+			} else {
+				/*
+				 * Read operation is complete.  Get the data
+				 * from MSRWD
+				 */
+				data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+				data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+				*phy_data = (u16)(data);
+			}
+		}
+
+		ixgbe_release_swfw_sync(hw, gssr);
+	}
+	return status;
+}
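
Every MDIO cycle above is driven by a single 32-bit MSCA command word built from the shift and opcode definitions that ixgbe_type.h adds below. A standalone sketch of the read-cycle composition (shifts and opcodes copied from this patch; the register, device, and PHY address values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define IXGBE_MSCA_NP_ADDR_SHIFT  0
    #define IXGBE_MSCA_DEV_TYPE_SHIFT 16
    #define IXGBE_MSCA_PHY_ADDR_SHIFT 21
    #define IXGBE_MSCA_READ           0x08000000   /* OP CODE 10 (read) */
    #define IXGBE_MSCA_MDI_COMMAND    0x40000000   /* start the cycle */

    int main(void)
    {
        uint32_t reg_addr = 0x2;   /* PHY ID high register */
        uint32_t dev_type = 0x1;   /* PMA/PMD device */
        uint32_t phy_addr = 0x0;
        uint32_t command;

        command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
                  (dev_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
                  (phy_addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
                  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND);

        printf("MSCA = 0x%08X\n", command);   /* 0x48010002 */
        return 0;
    }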
+
+/**
+ *  ixgbe_write_phy_reg - Writes a value to specified PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 5 bit device type
+ *  @phy_data: Data to write to the PHY register
+ **/
+static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			       u32 device_type, u16 phy_data)
+{
+	u32 command;
+	u32 i;
+	u32 timeout = 10;
+	s32 status = 0;
+	u16 gssr;
+
+	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+		gssr = IXGBE_GSSR_PHY1_SM;
+	else
+		gssr = IXGBE_GSSR_PHY0_SM;
+
+	if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+		status = IXGBE_ERR_SWFW_SYNC;
+
+	if (status == 0) {
+		/* Put the data in the MDI single read and write data register*/
+		IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+		/* Setup and write the address cycle command */
+		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+		/*
+		 * Check every 10 usec to see if the address cycle completed.
+		 * The MDI Command bit will clear when the operation is
+		 * complete
+		 */
+		for (i = 0; i < timeout; i++) {
+			udelay(10);
+
+			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+				break;
+		}
+
+		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+			hw_dbg(hw, "PHY address cmd didn't complete\n");
+			status = IXGBE_ERR_PHY;
+		}
+
+		if (status == 0) {
+			/*
+			 * Address cycle complete, setup and write the write
+			 * command
+			 */
+			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+				   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+			/*
+			 * Check every 10 usec to see if the address cycle
+			 * completed. The MDI Command bit will clear when the
+			 * operation is complete
+			 */
+			for (i = 0; i < timeout; i++) {
+				udelay(10);
+
+				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+					break;
+			}
+
+			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+				hw_dbg(hw, "PHY write command did not "
+					  "complete.\n");
+				status = IXGBE_ERR_PHY;
+			}
+		}
+
+		ixgbe_release_swfw_sync(hw, gssr);
+	}
+
+	return status;
+}
+
+/**
+ *  ixgbe_setup_tnx_phy_link - Set and restart autoneg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restarts autonegotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_NOT_IMPLEMENTED;
+	u32 time_out;
+	u32 max_time_out = 10;
+	u16 autoneg_speed_selection_register = 0x10;
+	u16 autoneg_restart_mask = 0x0200;
+	u16 autoneg_complete_mask = 0x0020;
+	u16 autoneg_reg = 0;
+
+	/*
+	 * Set advertisement settings in PHY based on autoneg_advertised
+	 * settings.  If autoneg_advertised = 0, then advertise default
+	 * values.  tnx devices cannot be "forced" to autonegotiate at 10G
+	 * and will fail; they can, however, be forced to 1G.
+	 */
+	ixgbe_read_phy_reg(hw,
+		  autoneg_speed_selection_register,
+		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		  &autoneg_reg);
+
+	if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
+		autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
+	else
+		autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
+
+	ixgbe_write_phy_reg(hw,
+		  autoneg_speed_selection_register,
+		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		  autoneg_reg);
+
+	/* Restart PHY autonegotiation and wait for completion */
+	ixgbe_read_phy_reg(hw,
+		  IXGBE_MDIO_AUTO_NEG_CONTROL,
+		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		  &autoneg_reg);
+
+	autoneg_reg |= autoneg_restart_mask;
+
+	ixgbe_write_phy_reg(hw,
+		  IXGBE_MDIO_AUTO_NEG_CONTROL,
+		  IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		  autoneg_reg);
+
+	/* Wait for autonegotiation to finish */
+	for (time_out = 0; time_out < max_time_out; time_out++) {
+		udelay(10);
+		/* Read the autonegotiation status */
+		status = ixgbe_read_phy_reg(hw,
+					    IXGBE_MDIO_AUTO_NEG_STATUS,
+					    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+					    &autoneg_reg);
+
+		autoneg_reg &= autoneg_complete_mask;
+		if (autoneg_reg == autoneg_complete_mask) {
+			status = 0;
+			break;
+		}
+	}
+
+	if (time_out == max_time_out)
+		status = IXGBE_ERR_LINK_SETUP;
+
+	return status;
+}
+
+/**
+ *  ixgbe_check_tnx_phy_link - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to the resolved link speed
+ *  @link_up: set to true when link is up
+ *
+ *  Reads the VS1 register to determine if link is up and the current speed for
+ *  the PHY.
+ **/
+s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
+			     bool *link_up)
+{
+	s32 status = 0;
+	u32 time_out;
+	u32 max_time_out = 10;
+	u16 phy_link = 0;
+	u16 phy_speed = 0;
+	u16 phy_data = 0;
+
+	/* Initialize speed and link to default case */
+	*link_up = false;
+	*speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+	/*
+	 * Check current speed and link status of the PHY register.
+	 * This is a vendor specific register and may have to
+	 * be changed for other copper PHYs.
+	 */
+	for (time_out = 0; time_out < max_time_out; time_out++) {
+		udelay(10);
+		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+			*link_up = true;
+			if (phy_speed ==
+			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+				*speed = IXGBE_LINK_SPEED_1GB_FULL;
+			break;
+		} else {
+			status = ixgbe_read_phy_reg(hw,
+				     IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+				     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+				     &phy_data);
+			phy_link = phy_data &
+				IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+			phy_speed = phy_data &
+				IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+		}
+	}
+
+	return status;
+}
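
The polling loop above extracts two flags from the vendor-specific status word. A minimal decode of the same VS1 bits (masks copied from the definitions later in this patch; the sample register value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define VS1_LINK_STATUS  0x0008   /* 1 = link up */
    #define VS1_SPEED_STATUS 0x0010   /* 0 = 10G, 1 = 1G */

    int main(void)
    {
        uint16_t phy_data = 0x0018;   /* sample: link up at 1G */
        int link_up = !!(phy_data & VS1_LINK_STATUS);
        int is_1g = !!(phy_data & VS1_SPEED_STATUS);

        printf("link=%d speed=%s\n", link_up, is_1g ? "1G" : "10G");
        return 0;
    }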
+
+/**
+ *  ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *  @autoneg_wait_to_complete: true to wait for autonegotiation to finish
+ **/
+s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
+				   bool autoneg,
+				   bool autoneg_wait_to_complete)
+{
+	/*
+	 * Clear autoneg_advertised and set new values based on input link
+	 * speed.
+	 */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	/* Setup link based on the new speed settings */
+	ixgbe_setup_tnx_phy_link(hw);
+
+	return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
new file mode 100644
index 0000000..aa3ea72
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -0,0 +1,49 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
+			       bool autoneg_wait_to_complete);
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			       u32 device_type, u16 *phy_data);
+
+/* PHY specific */
+s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
+s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
+				  bool autoneg_wait_to_complete);
+
+#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
new file mode 100644
index 0000000..37f4e4d
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -0,0 +1,1334 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include "ixgbe_compat.h"
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID   0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT_DUAL_PORT   0x10C8
+#define IXGBE_DEV_ID_82598EB_CX4         0x10DD
+
+/* General Registers */
+#define IXGBE_CTRL      0x00000
+#define IXGBE_STATUS    0x00008
+#define IXGBE_CTRL_EXT  0x00018
+#define IXGBE_ESDP      0x00020
+#define IXGBE_EODSDP    0x00028
+#define IXGBE_LEDCTL    0x00200
+#define IXGBE_FRTIMER   0x00048
+#define IXGBE_TCPTIMER  0x0004C
+
+/* NVM Registers */
+#define IXGBE_EEC       0x10010
+#define IXGBE_EERD      0x10014
+#define IXGBE_FLA       0x1001C
+#define IXGBE_EEMNGCTL  0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL  0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT  0x10120
+#define IXGBE_FLOP      0x1013C
+#define IXGBE_GRC       0x10200
+
+/* Interrupt Registers */
+#define IXGBE_EICR      0x00800
+#define IXGBE_EICS      0x00808
+#define IXGBE_EIMS      0x00880
+#define IXGBE_EIMC      0x00888
+#define IXGBE_EIAC      0x00810
+#define IXGBE_EIAM      0x00890
+#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_MSIXT     0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA   0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL     0x11068
+#define IXGBE_GPIE      0x00898
+
+/* Flow Control Registers */
+#define IXGBE_PFCTOP    0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV     0x032A0
+#define IXGBE_TFCS      0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/
+#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40))
+#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40))
+#define IXGBE_RDH(_i)   (0x01010 + ((_i) * 0x40))
+#define IXGBE_RDT(_i)   (0x01018 + ((_i) * 0x40))
+#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40))
+#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40))
+#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4))
+					     /* array of 16 (0x02100-0x0213C) */
+#define IXGBE_DCA_RXCTRL(_i)    (0x02200 + ((_i) * 4))
+					     /* array of 16 (0x02200-0x0223C) */
+#define IXGBE_RDRXCTL    0x02F00
+#define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
+					     /* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXCTRL    0x03000
+#define IXGBE_DROPEN    0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
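
All of the per-queue registers above use the same fixed-stride scheme: a base offset plus 0x40 (or 4) bytes per queue index. A one-liner showing the arithmetic for the Rx tail register of queue 3:

    #include <stdio.h>

    #define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40))   /* copied from above */

    int main(void)
    {
        /* 0x1018 + 3 * 0x40 = 0x10D8 */
        printf("RDT(3) = 0x%05X\n", IXGBE_RDT(3));
        return 0;
    }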
+
+/* Receive Registers */
+#define IXGBE_RXCSUM    0x05000
+#define IXGBE_RFCTL     0x05008
+#define IXGBE_MTA(_i)   (0x05200 + ((_i) * 4))
+				   /* Multicast Table Array - 128 entries */
+#define IXGBE_RAL(_i)   (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */
+#define IXGBE_RAH(_i)   (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */
+#define IXGBE_PSRTYPE   0x05480
+				   /* 0x5480-0x54BC Packet split receive type */
+#define IXGBE_VFTA(_i)  (0x0A000 + ((_i) * 4))
+					 /* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+				     /* array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_FCTRL     0x05080
+#define IXGBE_VLNCTRL   0x05088
+#define IXGBE_MCSTCTRL  0x05090
+#define IXGBE_MRQC      0x05818
+#define IXGBE_VMD_CTL   0x0581C
+#define IXGBE_IMIR(_i)  (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIRVP    0x05AC0
+#define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i)   (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i)   (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL    0x07E00
+#define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4))
+					      /* there are 16 of these (0-15) */
+#define IXGBE_TIPG      0x0CB00
+#define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 0x04))
+						      /* there are 8 of these */
+#define IXGBE_MNGTXMAP  0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT    10
+
+/* Wake up registers */
+#define IXGBE_WUC       0x05800
+#define IXGBE_WUFC      0x05808
+#define IXGBE_WUS       0x05810
+#define IXGBE_IPAV      0x05838
+#define IXGBE_IP4AT     0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT     0x05880 /* IPv6 table 0x5880-0x588F */
+#define IXGBE_WUPL      0x05900
+#define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT      0x09000 /* Flex host filter table 9000-93FC */
+
+/* Music registers */
+#define IXGBE_RMCS      0x03D00
+#define IXGBE_DPMCS     0x07F40
+#define IXGBE_PDPMCS    0x0CD00
+#define IXGBE_RUPPBMR   0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i)     (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i)     (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i)     (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+/* Stats registers */
+#define IXGBE_CRCERRS   0x04000
+#define IXGBE_ILLERRC   0x04004
+#define IXGBE_ERRBC     0x04008
+#define IXGBE_MSPDC     0x04010
+#define IXGBE_MPC(_i)   (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC      0x04034
+#define IXGBE_MRFC      0x04038
+#define IXGBE_RLEC      0x04040
+#define IXGBE_LXONTXC   0x03F60
+#define IXGBE_LXONRXC   0x0CF60
+#define IXGBE_LXOFFTXC  0x03F68
+#define IXGBE_LXOFFRXC  0x0CF68
+#define IXGBE_PXONTXC(_i)       (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i)       (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i)      (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i)      (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64     0x0405C
+#define IXGBE_PRC127    0x04060
+#define IXGBE_PRC255    0x04064
+#define IXGBE_PRC511    0x04068
+#define IXGBE_PRC1023   0x0406C
+#define IXGBE_PRC1522   0x04070
+#define IXGBE_GPRC      0x04074
+#define IXGBE_BPRC      0x04078
+#define IXGBE_MPRC      0x0407C
+#define IXGBE_GPTC      0x04080
+#define IXGBE_GORCL     0x04088
+#define IXGBE_GORCH     0x0408C
+#define IXGBE_GOTCL     0x04090
+#define IXGBE_GOTCH     0x04094
+#define IXGBE_RNBC(_i)  (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC       0x040A4
+#define IXGBE_RFC       0x040A8
+#define IXGBE_ROC       0x040AC
+#define IXGBE_RJC       0x040B0
+#define IXGBE_MNGPRC    0x040B4
+#define IXGBE_MNGPDC    0x040B8
+#define IXGBE_MNGPTC    0x0CF90
+#define IXGBE_TORL      0x040C0
+#define IXGBE_TORH      0x040C4
+#define IXGBE_TPR       0x040D0
+#define IXGBE_TPT       0x040D4
+#define IXGBE_PTC64     0x040D8
+#define IXGBE_PTC127    0x040DC
+#define IXGBE_PTC255    0x040E0
+#define IXGBE_PTC511    0x040E4
+#define IXGBE_PTC1023   0x040E8
+#define IXGBE_PTC1522   0x040EC
+#define IXGBE_MPTC      0x040F0
+#define IXGBE_BPTC      0x040F4
+#define IXGBE_XEC       0x04120
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
+#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC      0x05820
+#define IXGBE_MFVAL     0x05824
+#define IXGBE_MANC2H    0x05860
+#define IXGBE_MDEF(_i)  (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF     0x058B0
+#define IXGBE_MMAL(_i)  (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i)  (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT      0x09400 /* 0x9400-0x97FC */
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR      0x15F00
+#define IXGBE_FWSTS     0x15F0C
+#define IXGBE_HSMC0R    0x15F04
+#define IXGBE_HSMC1R    0x15F08
+#define IXGBE_SWSR      0x15F10
+#define IXGBE_HFDR      0x15FE8
+#define IXGBE_FLEX_MNG  0x15800 /* 0x15800 - 0x15EFC */
+
+/* PCI-E registers */
+#define IXGBE_GCR       0x11000
+#define IXGBE_GTV       0x11004
+#define IXGBE_FUNCTAG   0x11008
+#define IXGBE_GLT       0x1100C
+#define IXGBE_GSCL_1    0x11010
+#define IXGBE_GSCL_2    0x11014
+#define IXGBE_GSCL_3    0x11018
+#define IXGBE_GSCL_4    0x1101C
+#define IXGBE_GSCN_0    0x11020
+#define IXGBE_GSCN_1    0x11024
+#define IXGBE_GSCN_2    0x11028
+#define IXGBE_GSCN_3    0x1102C
+#define IXGBE_FACTPS    0x10150
+#define IXGBE_PCIEANACTL  0x11040
+#define IXGBE_SWSM      0x10140
+#define IXGBE_FWSM      0x10148
+#define IXGBE_GSSR      0x10160
+#define IXGBE_MREVID    0x11064
+#define IXGBE_DCA_ID    0x11070
+#define IXGBE_DCA_CTRL  0x11074
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN    0x02F08
+#define IXGBE_RIC_DW0   0x02F10
+#define IXGBE_RIC_DW1   0x02F14
+#define IXGBE_RIC_DW2   0x02F18
+#define IXGBE_RIC_DW3   0x02F1C
+#define IXGBE_RDPROBE   0x02F20
+#define IXGBE_TDSTATCTL 0x07C20
+#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN    0x07F08
+#define IXGBE_TIC_DW0   0x07F10
+#define IXGBE_TIC_DW1   0x07F14
+#define IXGBE_TIC_DW2   0x07F18
+#define IXGBE_TIC_DW3   0x07F1C
+#define IXGBE_TDPROBE   0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0  0x0C610
+#define IXGBE_TXBUFDATA1  0x0C614
+#define IXGBE_TXBUFDATA2  0x0C618
+#define IXGBE_TXBUFDATA3  0x0C61C
+#define IXGBE_RXBUFCTRL   0x03600
+#define IXGBE_RXBUFDATA0  0x03610
+#define IXGBE_RXBUFDATA1  0x03614
+#define IXGBE_RXBUFDATA2  0x03618
+#define IXGBE_RXBUFDATA3  0x0361C
+#define IXGBE_PCIE_DIAG(_i)     (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL     0x050A4
+#define IXGBE_MDFTC1    0x042B8
+#define IXGBE_MDFTC2    0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS     0x042CC
+#define IXGBE_RXDATAWRPTR(_i)   (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i)   (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i)   (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i)   (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i)   (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i)   (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i)   (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i)   (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_PBTXECC   0x0C300
+#define IXGBE_PBRXECC   0x03300
+#define IXGBE_GHECCR    0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA  0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0    0x04240
+#define IXGBE_HLREG1    0x04244
+#define IXGBE_PAP       0x04248
+#define IXGBE_MACA      0x0424C
+#define IXGBE_APAE      0x04250
+#define IXGBE_ARD       0x04254
+#define IXGBE_AIS       0x04258
+#define IXGBE_MSCA      0x0425C
+#define IXGBE_MSRWD     0x04260
+#define IXGBE_MLADD     0x04264
+#define IXGBE_MHADD     0x04268
+#define IXGBE_TREG      0x0426C
+#define IXGBE_PCSS1     0x04288
+#define IXGBE_PCSS2     0x0428C
+#define IXGBE_XPCSS     0x04290
+#define IXGBE_SERDESC   0x04298
+#define IXGBE_MACS      0x0429C
+#define IXGBE_AUTOC     0x042A0
+#define IXGBE_LINKS     0x042A4
+#define IXGBE_AUTOC2    0x042A8
+#define IXGBE_AUTOC3    0x042AC
+#define IXGBE_ANLP1     0x042B0
+#define IXGBE_ANLP2     0x042B4
+#define IXGBE_ATLASCTL  0x04800
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN          0x01
+#define IXGBE_RSCCTL_MAXDESC_1      0x00
+#define IXGBE_RSCCTL_MAXDESC_4      0x04
+#define IXGBE_RSCCTL_MAXDESC_8      0x08
+#define IXGBE_RSCCTL_MAXDESC_16     0x0C
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS      0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST      0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST          0x04000000 /* Reset (SW) */
+
+/* FACTPS */
+#define IXGBE_FACTPS_LFS        0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK    0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT   16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_NS_DIS   0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS   0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */
+#define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK      0x0000FFFF /* MDI Address (new protocol) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT     0
+#define IXGBE_MSCA_DEV_TYPE_MASK     0x001F0000 /* Device Type (new protocol) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT    16 /* Register Address (old protocol) */
+#define IXGBE_MSCA_PHY_ADDR_MASK     0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT    21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK      0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT     26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE        0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE             0x04000000 /* OP CODE 01 (write) */
+#define IXGBE_MSCA_READ              0x08000000 /* OP CODE 10 (read) */
+#define IXGBE_MSCA_READ_AUTOINC      0x0C000000 /* OP CODE 11 (read, auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK      0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT     28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL      0x00000000 /* ST CODE 00 (new protocol) */
+#define IXGBE_MSCA_OLD_PROTOCOL      0x10000000 /* ST CODE 01 (old protocol) */
+#define IXGBE_MSCA_MDI_COMMAND       0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN    0x80000000 /* MDI in progress enable */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK  0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK   0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT  16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK    0x24
+#define IXGBE_ATLAS_PDN_10G     0xB
+#define IXGBE_ATLAS_PDN_1G      0xC
+#define IXGBE_ATLAS_PDN_AN      0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD        0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN       0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL   0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL    0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL    0xF0
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE               0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE                   0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE                0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE              0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE     0x1E   /* Device 30 */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL      0x0    /* VS1 Control Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS       0x1    /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS  0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED    0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED     0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL    0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS     0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_PHY_XS_CONTROL      0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET        0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH         0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW          0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY   0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G       0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G        0x0010 /* 1G capable */
+
+#define IXGBE_PHY_REVISION_MASK        0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR             32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID    0x00A19410
+#define QT2022_PHY_ID    0x0043A400
+
+/* General purpose Interrupt Enable */
+#define IXGBE_GPIE_MSIX_MODE      0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD            0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN          0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME          0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT    0x80000000
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF         0x00000001
+#define IXGBE_TFCS_TXOFF0        0x00000100
+#define IXGBE_TFCS_TXOFF1        0x00000200
+#define IXGBE_TFCS_TXOFF2        0x00000400
+#define IXGBE_TFCS_TXOFF3        0x00000800
+#define IXGBE_TFCS_TXOFF4        0x00001000
+#define IXGBE_TFCS_TXOFF5        0x00002000
+#define IXGBE_TFCS_TXOFF6        0x00004000
+#define IXGBE_TFCS_TXOFF7        0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS            0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE  0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH  0x00000400
+#define IXGBE_TCPTIMER_LOOP          0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN      0x00000001   /* bit  0 */
+#define IXGBE_HLREG0_RXCRCSTRP    0x00000002   /* bit  1 */
+#define IXGBE_HLREG0_JUMBOEN      0x00000004   /* bit  2 */
+#define IXGBE_HLREG0_TXPADEN      0x00000400   /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN    0x00001000   /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN    0x00004000   /* bit 14 */
+#define IXGBE_HLREG0_LPBK         0x00008000   /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD       0x00010000   /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC      0x00020000   /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR     0x00040000   /* bit 18 */
+#define IXGBE_HLREG0_PREPEND      0x00F00000   /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN   0x01000000   /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000   /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000   /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000   /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN     0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR       0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ      0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR       0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ      0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE     0x00001000   /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD       0x00002000   /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE        0x80000000  /* bit 31, XON enable */
+#define IXGBE_FCRTH_FCEN        0x80000000  /* Rx Flow control enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK   0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM          0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC          0x00000004
+#define IXGBE_RMCS_DFP          IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
+#define IXGBE_RMCS_TFCE_802_3X  0x00000008 /* Tx link flow control enable */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
+#define IXGBE_RMCS_ARBDIS       0x00000040 /* Arbitration disable bit */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_LSC          0x00100000 /* Link Status Change */
+#define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_LSC          IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICS_MNG          IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR /* Pkt Buf Handler Error */
+#define IXGBE_EICS_DHER         IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
+#define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Error */
+#define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
+#define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Error */
+#define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
+#define IXGBE_EIMC_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK (\
+				IXGBE_EIMS_RTX_QUEUE       | \
+				IXGBE_EIMS_LSC             | \
+				IXGBE_EIMS_TCP_TIMER       | \
+				IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of control bits */
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK    0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM      25
+#define IXGBE_IVAR_TXRX_ENTRY   96
+#define IXGBE_IVAR_RX_ENTRY     64
+#define IXGBE_IVAR_RX_QUEUE(_i)    (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i)    (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY     32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX       96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX    97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i)   (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */
+
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN     0x20000000  /* bit 29 */
+#define IXGBE_VLNCTRL_VFE       0x40000000  /* bit 30 */
+#define IXGBE_VLNCTRL_VME       0x80000000  /* bit 31 */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID     0x0000000C /* LAN ID */
+#define IXGBE_STATUS_GIO        0x00080000 /* GIO Master Enable Status */
+
+#define IXGBE_STATUS_LAN_ID_0   0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP4 0x00000001 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000002 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP4_DIR     0x00000004 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR     0x00000008 /* SDP5 IO direction */
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE      0x00000040
+#define IXGBE_LED_BLINK_BASE     0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i)       IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i)      IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i)  IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP       0x0
+#define IXGBE_LED_LINK_10G      0x1
+#define IXGBE_LED_MAC           0x2
+#define IXGBE_LED_FILTER        0x3
+#define IXGBE_LED_LINK_ACTIVE   0x4
+#define IXGBE_LED_LINK_1G       0x5
+#define IXGBE_LED_ON            0xE
+#define IXGBE_LED_OFF           0xF
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_SUPP    0x80000000
+#define IXGBE_AUTOC_KX_SUPP     0x40000000
+#define IXGBE_AUTOC_PAUSE       0x30000000
+#define IXGBE_AUTOC_RF          0x08000000
+#define IXGBE_AUTOC_PD_TMR      0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_AN_RESTART  0x00001000
+#define IXGBE_AUTOC_FLU         0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT   13
+#define IXGBE_AUTOC_LMS_MASK   (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN  (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN  (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN   (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE    (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD      0x00000200
+#define IXGBE_AUTOC_10G_PMA_PMD     0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP  0x80000000
+#define IXGBE_LINKS_UP          0x40000000
+#define IXGBE_LINKS_SPEED       0x20000000
+#define IXGBE_LINKS_MODE        0x18000000
+#define IXGBE_LINKS_RX_MODE     0x06000000
+#define IXGBE_LINKS_TX_MODE     0x01800000
+#define IXGBE_LINKS_XGXS_EN     0x00400000
+#define IXGBE_LINKS_PCS_1G_EN   0x00200000
+#define IXGBE_LINKS_1G_AN_EN    0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE  0x00080000
+#define IXGBE_LINKS_1G_SYNC     0x00040000
+#define IXGBE_LINKS_10G_ALIGN   0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT    0x00001000
+#define IXGBE_LINKS_SIGNAL      0x00000F00
+
+#define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
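+
+/*
+ * Illustrative sketch only: decoding link state from a LINKS register
+ * value.  IXGBE_LINKS_UP reports link, and IXGBE_LINKS_SPEED set means
+ * 10G (clear means 1G) on this hardware.
+ */
+static inline bool ixgbe_example_link_up(u32 links)
+{
+	return (links & IXGBE_LINKS_UP) != 0;
+}
+
+static inline bool ixgbe_example_link_is_10g(u32 links)
+{
+	return (links & IXGBE_LINKS_SPEED) != 0;
+}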
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+
+/* GSSR definitions */
+#define IXGBE_GSSR_EEP_SM     0x0001
+#define IXGBE_GSSR_PHY0_SM    0x0002
+#define IXGBE_GSSR_PHY1_SM    0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM   0x0010
+
+/* EEC Register */
+#define IXGBE_EEC_SK        0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS        0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI        0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO        0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK  0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ       0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT       0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES      0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD       0x00000200 /* EEPROM Auto Read Done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE      0x00007800 /* EEPROM Size */
+
+#define IXGBE_EEC_SIZE_SHIFT          11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT  6
+#define IXGBE_EEPROM_OPCODE_BITS      8
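+
+/*
+ * Illustrative sketch only: the size field of EEC encodes
+ * log2(word count) - IXGBE_EEPROM_WORD_SIZE_SHIFT, so the EEPROM word
+ * count can be derived from an EEC register value like this.
+ */
+static inline u16 ixgbe_example_eeprom_words(u32 eec)
+{
+	u16 size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT);
+
+	return 1 << (size + IXGBE_EEPROM_WORD_SIZE_SHIFT);
+}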
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_EEPROM_CHECKSUM   0x3F
+#define IXGBE_EEPROM_SUM        0xBABA
+#define IXGBE_PCIE_ANALOG_PTR   0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR  0x06
+#define IXGBE_PCIE_CONFIG0_PTR  0x07
+#define IXGBE_PCIE_CONFIG1_PTR  0x08
+#define IXGBE_CORE0_PTR         0x09
+#define IXGBE_CORE1_PTR         0x0A
+#define IXGBE_MAC0_PTR          0x0B
+#define IXGBE_MAC1_PTR          0x0C
+#define IXGBE_CSR0_CONFIG_PTR   0x0D
+#define IXGBE_CSR1_CONFIG_PTR   0x0E
+#define IXGBE_FW_PTR            0x0F
+#define IXGBE_PBANUM0_PTR       0x15
+#define IXGBE_PBANUM1_PTR       0x16
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI      5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI     0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI    0x03  /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI   0x02  /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI      0x08  /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI    0x06  /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI    0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI    0x05  /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI    0x01  /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20  /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_READ_REG_DATA   16   /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_READ_REG_DONE   2    /* Offset to READ done bit */
+#define IXGBE_EEPROM_READ_REG_START  1    /* First bit to start operation */
+#define IXGBE_EEPROM_READ_ADDR_SHIFT 2    /* Shift to the address bits */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS   6
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+
+#ifndef IXGBE_EERD_ATTEMPTS
+/* Number of 5-microsecond intervals to wait for an EERD read to complete */
+#define IXGBE_EERD_ATTEMPTS 100000
+#endif
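+
+/*
+ * Illustrative EERD usage sketch (comment only).  IXGBE_EERD and the
+ * IXGBE_READ_REG/IXGBE_WRITE_REG accessors are assumed to be the
+ * driver's usual register offset and MMIO helpers:
+ *
+ *	IXGBE_WRITE_REG(hw, IXGBE_EERD,
+ *			(offset << IXGBE_EEPROM_READ_ADDR_SHIFT) |
+ *			IXGBE_EEPROM_READ_REG_START);
+ *	then poll up to IXGBE_EERD_ATTEMPTS times, 5us apart, until
+ *	IXGBE_EEPROM_READ_REG_DONE is set, and finally take
+ *	data = IXGBE_READ_REG(hw, IXGBE_EERD) >> IXGBE_EEPROM_READ_REG_DATA;
+ */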
+
+/* PCI Bus Info */
+#define IXGBE_PCI_LINK_STATUS     0xB2
+#define IXGBE_PCI_LINK_WIDTH      0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1    0x10
+#define IXGBE_PCI_LINK_WIDTH_2    0x20
+#define IXGBE_PCI_LINK_WIDTH_4    0x40
+#define IXGBE_PCI_LINK_WIDTH_8    0x80
+#define IXGBE_PCI_LINK_SPEED      0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
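+
+/*
+ * Illustrative sketch only: decoding the PCIe lane count from the link
+ * status word read at config offset IXGBE_PCI_LINK_STATUS.
+ */
+static inline int ixgbe_example_pcie_width(u16 link_status)
+{
+	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+	case IXGBE_PCI_LINK_WIDTH_1:
+		return 1;
+	case IXGBE_PCI_LINK_WIDTH_2:
+		return 2;
+	case IXGBE_PCI_LINK_WIDTH_4:
+		return 4;
+	case IXGBE_PCI_LINK_WIDTH_8:
+		return 8;
+	default:
+		return 0;
+	}
+}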
+
+/* Number of 100-microsecond intervals to wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
+
+/* Check whether an address is multicast.  This is a little-endian-specific check. */
+#define IXGBE_IS_MULTICAST(Address) \
+		(bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address)                      \
+		((((u8 *)(Address))[0] == ((u8)0xff)) && \
+		(((u8 *)(Address))[1] == ((u8)0xff)))
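+
+/*
+ * Usage note: both macros take a pointer to a six-byte MAC address,
+ * e.g. IXGBE_IS_MULTICAST(hw->mac.addr).  A broadcast address also
+ * satisfies IXGBE_IS_MULTICAST(), so test for broadcast first when the
+ * distinction matters.
+ */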
+
+/* RAH */
+#define IXGBE_RAH_VIND_MASK     0x003C0000
+#define IXGBE_RAH_VIND_SHIFT    18
+#define IXGBE_RAH_AV            0x80000000
+
+/* Filters */
+#define IXGBE_MC_TBL_SIZE       128  /* Multicast Filter Table (4096 bits) */
+#define IXGBE_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS       0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK  0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_NFSW_DIS        0x00000040
+#define IXGBE_RFCTL_NFSR_DIS        0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK    0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT   8
+#define IXGBE_RFCTL_NFS_VER_2       0
+#define IXGBE_RFCTL_NFS_VER_3       1
+#define IXGBE_RFCTL_NFS_VER_4       2
+#define IXGBE_RFCTL_IPV6_DIS        0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS   0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS      0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS     0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE     0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH     0x04000000 /* Tx Desc. write-back flushing */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE     0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004  /* Allow jumbo frames */
+/* Allows for 16K packets plus 4 bytes for a VLAN tag */
+#define IXGBE_MAX_FRAME_SZ      0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE   0x1      /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2      /* Tx seq. # write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena */
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
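+
+/*
+ * Illustrative sketch only: composing the FCTRL bits a driver would set
+ * for promiscuous mode.  The caller is assumed to read, modify and write
+ * the FCTRL register itself.
+ */
+static inline u32 ixgbe_example_fctrl_promisc(u32 fctrl)
+{
+	return fctrl | IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
+}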
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN                 0x00000001  /* RSS Enable */
+#define IXGBE_MRQC_RSS_FIELD_MASK        0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4        0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX     0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6        0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+
+#define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS     0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP      0x02    /* End of Packet */
+#define IXGBE_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define IXGBE_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS     0x20    /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF      0x80    /* Passed inexact filter */
+#define IXGBE_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT     0x200   /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE        0x01    /* CRC Error */
+#define IXGBE_RXD_ERR_LE        0x02    /* Length Error */
+#define IXGBE_RXD_ERR_PE        0x08    /* Packet Error */
+#define IXGBE_RXD_ERR_OSE       0x10    /* Oversize Error */
+#define IXGBE_RXD_ERR_USE       0x20    /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE      0x40    /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
+#define IXGBE_RXDADV_HBO        0x00800000
+#define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE     0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE    0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE    0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE   0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE    0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK  0x0FFF  /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK      0xE000  /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT     13
+#define IXGBE_RXD_CFI_MASK      0x1000  /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT     12
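+
+/*
+ * Illustrative sketch only: splitting a receive descriptor VLAN field
+ * into its tag and 802.1p priority using the masks above.
+ */
+static inline u16 ixgbe_example_rxd_vlan_id(u16 vlan)
+{
+	return vlan & IXGBE_RXD_VLAN_ID_MASK;
+}
+
+static inline u8 ixgbe_example_rxd_priority(u16 vlan)
+{
+	return (vlan & IXGBE_RXD_PRI_MASK) >> IXGBE_RXD_PRI_SHIFT;
+}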
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10     /* buffer size in 1KB units */
+#define IXGBE_SRRCTL_BSIZEPKT_MASK  0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK  0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP       0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5
+#define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000
+#define IXGBE_RXDADV_SPH                0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE       0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP   0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4       0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP   0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX    0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6       0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP   0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP   0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE       0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4       0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6       0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP        0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\
+				      IXGBE_RXD_ERR_CE | \
+				      IXGBE_RXD_ERR_LE | \
+				      IXGBE_RXD_ERR_PE | \
+				      IXGBE_RXD_ERR_OSE | \
+				      IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\
+				      IXGBE_RXDADV_ERR_CE | \
+				      IXGBE_RXDADV_ERR_LE | \
+				      IXGBE_RXDADV_ERR_PE | \
+				      IXGBE_RXDADV_ERR_OSE | \
+				      IXGBE_RXDADV_ERR_USE)
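+
+/*
+ * Illustrative sketch only: the receive-path test that decides whether a
+ * frame should be dropped because the advanced descriptor reported a
+ * frame error.
+ */
+static inline bool ixgbe_example_rxd_frame_error(u32 status_error)
+{
+	return (status_error & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0;
+}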
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE      0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE  8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY   1024
+
+/* VLAN-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK  0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK   0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* Transmit Descriptor - Legacy */
+struct ixgbe_legacy_tx_desc {
+	u64 buffer_addr;       /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 cso; /* Checksum offset */
+			u8 cmd; /* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 css; /* Checksum start */
+			u16 vlan;
+		} fields;
+	} upper;
+};
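+
+/*
+ * Illustrative sketch only: filling a legacy transmit descriptor for a
+ * single-buffer packet.  "dma" and "len" are the mapped buffer address
+ * and length; real code would also apply cpu_to_le*() conversions.
+ */
+static inline void ixgbe_example_fill_legacy_txd(struct ixgbe_legacy_tx_desc *txd,
+						 u64 dma, u16 len)
+{
+	txd->buffer_addr = dma;
+	txd->lower.data = len | IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_IFCS |
+			  IXGBE_TXD_CMD_RS;
+	txd->upper.data = 0;
+}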
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+	struct {
+		u64 buffer_addr;       /* Address of descriptor's data buf */
+		u32 cmd_type_len;
+		u32 olinfo_status;
+	} read;
+	struct {
+		u64 rsvd;       /* Reserved */
+		u32 nxtseq_seed;
+		u32 status;
+	} wb;
+};
+
+/* Receive Descriptor - Legacy */
+struct ixgbe_legacy_rx_desc {
+	u64 buffer_addr; /* Address of the descriptor's data buffer */
+	u16 length;      /* Length of data DMAed into data buffer */
+	u16 csum;        /* Packet checksum */
+	u8 status;       /* Descriptor status */
+	u8 errors;       /* Descriptor Errors */
+	u16 vlan;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+	struct {
+		u64 pkt_addr; /* Packet buffer address */
+		u64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				u16 pkt_info; /* RSS type, Packet type */
+				u16 hdr_info; /* Split Header, header len */
+			} lo_dword;
+			union {
+				u32 rss; /* RSS Hash */
+				struct {
+					u16 ip_id; /* IP id */
+					u16 csum; /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error; /* ext status/error */
+			u16 length; /* Packet length */
+			u16 vlan; /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+	u32 vlan_macip_lens;
+	u32 seqnum_seed;
+	u32 type_tucmd_mlhl;
+	u32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buffer length(bytes) */
+#define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RDMA  0x04000000 /* RDMA */
+#define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000     /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC      0x00000002 /* NXTSEQ/SEED present in WB */
+#define IXGBE_ADVTXD_STAT_RSV   0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+				IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+				IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_EOM  0x00000400 /* Enable L bit-RDMA DDP hdr */
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST   0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL   0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST  0x00001000 /* Last TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
+#define IXGBE_ADVTXD_POPTS_RSV  0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT  14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT  9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT    16  /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
+#define IXGBE_ADVTXD_L4LEN_SHIFT   8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT     16  /* Adv ctxt MSS shift */
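+
+/*
+ * Illustrative sketch only: composing cmd_type_len for an advanced data
+ * descriptor carrying one buffer of "len" bytes, before any per-packet
+ * flags such as VLE or TSE are ORed in.
+ */
+static inline u32 ixgbe_example_advtxd_cmd(u32 len)
+{
+	return (len & IXGBE_ADVTXD_DTALEN_MASK) | IXGBE_ADVTXD_DTYP_DATA |
+	       IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+}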
+
+/* Link speed */
+#define IXGBE_LINK_SPEED_UNKNOWN   0
+#define IXGBE_LINK_SPEED_100_FULL  0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL  0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+enum ixgbe_eeprom_type {
+	ixgbe_eeprom_uninitialized = 0,
+	ixgbe_eeprom_spi,
+	ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+	ixgbe_mac_unknown = 0,
+	ixgbe_mac_82598EB,
+	ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+	ixgbe_phy_unknown = 0,
+	ixgbe_phy_tn,
+	ixgbe_phy_qt,
+	ixgbe_phy_xaui
+};
+
+enum ixgbe_media_type {
+	ixgbe_media_type_unknown = 0,
+	ixgbe_media_type_fiber,
+	ixgbe_media_type_copper,
+	ixgbe_media_type_backplane
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_type {
+	ixgbe_fc_none = 0,
+	ixgbe_fc_rx_pause,
+	ixgbe_fc_tx_pause,
+	ixgbe_fc_full,
+	ixgbe_fc_default
+};
+
+struct ixgbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mc_addr_in_rar_count;
+	u32 mta_in_use;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+	u32 high_water; /* Flow Control High-water */
+	u32 low_water; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	enum ixgbe_fc_type type; /* Type of flow control */
+	enum ixgbe_fc_type original_type;
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+	u64 crcerrs;
+	u64 illerrc;
+	u64 errbc;
+	u64 mspdc;
+	u64 mpctotal;
+	u64 mpc[8];
+	u64 mlfc;
+	u64 mrfc;
+	u64 rlec;
+	u64 lxontxc;
+	u64 lxonrxc;
+	u64 lxofftxc;
+	u64 lxoffrxc;
+	u64 pxontxc[8];
+	u64 pxonrxc[8];
+	u64 pxofftxc[8];
+	u64 pxoffrxc[8];
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc[8];
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mngprc;
+	u64 mngpdc;
+	u64 mngptc;
+	u64 tor;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 xec;
+	u64 rqsmr[16];
+	u64 tqsmr[8];
+	u64 qprc[16];
+	u64 qptc[16];
+	u64 qbrc[16];
+	u64 qbtc[16];
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+struct ixgbe_mac_operations {
+	s32 (*reset)(struct ixgbe_hw *);
+	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	s32 (*setup_link)(struct ixgbe_hw *);
+	s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
+	s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *);
+};
+
+struct ixgbe_phy_operations {
+	s32 (*setup_link)(struct ixgbe_hw *);
+	s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool);
+};
+
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations	ops;
+	enum ixgbe_mac_type		type;
+	u8				addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8				perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	s32				mc_filter_type;
+	u32				num_rx_queues;
+	u32				num_tx_queues;
+	u32				num_rx_addrs;
+	u32				link_attach_type;
+	u32				link_mode_select;
+	bool				link_settings_loaded;
+};
+
+struct ixgbe_eeprom_info {
+	enum ixgbe_eeprom_type		type;
+	u16				word_size;
+	u16				address_bits;
+};
+
+struct ixgbe_phy_info {
+	struct ixgbe_phy_operations	ops;
+
+	enum ixgbe_phy_type		type;
+	u32				addr;
+	u32				id;
+	u32				revision;
+	enum ixgbe_media_type		media_type;
+	u32				autoneg_advertised;
+	bool				autoneg_wait_to_complete;
+};
+
+struct ixgbe_info {
+	enum ixgbe_mac_type		mac;
+	s32 				(*get_invariants)(struct ixgbe_hw *);
+	struct ixgbe_mac_operations	*mac_ops;
+};
+
+struct ixgbe_hw {
+	u8 __iomem			*hw_addr;
+	void				*back;
+	struct ixgbe_mac_info		mac;
+	struct ixgbe_addr_filter_info	addr_ctrl;
+	struct ixgbe_fc_info		fc;
+	struct ixgbe_phy_info		phy;
+	struct ixgbe_eeprom_info	eeprom;
+	u16				device_id;
+	u16				vendor_id;
+	u16				subsystem_device_id;
+	u16				subsystem_vendor_id;
+	u8				revision_id;
+	bool				adapter_stopped;
+};
+
+/* Error Codes */
+#define IXGBE_ERR_EEPROM                        -1
+#define IXGBE_ERR_EEPROM_CHECKSUM               -2
+#define IXGBE_ERR_PHY                           -3
+#define IXGBE_ERR_CONFIG                        -4
+#define IXGBE_ERR_PARAM                         -5
+#define IXGBE_ERR_MAC_TYPE                      -6
+#define IXGBE_ERR_UNKNOWN_PHY                   -7
+#define IXGBE_ERR_LINK_SETUP                    -8
+#define IXGBE_ERR_ADAPTER_STOPPED               -9
+#define IXGBE_ERR_INVALID_MAC_ADDR              -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED          -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING       -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS         -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE          -14
+#define IXGBE_ERR_RESET_FAILED                  -15
+#define IXGBE_ERR_SWFW_SYNC                     -16
+#define IXGBE_ERR_PHY_ADDR_INVALID              -17
+#define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
+
+#endif /* _IXGBE_TYPE_H_ */