kernel-2.6.18-238.el5.src.rpm

From: Andy Gospodarek <gospo@redhat.com>
Subject: [RHEL5.1 PATCH] igb: initial support for igb netdriver
Date: Thu, 9 Aug 2007 14:29:35 -0400
Bugzilla: 244758
Message-Id: <20070809182933.GF1096@gospo.rdu.redhat.com>
Changelog: [net] igb: initial support for igb netdriver




This is Intel's new igb driver, adding initial support for Intel 82575-based gigabit Ethernet adapters (copper, fiber/SerDes, and quad-port copper variants).

This will resolve BZ 244758.

---

 Kconfig             |   22 
 Makefile            |    1 
 igb/Makefile        |   36 
 igb/e1000_82575.c   | 1310 +++++++++++++++++
 igb/e1000_82575.h   |  272 +++
 igb/e1000_api.c     |  823 ++++++++++
 igb/e1000_api.h     |  121 +
 igb/e1000_defines.h | 1362 +++++++++++++++++
 igb/e1000_hw.h      |  613 +++++++
 igb/e1000_mac.c     | 1416 ++++++++++++++++++
 igb/e1000_mac.h     |   71 
 igb/e1000_manage.c  |  246 +++
 igb/e1000_manage.h  |   76 
 igb/e1000_nvm.c     |  624 ++++++++
 igb/e1000_nvm.h     |   46 
 igb/e1000_phy.c     | 1723 ++++++++++++++++++++++
 igb/e1000_phy.h     |  163 ++
 igb/e1000_regs.h    |  467 ++++++
 igb/igb.h           |  308 ++++
 igb/igb_compat.h    |   22 
 igb/igb_ethtool.c   | 1659 +++++++++++++++++++++
 igb/igb_main.c      | 3992 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 igb/igb_param.c     |  735 +++++++++
 igb/igb_regtest.h   |   86 +
 24 files changed, 16194 insertions(+)

--- linux-2.6.18.i386/drivers/net/Makefile.orig	2007-08-07 10:27:15.000000000 -0400
+++ linux-2.6.18.i386/drivers/net/Makefile	2007-08-07 17:33:53.000000000 -0400
@@ -8,6 +8,7 @@ endif
 
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_E1000E) += e1000e/
+obj-$(CONFIG_IGB) += igb/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
--- linux-2.6.18.i386/drivers/net/Kconfig.orig	2007-08-07 10:27:15.000000000 -0400
+++ linux-2.6.18.i386/drivers/net/Kconfig	2007-08-07 17:33:29.000000000 -0400
@@ -1983,6 +1983,28 @@ config E1000E
 	  <file:Documentation/networking/net-modules.txt>.  The module
 	  will be called e1000e.
 
+config IGB
+	tristate "Intel(R) 82575 Gigabit Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports Intel(R) 82575 gigabit ethernet adapters.
+	  For more information on how to identify your adapter, go to the
+	  Adapter & Driver ID Guide at:
+
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+	  For general information and support, go to the Intel support
+	  website at:
+
+	  <http://support.intel.com>
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/igb.txt>.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module
+	  will be called igb.
+
 source "drivers/net/ixp2000/Kconfig"
 
 config MYRI_SBUS
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_82575.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,272 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+/* Receive Address Register Count
+ * Number of high/low register pairs in the RAR.  The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+#define E1000_RAR_ENTRIES_82575   16
+
+struct e1000_adv_data_desc {
+	u64 buffer_addr;    /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u32 datalen :16; /* Data buffer length */
+			u32 rsvd    :4;
+			u32 dtyp    :4;  /* Descriptor type */
+			u32 dcmd    :8;  /* Descriptor command */
+		} config;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u32 status  :4;  /* Descriptor status */
+			u32 idx     :4;
+			u32 popts   :6;  /* Packet Options */
+			u32 paylen  :18; /* Payload length */
+		} options;
+	} upper;
+};
+
+#define E1000_TXD_DTYP_ADV_C    0x2  /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D    0x3  /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT  0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4    0x2  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6    0x0  /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0  /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4  /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ  0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP      0x1  /* End of Packet */
+#define E1000_ADV_DCMD_IFCS     0x2  /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS       0x8  /* Report Status */
+#define E1000_ADV_DCMD_VLE      0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE      0x80 /* TCP Seg enable */
+
+struct e1000_adv_context_desc {
+	union {
+		u32 ip_config;
+		struct {
+			u32 iplen    :9;
+			u32 maclen   :7;
+			u32 vlan_tag :16;
+		} fields;
+	} ip_setup;
+	u32 seq_num;
+	union {
+		u64 l4_config;
+		struct {
+			u32 mkrloc :9;
+			u32 tucmd  :11;
+			u32 dtyp   :4;
+			u32 adv    :8;
+			u32 rsvd   :4;
+			u32 idx    :4;
+			u32 l4len  :8;
+			u32 mss    :16;
+		} fields;
+	} l4_setup;
+};
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+
+#define E1000_TX_HEAD_WB_ENABLE   0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
+
+#define E1000_EICR_TX_QUEUE ( \
+    E1000_EICR_TX_QUEUE0 |    \
+    E1000_EICR_TX_QUEUE1 |    \
+    E1000_EICR_TX_QUEUE2 |    \
+    E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+    E1000_EICR_RX_QUEUE0 |    \
+    E1000_EICR_RX_QUEUE1 |    \
+    E1000_EICR_RX_QUEUE2 |    \
+    E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+    E1000_EIMS_RX_QUEUE  | \
+    E1000_EIMS_TX_QUEUE  | \
+    E1000_EIMS_TCP_TIMER | \
+    E1000_EIMS_OTHER)
+
+/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
+#define E1000_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of control bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+	struct {
+		u64 pkt_addr;             /* Packet buffer address */
+		u64 hdr_addr;             /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				u16 pkt_info;   /* RSS type, Packet type */
+				u16 hdr_info;   /* Split Header,
+				                      * header buffer length */
+			} lo_dword;
+			union {
+				u32 rss;          /* RSS Hash */
+				struct {
+					u16 ip_id;    /* IP id */
+					u16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length;           /* Packet length */
+			u16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define E1000_RXDADV_RSSTYPE_MASK        0x0000F000
+#define E1000_RXDADV_RSSTYPE_SHIFT       12
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_SPLITHEADER_EN      0x00001000
+#define E1000_RXDADV_SPH                 0x8000
+#define E1000_RXDADV_HBO                 0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE        0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP    0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4        0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP    0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX     0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6        0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP    0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP    0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+	struct {
+		u64 buffer_addr;    /* Address of descriptor's data buf */
+		u32 cmd_type_len;
+		u32 olinfo_status;
+	} read;
+	struct {
+		u64 rsvd;       /* Reserved */
+		u32 nxtseq_seed;
+		u32 status;
+	} wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RDMA    0x04000000 /* RDMA */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI  0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC  0x00040000 /* Apply LinkSec on packet */
+#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
+#define E1000_ADVTXD_STAT_SN_CRC  0x00000002 /* NXTSEQ/SEED present in WB */
+#define E1000_ADVTXD_IDX_SHIFT    4  /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_EOM    0x00000400 /* Enable L bit in RDMA DDP hdr */
+#define E1000_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_IPSEC     0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+	u32 vlan_macip_lens;
+	u32 seqnum_seed;
+	u32 type_tucmd_mlhl;
+	u32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT     16  /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP    0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN  0x00004000
+#define E1000_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK     0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK      0x000000FF
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH        0x04000000 /* Tx Desc. write-back flushing */
+#define E1000_TXDCTL_PRIORITY      0x08000000 /* Tx Queue Arbitration Priority
+                                                 0=low, 1=high */
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH        0x04000000 /* Rx Desc. write-back flushing */
+
+#endif
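
The descriptor defines above are easiest to read next to a concrete descriptor write. The following is a minimal sketch for illustration only, not code from this patch: it assumes the union e1000_adv_tx_desc and the E1000_ADVTXD_* masks from this header, the function and parameter names are invented, and byte-order conversion (cpu_to_le64/cpu_to_le32) is omitted for brevity.

static void example_fill_adv_tx_desc(union e1000_adv_tx_desc *tx_desc,
                                     u64 dma, u32 len)
{
	/* Advanced data descriptor: extended format, insert FCS, end of
	 * packet, with the data buffer length in the low bits. */
	u32 cmd_type_len = E1000_ADVTXD_DTYP_DATA |
	                   E1000_ADVTXD_DCMD_DEXT |
	                   E1000_ADVTXD_DCMD_IFCS |
	                   E1000_ADVTXD_DCMD_EOP  |
	                   len;

	tx_desc->read.buffer_addr = dma;
	tx_desc->read.cmd_type_len = cmd_type_len;
	/* PAYLEN occupies the upper bits of olinfo_status */
	tx_desc->read.olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
}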
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_nvm.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,624 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/io.h>
+
+#include "e1000_api.h"
+#include "e1000_nvm.h"
+
+static s32 e1000_acquire_nvm(struct e1000_hw *hw);
+static void e1000_release_nvm(struct e1000_hw *hw);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+static void e1000_stop_nvm(struct e1000_hw *hw);
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 mask;
+
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_microwire)
+		eecd &= ~E1000_EECD_DO;
+	else if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+
+		udelay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		e1000_raise_eec_clk(hw, &eecd);
+
+		eecd = E1000_READ_REG(hw, E1000_EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = E1000_READ_REG(hw, E1000_EERD);
+		else
+			reg = E1000_READ_REG(hw, E1000_EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE) {
+			ret_val = E1000_SUCCESS;
+			break;
+		}
+
+		udelay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		udelay(5);
+		eecd = E1000_READ_REG(hw, E1000_EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		ret_val = -E1000_ERR_NVM;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		udelay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+
+		/* Select EEPROM */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		udelay(nvm->delay_usec);
+
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		udelay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		udelay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+		/* CS on Microwire is active-high */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  e1000_release_nvm_generic - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+
+	e1000_stop_nvm(hw);
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd &= ~E1000_EECD_REQ;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 ret_val = E1000_SUCCESS;
+	u16 timeout = 0;
+	u8 spi_stat_reg;
+
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		/* Clear SK and DI */
+		eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		/* Set CS */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		udelay(1);
+		timeout = NVM_MAX_RETRY_SPI;
+
+		/* Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out. */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+			                         hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	/* A check for invalid values: offset too large, too many words,
+	 * or zero words requested. */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		E1000_WRITE_REG(hw, E1000_EERD, eerd);
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+		           E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+
+	/* A check for invalid values: offset too large, too many words,
+	 * or zero words requested. */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	msleep(10);
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val)
+			goto release;
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+		                         nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/* Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+		                         nvm->address_bits);
+
+		/* Loop to allow for up to whole page write of eeprom */
+		while (widx < words) {
+			u16 word_out = data[widx];
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+	}
+
+	msleep(10);
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from the EEPROM and stores the value.
+ *  Since devices with two ports use the same EEPROM, we increment the
+ *  last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+	s32  ret_val = E1000_SUCCESS;
+	u16 offset, nvm_data, i;
+
+
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = i >> 1;
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			goto out;
+		}
+		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+	}
+
+	/* Flip last bit of mac address if we're on second port */
+	if (hw->bus.func == E1000_FUNC_1)
+		hw->mac.perm_addr[5] ^= 1;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32  ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reload_nvm_generic - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+
+	udelay(10);
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/* Function pointers local to this file and not intended for public use */
+
+/**
+ *  e1000_acquire_nvm - Acquire exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  For those silicon families which have implemented a NVM acquire function,
+ *  run the defined function else return success.
+ **/
+static s32 e1000_acquire_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_nvm != NULL)
+		return hw->func.acquire_nvm(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  For those silicon families which have implemented a NVM release function,
+ *  run the defined function else return success.
+ **/
+static void e1000_release_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.release_nvm != NULL)
+		hw->func.release_nvm(hw);
+}
+
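A note on the checksum scheme implemented by e1000_validate_nvm_checksum_generic() and e1000_update_nvm_checksum_generic() above: the word stored at offset NVM_CHECKSUM_REG is chosen so that the 16-bit sum of every EEPROM word from offset 0 through NVM_CHECKSUM_REG equals NVM_SUM (0xBABA). A small worked example with invented values, not taken from any real EEPROM image:

	u16 data_sum = 0x1234;                  /* sum of words 0 .. NVM_CHECKSUM_REG - 1 */
	u16 checksum = (u16)NVM_SUM - data_sum; /* 0xBABA - 0x1234 = 0xA886 */

	/* validation then adds data_sum + checksum, which wraps to 0xBABA,
	 * and returns E1000_SUCCESS */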
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_api.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,823 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+
+#include "e1000_api.h"
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+#include "e1000_phy.h"
+
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_mac_params - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the MAC
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+static s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_mac_params != NULL) {
+		ret_val = hw->func.init_mac_params(hw);
+		if (ret_val) {
+			goto out;
+		}
+	} else {
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the NVM
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+static s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_nvm_params != NULL) {
+		ret_val = hw->func.init_nvm_params(hw);
+		if (ret_val) {
+			goto out;
+		}
+	} else {
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the PHY
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+static s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_phy_params != NULL) {
+		ret_val = hw->func.init_phy_params(hw);
+		if (ret_val) {
+			goto out;
+		}
+	} else {
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  device ID stored in the hw structure.
+ *  MUST BE FIRST FUNCTION CALLED (explicitly or through
+ *  e1000_setup_init_funcs()).
+ **/
+static s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82575EB_COPPER:
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		mac->type = e1000_82575;
+		break;
+	default:
+		/* Should never have loaded on this device */
+		ret_val = -E1000_ERR_MAC_INIT;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_init_funcs - Initializes function pointers
+ *  @hw: pointer to the HW structure
+ *  @init_device: 1 will initialize the rest of the function pointers
+ *                 getting the device ready for use.  0 will only set
+ *                 MAC type and the function pointers for the other init
+ *                 functions.  Passing 0 will not generate any hardware
+ *                 reads or writes.
+ *
+ *  This function must be called by a driver in order to use the rest
+ *  of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+	s32 ret_val;
+
+	/* Can't do much good without knowing the MAC type.
+	 */
+	ret_val = e1000_set_mac_type(hw);
+	if (ret_val) {
+		goto out;
+	}
+
+	if (!hw->hw_addr) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Init some generic function pointers that are currently all pointing
+	 * to generic implementations. We do this first allowing a driver
+	 * module to override it afterwards.
+	 */
+	hw->func.config_collision_dist = e1000_config_collision_dist_generic;
+	hw->func.rar_set = e1000_rar_set_generic;
+	hw->func.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+	hw->func.mng_host_if_write = e1000_mng_host_if_write_generic;
+	hw->func.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
+	hw->func.mng_enable_host_if = e1000_mng_enable_host_if_generic;
+	hw->func.wait_autoneg = e1000_wait_autoneg_generic;
+	hw->func.reload_nvm = e1000_reload_nvm_generic;
+
+	/* Set up the init function pointers. These are functions within the
+	 * adapter family file that sets up function pointers for the rest of
+	 * the functions in that family.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82575:
+		e1000_init_function_pointers_82575(hw);
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		break;
+	}
+
+	/* Initialize the rest of the function pointers. These require some
+	 * register reads/writes in some cases.
+	 */
+	if ((ret_val == E1000_SUCCESS) && (init_device == 1)) {
+		ret_val = e1000_init_mac_params(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_init_nvm_params(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_init_phy_params(hw);
+		if (ret_val)
+			goto out;
+
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_remove_device - Free device specific structure
+ *  @hw: pointer to the HW structure
+ *
+ *  If a device specific structure was allocated, this function will
+ *  free it. This is a function pointer entry point called by drivers.
+ **/
+void e1000_remove_device(struct e1000_hw *hw)
+{
+	if (hw->func.remove_device != NULL)
+		hw->func.remove_device(hw);
+}
+
+/**
+ *  e1000_get_bus_info - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This will obtain information about the HW bus to which the
+ *  adapter is attached and stores it in the hw structure. This is a
+ *  function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+	if (hw->func.get_bus_info != NULL)
+		return hw->func.get_bus_info(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  This clears the VLAN filter table on the adapter. This is a function
+ *  pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+	if (hw->func.clear_vfta != NULL)
+		hw->func.clear_vfta(hw);
+}
+
+/**
+ *  e1000_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset at which to write the value.
+ *  @value: the 32-bit value to write at location offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	if (hw->func.write_vfta != NULL)
+		hw->func.write_vfta(hw, offset, value);
+}
+
+/**
+ *  e1000_mc_addr_list_update - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.  Currently no func pointer
+ *  exists and all implementations are handled in the generic version of this
+ *  function.
+ **/
+void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list,
+                               u32 mc_addr_count, u32 rar_used_count,
+                               u32 rar_count)
+{
+	if (hw->func.mc_addr_list_update != NULL)
+		hw->func.mc_addr_list_update(hw,
+		                             mc_addr_list,
+		                             mc_addr_count,
+		                             rar_used_count,
+		                             rar_count);
+}
+
+/**
+ *  e1000_force_mac_fc - Force MAC flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings. Currently no func pointer exists
+ *  and all implementations are handled in the generic version of this
+ *  function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ *  e1000_check_for_link - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+	if (hw->func.check_for_link != NULL)
+		return hw->func.check_for_link(hw);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_reset_hw - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+	if (hw->func.reset_hw != NULL)
+		return hw->func.reset_hw(hw);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_init_hw - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation. This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+	if (hw->func.init_hw != NULL)
+		return hw->func.init_hw(hw);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_link - Configures link and flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  This configures link and flow control settings for the adapter. This
+ *  is a function pointer entry point called by drivers. While modules can
+ *  also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+	if (hw->func.setup_link != NULL)
+		return hw->func.setup_link(hw);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_get_speed_and_duplex - Returns current speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to a 16-bit value to store the speed
+ *  @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ *  This returns the speed and duplex of the adapter in the two 'out'
+ *  variables passed in. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	if (hw->func.get_link_up_info != NULL)
+		return hw->func.get_link_up_info(hw, speed, duplex);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_cleanup_led - Restores SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This restores the SW controllable LED to the value saved off by
+ *  e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+	if (hw->func.cleanup_led != NULL)
+		return hw->func.cleanup_led(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led - Blink SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This starts the adapter LED blinking. Request the LED to be setup first
+ *  and cleaned up after. This is a function pointer entry point called by
+ *  drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+	if (hw->func.blink_led != NULL)
+		return hw->func.blink_led(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+	if (hw->func.led_on != NULL)
+		return hw->func.led_on(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+	if (hw->func.led_off != NULL)
+		return hw->func.led_off(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_adaptive - Reset adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the adaptive IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+	e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_update_adaptive - Update adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the adaptive IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+	e1000_update_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_disable_pcie_master - Disable PCI-Express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Currently no func pointer exists and all implementations are
+ *  handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+	return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ *  e1000_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+	if (hw->func.config_collision_dist != NULL)
+		hw->func.config_collision_dist(hw);
+}
+
+/**
+ *  e1000_rar_set - Sets a receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: address to set the RAR to
+ *  @index: the RAR to set
+ *
+ *  Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	if (hw->func.rar_set != NULL)
+		hw->func.rar_set(hw, addr, index);
+}
+
+/**
+ *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ *  @hw: pointer to the HW structure
+ *
+ *  Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+	if (hw->func.validate_mdi_setting != NULL)
+		return hw->func.validate_mdi_setting(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mta_set - Sets multicast table bit
+ *  @hw: pointer to the HW structure
+ *  @hash_value: Multicast hash value.
+ *
+ *  This sets the bit in the multicast table corresponding to the
+ *  hash value.  This is a function pointer entry point called by drivers.
+ **/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+	if (hw->func.mta_set != NULL)
+		hw->func.mta_set(hw, hash_value);
+}
+
+/**
+ *  e1000_hash_mc_addr - Determines address location in multicast table
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: Multicast address to hash.
+ *
+ *  This hashes an address to determine its location in the multicast
+ *  table. Currently no func pointer exists and all implementations
+ *  are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ *  e1000_wait_autoneg - Waits for autonegotiation completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for autoneg to complete. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	if (hw->func.wait_autoneg != NULL)
+		return hw->func.wait_autoneg(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block - Verifies PHY can be reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if the PHY is in a state that can be reset or if manageability
+ *  has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+	if (hw->func.check_reset_block != NULL)
+		return hw->func.check_reset_block(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg - Reads PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the buffer to store the 16-bit read.
+ *
+ *  Reads the PHY register and returns the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	if (hw->func.read_phy_reg != NULL)
+		return hw->func.read_phy_reg(hw, offset, data);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg - Writes PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	if (hw->func.write_phy_reg != NULL)
+		return hw->func.write_phy_reg(hw, offset, data);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_cable_length - Retrieves cable length estimation
+ *  @hw: pointer to the HW structure
+ *
+ *  This function estimates the cable length and stores them in
+ *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+	if (hw->func.get_cable_length != NULL)
+		return hw->func.get_cable_length(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_info - Retrieves PHY information from registers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function gets some information from various PHY registers and
+ *  populates hw->phy values with it. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+	if (hw->func.get_phy_info != NULL)
+		return hw->func.get_phy_info(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_hw_reset - Hard PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a hard PHY reset. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+	if (hw->func.reset_phy != NULL)
+		return hw->func.reset_phy(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_commit - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+	if (hw->func.commit_phy != NULL)
+		return hw->func.commit_phy(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D0
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->func.set_d0_lplu_state != NULL)
+		return hw->func.set_d0_lplu_state(hw, active);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->func.set_d3_lplu_state != NULL)
+		return hw->func.set_d3_lplu_state(hw, active);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr - Reads MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MAC address out of the adapter and stores it in the HW structure.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+	return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Validates the NVM checksum is correct. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	if (hw->func.validate_nvm != NULL)
+		return hw->func.validate_nvm(hw);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the NVM checksum. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+	if (hw->func.update_nvm != NULL)
+		return hw->func.update_nvm(hw);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_read_nvm - Reads NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to read
+ *  @words: number of 16-bit words to read
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	if (hw->func.read_nvm != NULL)
+		return hw->func.read_nvm(hw, offset, words, data);
+	else
+		return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_write_nvm - Writes to NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to write to
+ *  @words: number of 16-bit words to write
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	if (hw->func.write_nvm != NULL)
+		return hw->func.write_nvm(hw, offset, words, data);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the 8-bit value in data to the control register at offset.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, u8 data)
+{
+	return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
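
For context, the entry points in this file are meant to be driven by the LAN driver roughly in the order sketched below. This is an illustrative sketch only, based on the comments above (e1000_setup_init_funcs must be called first); the function name is made up and error handling is abbreviated.

static s32 example_init_shared_code(struct e1000_hw *hw)
{
	s32 ret_val;

	/* Sets hw->mac.type from the device ID and installs the MAC/NVM/PHY
	 * function pointers; init_device = 1 also runs the init_*_params
	 * routines, which may touch the hardware. */
	ret_val = e1000_setup_init_funcs(hw, 1);
	if (ret_val)
		return ret_val;

	/* Sanity-check the EEPROM and read the permanent MAC address. */
	ret_val = e1000_validate_nvm_checksum(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_read_mac_addr(hw);
	if (ret_val)
		return ret_val;

	/* Bring the MAC to a known state and initialize it. */
	ret_val = e1000_reset_hw(hw);
	if (ret_val)
		return ret_val;

	return e1000_init_hw(hw);
}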
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/igb_main.c	2007-08-07 18:26:50.000000000 -0400
@@ -0,0 +1,3992 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include "igb.h"
+
+char igb_driver_name[] = "igb";
+static char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver";
+
+#define DRV_VERSION "1.0.1"
+
+const char igb_driver_version[] = DRV_VERSION;
+static char igb_copyright[] = "Copyright (c) 2007 Intel Corporation.";
+
+/* PCI Device ID Table */
+static struct pci_device_id igb_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+	             E1000_DEV_ID_82575EB_COPPER)},
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+	             E1000_DEV_ID_82575EB_FIBER_SERDES)},
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
+	             E1000_DEV_ID_82575GB_QUAD_COPPER)},
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
+
+void igb_reset(struct igb_adapter *);
+static int igb_setup_all_tx_resources(struct igb_adapter *);
+static int igb_setup_all_rx_resources(struct igb_adapter *);
+static void igb_free_all_tx_resources(struct igb_adapter *);
+static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_free_tx_resources(struct igb_adapter *, struct igb_ring *);
+static void igb_free_rx_resources(struct igb_adapter *, struct igb_ring *);
+void igb_update_stats(struct igb_adapter *);
+static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit igb_remove(struct pci_dev *pdev);
+static int igb_sw_init(struct igb_adapter *);
+static int igb_open(struct net_device *);
+static int igb_close(struct net_device *);
+static void igb_configure_tx(struct igb_adapter *);
+static void igb_configure_rx(struct igb_adapter *);
+static void igb_setup_rctl(struct igb_adapter *);
+static void igb_clean_all_tx_rings(struct igb_adapter *);
+static void igb_clean_all_rx_rings(struct igb_adapter *);
+static void igb_clean_tx_ring(struct igb_adapter *, struct igb_ring *);
+static void igb_clean_rx_ring(struct igb_adapter *, struct igb_ring *);
+static void igb_set_multi(struct net_device *);
+static void igb_update_phy_info(unsigned long);
+static void igb_watchdog(unsigned long);
+static void igb_watchdog_task(struct net_device *);
+static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
+                                   struct igb_ring *);
+static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static struct net_device_stats *igb_get_stats(struct net_device *);
+static int igb_change_mtu(struct net_device *, int);
+static int igb_set_mac(struct net_device *, void *);
+static irqreturn_t igb_intr(int irq, void *, struct pt_regs *);
+static irqreturn_t igb_intr_msi(int irq, void *, struct pt_regs *);
+static irqreturn_t igb_msix_other(int irq, void *, struct pt_regs *);
+static irqreturn_t igb_msix_rx(int irq, void *, struct pt_regs *);
+static int igb_clean_rx_ring_msix(struct net_device *, int *);
+static bool igb_clean_tx_irq(struct igb_adapter *, struct igb_ring *);
+static int igb_clean(struct net_device *, int *);
+static bool igb_clean_rx_irq_adv(struct igb_adapter *,
+                                 struct igb_ring *, int *, int);
+static void igb_alloc_rx_buffers_adv(struct igb_adapter *,
+                                     struct igb_ring *, int);
+static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
+void igb_set_ethtool_ops(struct net_device *);
+static void igb_tx_timeout(struct net_device *);
+static void igb_reset_task(struct net_device *);
+static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
+static void igb_vlan_rx_add_vid(struct net_device *, u16);
+static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_restore_vlan(struct igb_adapter *);
+
+static int igb_suspend(struct pci_dev *, pm_message_t);
+#ifdef CONFIG_PM
+static int igb_resume(struct pci_dev *);
+#endif
+static void igb_shutdown(struct pci_dev *);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / netconsole */
+static void igb_netpoll (struct net_device *);
+#endif
+
+extern void igb_check_options(struct igb_adapter *);
+
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
+                                              pci_channel_state_t);
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
+static void igb_io_resume(struct pci_dev *);
+
+static struct pci_error_handlers igb_err_handler = {
+	.error_detected = igb_io_error_detected,
+	.slot_reset = igb_io_slot_reset,
+	.resume = igb_io_resume,
+};
+
+static struct pci_driver igb_driver = {
+	.name     = igb_driver_name,
+	.id_table = igb_pci_tbl,
+	.probe    = igb_probe,
+	.remove   = __devexit_p(igb_remove),
+#ifdef CONFIG_PM
+	/* Power Management Hooks */
+	.suspend  = igb_suspend,
+	.resume   = igb_resume,
+#endif
+	.shutdown = igb_shutdown,
+	.err_handler = &igb_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * igb_init_module - Driver Registration Routine
+ *
+ * igb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igb_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "%s - version %s\n",
+	       igb_driver_string, igb_driver_version);
+
+	printk(KERN_INFO "%s\n", igb_copyright);
+
+	ret = pci_register_driver(&igb_driver);
+	return ret;
+}
+module_init(igb_init_module);
+
+/**
+ * igb_exit_module - Driver Exit Cleanup Routine
+ *
+ * igb_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit igb_exit_module(void)
+{
+	pci_unregister_driver(&igb_driver);
+}
+module_exit(igb_exit_module);
+
+/**
+ * igb_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+
+static int igb_alloc_queues(struct igb_adapter *adapter)
+{
+	int i;
+
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+	                           sizeof(struct igb_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		goto err;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+	                           sizeof(struct igb_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
+		goto err;
+
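+	/* Each Rx ring gets its own lightweight, unregistered net_device so
+	 * that this kernel's per-netdev NAPI ->poll interface can service
+	 * each queue independently. */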
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		ring->adapter = adapter;
+		ring->itr_register = E1000_ITR;
+		ring->netdev = kzalloc(sizeof(struct net_device), GFP_ATOMIC);
+		if (!ring->netdev)
+			goto err;
+		ring->netdev->priv = (void *)(adapter->rx_ring + i);
+		ring->netdev->poll = igb_clean;
+		ring->netdev->weight = adapter->netdev->weight /
+		                      adapter->num_rx_queues;
+		dev_hold(ring->netdev);
+		set_bit(__LINK_STATE_START, &ring->netdev->state);
+	}
+	return E1000_SUCCESS;
+err:
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		kfree(ring->netdev);
+	}
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+	return -ENOMEM;
+}
+
+static void igb_configure_lli(struct igb_adapter *adapter)
+{
+	u16 port;
+
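+	/* Low Latency Interrupt (LLI) filters: received packets matching a
+	 * configured port, the TCP PSH flag, or a size threshold raise an
+	 * immediate interrupt through the IMIR/IMIREXT registers instead of
+	 * waiting for the normal interrupt throttling interval. */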
+	if (adapter->lli_port) {
+		/* use filter 0 for port */
+		port = ntohs((u16)adapter->lli_port);
+		E1000_WRITE_REG(&adapter->hw, E1000_IMIR(0),
+			(port | E1000_IMIR_PORT_IM_EN));
+		E1000_WRITE_REG(&adapter->hw, E1000_IMIREXT(0),
+			(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+	}
+
+	if (adapter->flags & FLAG_IGB_LLI_PUSH) {
+		/* use filter 1 for push flag */
+		E1000_WRITE_REG(&adapter->hw, E1000_IMIR(1),
+			(E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
+		E1000_WRITE_REG(&adapter->hw, E1000_IMIREXT(1),
+			(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
+	}
+
+	if (adapter->lli_size) {
+		/* use filter 2 for size */
+		E1000_WRITE_REG(&adapter->hw, E1000_IMIR(2),
+			(E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
+		E1000_WRITE_REG(&adapter->hw, E1000_IMIREXT(2),
+			(adapter->lli_size | E1000_IMIREXT_CTRL_BP));
+	}
+
+}
+
+#define IGB_N0_QUEUE -1
+static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
+                              int tx_queue, int msix_vector)
+{
+	u32 msixbm = 0;
+	struct e1000_hw *hw = &adapter->hw;
+	/* The 82575 assigns vectors using a bitmask, which matches the
+	 * bitmask for the EICR/EIMS/EIMC registers.  To assign one or more
+	 * queues to a vector, we write the appropriate bits into the
+	 * MSIXBM register for that vector. */
+	if (rx_queue > IGB_N0_QUEUE) {
+		msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+		adapter->rx_ring[rx_queue].eims_value = msixbm;
+	}
+	if (tx_queue > IGB_N0_QUEUE) {
+		msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+		adapter->tx_ring[tx_queue].eims_value =
+		          E1000_EICR_TX_QUEUE0 << tx_queue;
+	}
+	E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM0, msix_vector, msixbm);
+}
+
+/**
+ * igb_configure_msix - Configure MSI-X hardware
+ *
+ * igb_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void igb_configure_msix(struct igb_adapter *adapter)
+{
+	u32 tmp;
+	int i, vector = 0;
+	struct e1000_hw *hw = &adapter->hw;
+
+	adapter->eims_enable_mask = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *rx_ring = &adapter->rx_ring[i];
+		if (i < adapter->num_tx_queues) {
+			igb_assign_vector(adapter, i, i, vector++);
+			rx_ring->buddy = &adapter->tx_ring[i];
+			rx_ring->eims_value |= adapter->tx_ring[i].eims_value;
+		} else
+			igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
+		adapter->eims_enable_mask |= rx_ring->eims_value;
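+		/* itr_val is an interrupts/sec target; the EITR register
+		 * counts in 256 ns units, hence 10^9 / (itr * 256) below */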
+		if (rx_ring->itr_val)
+			writel(1000000000 / (rx_ring->itr_val * 256),
+			       hw->hw_addr + rx_ring->itr_register);
+		else
+			writel(1, hw->hw_addr + rx_ring->itr_register);
+	}
+
+	/* set vector for other causes, i.e. link changes */
+	E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM0, vector++,
+	                      E1000_EIMS_OTHER);
+
+	/* disable IAM for ICR interrupt bits */
+	E1000_WRITE_REG(hw, E1000_IAM, 0);
+
+	tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	/* enable MSI-X PBA support */
+	tmp |= E1000_CTRL_EXT_PBA_CLR;
+
+	/* Auto-Mask interrupts upon ICR read. */
+	tmp |= E1000_CTRL_EXT_EIAME;
+	tmp |= E1000_CTRL_EXT_IRCA;
+
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+	adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * igb_request_msix - Initialize MSI-X interrupts
+ *
+ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int igb_request_msix(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i, err = 0, vector = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		if (strlen(netdev->name) < (IFNAMSIZ - 5))
+			sprintf(ring->netdev->name, "%s-Q%d", netdev->name, i);
+		else
+			memcpy(ring->netdev->name, netdev->name, IFNAMSIZ);
+		if ((err = request_irq(adapter->msix_entries[vector].vector,
+		                       &igb_msix_rx, 0, ring->netdev->name,
+		                       &(adapter->rx_ring[i]))))
+			goto out;
+		ring->itr_register = E1000_EITR0 + (vector << 2);
+		ring->itr_val = adapter->itr;
+		vector++;
+	}
+
+	if ((err = request_irq(adapter->msix_entries[vector].vector,
+	                       &igb_msix_other, 0, netdev->name, netdev)))
+		goto out;
+
+
+	adapter->netdev->poll = igb_clean_rx_ring_msix;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].netdev->poll = igb_clean_rx_ring_msix;
+	igb_configure_msix(adapter);
+	return 0;
+out:
+	return err;
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+	if (adapter->msix_entries) {
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & FLAG_IGB_HAS_MSI)
+		pci_disable_msi(adapter->pdev);
+	return;
+}
+
+
+/**
+ * igb_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+{
+	int err;
+	int numvecs, i;
+
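+	/* one MSI-X vector per Rx queue plus one for "other" causes
+	 * such as link state changes */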
+	numvecs = adapter->num_rx_queues + 1;
+	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+	                                GFP_KERNEL);
+	if (!adapter->msix_entries)
+		goto msi_only;
+
+	for (i = 0; i < numvecs; i++)
+		adapter->msix_entries[i].entry = i;
+
+	err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+	                      numvecs);
+	if (!err)
+		return;
+
+	igb_reset_interrupt_capability(adapter);
+
+	/* If we can't do MSI-X, try MSI */
+msi_only:
+	adapter->num_rx_queues = 1;
+	if (!pci_enable_msi(adapter->pdev))
+		adapter->flags |= FLAG_IGB_HAS_MSI;
+	return;
+}
+
+/**
+ * igb_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int igb_request_irq(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (adapter->msix_entries) {
+		err = igb_request_msix(adapter);
+		if (!err)
+			goto request_done;
+		/* fall back to MSI */
+		igb_reset_interrupt_capability(adapter);
+		if (!pci_enable_msi(adapter->pdev))
+			adapter->flags |= FLAG_IGB_HAS_MSI;
+		igb_free_all_tx_resources(adapter);
+		igb_free_all_rx_resources(adapter);
+		adapter->num_rx_queues = 1;
+		igb_alloc_queues(adapter);
+	}
+	if (adapter->flags & FLAG_IGB_HAS_MSI) {
+		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
+		                  netdev->name, netdev);
+		if (!err)
+			goto request_done;
+		/* fall back to legacy interrupts */
+		igb_reset_interrupt_capability(adapter);
+		adapter->flags &= ~FLAG_IGB_HAS_MSI;
+	}
+
+	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
+	                  netdev->name, netdev);
+
+	if (err)
+		DPRINTK(PROBE, ERR, "Error %d getting interrupt\n", err);
+
+request_done:
+	return err;
+}
+
+static void igb_free_irq(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (adapter->msix_entries) {
+		int vector = 0, i;
+
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			free_irq(adapter->msix_entries[vector++].vector,
+				&(adapter->rx_ring[i]));
+
+		free_irq(adapter->msix_entries[vector++].vector, netdev);
+		return;
+	}
+
+	free_irq(adapter->pdev->irq, netdev);
+}
+
+/**
+ * igb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void igb_irq_disable(struct igb_adapter *adapter)
+{
+	if (adapter->msix_entries) {
+		E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
+		E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
+	}
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * igb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void igb_irq_enable(struct igb_adapter *adapter)
+{
+	if (adapter->msix_entries) {
+		E1000_WRITE_REG(&adapter->hw, E1000_EIMS,
+		                adapter->eims_enable_mask);
+		E1000_WRITE_REG(&adapter->hw, E1000_EIAC,
+		                adapter->eims_enable_mask);
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
+	} else
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
+}
+
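+/* Keep the VLAN id carried in the manageability firmware's DHCP cookie
+ * programmed into the VLAN filter, and drop the previously used id once
+ * neither the firmware nor the stack still needs it. */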
+static void igb_update_mng_vlan(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+	if (adapter->vlgrp) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+			if (adapter->hw.mng_cookie.status &
+				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+				igb_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else
+				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+
+			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
+			    !vlan_group_get_device(adapter->vlgrp, old_vid))
+				igb_vlan_rx_kill_vid(netdev, old_vid);
+		} else
+			adapter->mng_vlan_id = vid;
+	}
+}
+
+/**
+ * igb_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ *
+ **/
+static void igb_release_hw_control(struct igb_adapter *adapter)
+{
+	u32 ctrl_ext;
+
+	/* Let firmware take over control of h/w */
+	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+	                ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+
+/**
+ * igb_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
+ *
+ **/
+static void igb_get_hw_control(struct igb_adapter *adapter)
+{
+	u32 ctrl_ext;
+
+	/* Let firmware know the driver has taken over */
+	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+	                ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+static void igb_init_manageability(struct igb_adapter *adapter)
+{
+	if (adapter->en_mng_pt) {
+		u32 manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
+		u32 manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+		/* disable hardware interception of ARP */
+		manc &= ~(E1000_MANC_ARP_EN);
+
+		/* enable receiving management packets to the host */
+		/* this will probably generate destination unreachable messages
+		 * from the host OS, but the packets will be handled on SMBUS */
+		manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
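+		/* UDP ports 623 and 664 are the standard ASF/RMCP management
+		 * ports */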
+		manc2h |= E1000_MNG2HOST_PORT_623;
+		manc2h |= E1000_MNG2HOST_PORT_664;
+		E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
+
+		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+	}
+}
+
+static void igb_release_manageability(struct igb_adapter *adapter)
+{
+	if (adapter->en_mng_pt) {
+		u32 manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+		/* re-enable hardware interception of ARP */
+		manc |= E1000_MANC_ARP_EN;
+		manc &= ~E1000_MANC_EN_MNG2HOST;
+
+		/* we don't have to explicitly touch MANC2H since
+		 * MANC has an enable/disable bit that gates MANC2H */
+
+		/* XXX stop the hardware watchdog ? */
+		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+	}
+}
+
+/**
+ * igb_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ **/
+static void igb_configure(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	igb_get_hw_control(adapter);
+	igb_set_multi(netdev);
+
+	igb_restore_vlan(adapter);
+	igb_init_manageability(adapter);
+
+	igb_configure_tx(adapter);
+	igb_setup_rctl(adapter);
+	igb_configure_rx(adapter);
+	/* call IGB_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &adapter->rx_ring[i];
+		igb_alloc_rx_buffers_adv(adapter, ring, IGB_DESC_UNUSED(ring));
+	}
+
+
+	adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+
+/**
+ * igb_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
+ **/
+int igb_up(struct igb_adapter *adapter)
+{
+
+	/* hardware has been reset, we need to reload some things */
+	igb_configure(adapter);
+
+	clear_bit(__IGB_DOWN, &adapter->state);
+
+	netif_poll_enable(adapter->netdev);
+
+	if (adapter->msix_entries)
+		igb_configure_msix(adapter);
+	igb_configure_lli(adapter);
+	/* Clear any pending interrupts. */
+	E1000_READ_REG(&adapter->hw, E1000_ICR);
+	igb_irq_enable(adapter);
+
+	/* Fire a link change interrupt to start the watchdog. */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+	return 0;
+}
+
+void igb_down(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl, rctl;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__IGB_DOWN, &adapter->state);
+
+	/* disable receives in the hardware */
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+	netif_tx_disable(netdev);
+
+	/* disable transmits in the hardware */
+	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	E1000_WRITE_FLUSH(&adapter->hw);
+	msleep(10);
+
+	netif_poll_disable(netdev);
+	igb_irq_disable(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	netdev->tx_queue_len = adapter->tx_queue_len;
+	netif_carrier_off(netdev);
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	igb_reset(adapter);
+	igb_clean_all_tx_rings(adapter);
+	igb_clean_all_rx_rings(adapter);
+}
+
+void igb_reinit_locked(struct igb_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+	igb_down(adapter);
+	igb_up(adapter);
+	clear_bit(__IGB_RESETTING, &adapter->state);
+}
+
+void igb_reset(struct igb_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+	u16 hwm;
+
+	/* Repartition the PBA for MTUs greater than 9k.
+	 * CTRL.RST is required for the change to take effect.
+	 */
+	pba = E1000_PBA_34K;
+
+	if (mac->max_frame_size > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+		/* adjust PBA for jumbo frames */
+		E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = E1000_READ_REG(&adapter->hw, E1000_PBA);
+		/* the upper 16 bits hold the Tx packet buffer allocation in KB */
+		tx_space = pba >> 16;
+		/* the lower 16 bits hold the Rx packet buffer allocation in KB */
+		pba &= 0xffff;
+		/* the Tx FIFO also stores 16 bytes of information about each Tx
+		 * packet, but we don't include the Ethernet FCS because the
+		 * hardware appends it */
+		min_tx_space = (mac->max_frame_size +
+		                sizeof(struct e1000_tx_desc) -
+		                ETHERNET_FCS_SIZE) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = mac->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment */
+			if (pba < min_rx_space)
+				pba = min_rx_space;
+		}
+	}
+	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+
+	/* flow control settings */
+	/* The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, or
+	 * - the full Rx FIFO size minus one full frame */
+	hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - mac->max_frame_size));
+
+	mac->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
+	mac->fc_low_water = mac->fc_high_water - 8;
+	mac->fc_pause_time = 0xFFFF;
+	mac->fc_send_xon = 1;
+	mac->fc = mac->original_fc;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(&adapter->hw);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+
+	if (e1000_init_hw(&adapter->hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+	igb_update_mng_vlan(adapter);
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	e1000_reset_adaptive(&adapter->hw);
+	e1000_get_phy_info(&adapter->hw);
+	igb_release_manageability(adapter);
+}
+
+/**
+ * igb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igb_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit igb_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct igb_adapter *adapter;
+	unsigned long mmio_start, mmio_len;
+
+	static int cards_found;
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = IGB_EEPROM_APME;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	pci_using_dac = 0;
+	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	if (!err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (!err)
+			pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+			                                  DMA_32BIT_MASK);
+			if (err) {
+				IGB_ERR("No usable DMA configuration, "
+				        "aborting\n");
+				goto err_dma;
+			}
+		}
+	}
+
+	err = pci_request_regions(pdev, igb_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct igb_adapter));
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	SET_MODULE_OWNER(netdev);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
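+	/* the "debug" module parameter is a message level; (1 << debug) - 1
+	 * expands it into the matching NETIF_MSG_* bitmask */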
+	adapter->msg_enable = (1 << debug) - 1;
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+
+	err = -EIO;
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr)
+		goto err_ioremap;
+
+	netdev->open = &igb_open;
+	netdev->stop = &igb_close;
+	netdev->get_stats = &igb_get_stats;
+	netdev->set_multicast_list = &igb_set_multi;
+	netdev->set_mac_address = &igb_set_mac;
+	netdev->change_mtu = &igb_change_mtu;
+	netdev->do_ioctl = &igb_ioctl;
+	igb_set_ethtool_ops(netdev);
+	netdev->tx_timeout = &igb_tx_timeout;
+	netdev->watchdog_timeo = 5 * HZ;
+	netdev->poll = &igb_clean;
+	netdev->weight = 64;
+	netdev->vlan_rx_register = igb_vlan_rx_register;
+	netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
+	netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = igb_netpoll;
+#endif
+	netdev->hard_start_xmit = &igb_xmit_frame_adv;
+
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+	err = igb_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	e1000_get_bus_info(&adapter->hw);
+
+	adapter->hw.phy.wait_for_link = 0;
+	adapter->hw.mac.adaptive_ifs = 1;
+
+	/* Copper options */
+	if (adapter->hw.media_type == e1000_media_type_copper) {
+		adapter->hw.phy.mdix = AUTO_ALL_MODES;
+		adapter->hw.phy.disable_polarity_correction = 0;
+		adapter->hw.phy.ms_type = e1000_ms_hw_default;
+	}
+
+	if (e1000_check_reset_block(&adapter->hw))
+		DPRINTK(PROBE, INFO,
+		        "PHY reset is blocked due to SOL/IDER session.\n");
+
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_HW_CSUM |
+			   NETIF_F_HW_VLAN_TX |
+			   NETIF_F_HW_VLAN_RX |
+			   NETIF_F_HW_VLAN_FILTER;
+
+	netdev->features |= NETIF_F_TSO;
+
+	netdev->features |= NETIF_F_TSO6;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+
+	/* before reading the NVM, reset the controller to put the device in a
+	 * known good starting state */
+	e1000_reset_hw(&adapter->hw);
+
+	/* make sure the NVM is good */
+	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+		DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* copy the MAC address out of the NVM */
+	if (e1000_read_mac_addr(&adapter->hw))
+		DPRINTK(PROBE, ERR, "NVM Read Error\n");
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &igb_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_info_timer);
+	adapter->phy_info_timer.function = &igb_update_phy_info;
+	adapter->phy_info_timer.data = (unsigned long) adapter;
+
+	INIT_WORK(&adapter->reset_task, 
+			(void (*)(void *))igb_reset_task, netdev);
+	INIT_WORK(&adapter->watchdog_task, 
+			(void (*)(void *))igb_watchdog_task, netdev);
+
+	igb_check_options(adapter);
+
+	/* Initial Wake on LAN setting.  If APM wake is enabled in the
+	 * EEPROM, enable the ACPI Magic Packet filter.
+	 */
+
+	if (adapter->hw.bus.func == 0)
+		e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1,
+		               &eeprom_data);
+
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/* now that we have the eeprom settings, apply the special cases where
+	 * the eeprom may be wrong or the board simply won't support wake on
+	 * lan on a particular port */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting */
+		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		    E1000_STATUS_FUNC_1)
+			adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		adapter->eeprom_wol = 0;
+		break;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+
+	/* reset the hardware with the new settings */
+	igb_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver. */
+	igb_get_hw_control(adapter);
+
+	/* tell the stack to leave us alone until igb_open() is called */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+	netif_poll_disable(netdev);
+
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+
+	DPRINTK(PROBE, INFO, "Intel(R) Gigabit Ethernet Network Connection\n");
+	/* print bus type/speed/width info */
+	{
+	struct e1000_hw *hw = &adapter->hw;
+	DPRINTK(PROBE, INFO, "(PCIe:%s:%s) ",
+	      ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : "unknown"),
+	      ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+	       (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+	        "unknown"));
+	}
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	DPRINTK(PROBE, INFO,
+	        "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+	        adapter->msix_entries ? "MSI-X" :
+	        (adapter->flags & FLAG_IGB_HAS_MSI) ? "MSI" :
+	        "legacy",
+	        adapter->num_rx_queues, adapter->num_tx_queues);
+
+	cards_found++;
+	return 0;
+
+err_register:
+	igb_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+
+	e1000_remove_device(&adapter->hw);
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit igb_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	/* flush_scheduled_work() may reschedule our watchdog task, so
+	 * explicitly disable the watchdog tasks from being rescheduled */
+	set_bit(__IGB_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	flush_scheduled_work();
+
+	igb_release_manageability(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	igb_release_hw_control(adapter);
+
+	unregister_netdev(netdev);
+
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	e1000_remove_device(&adapter->hw);
+	igb_reset_interrupt_capability(adapter);
+
+	kfree(adapter->rx_ring->netdev);
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_regions(pdev);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit igb_sw_init(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	adapter->rx_ps_hdr_size = 0; /* disable packet split */
+	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+	/* Initialize the hardware-specific values */
+	if (e1000_setup_init_funcs(hw, 1)) {
+		DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
+		return -EIO;
+	}
+
+	/* Number of supported queues. */
+	/* Having more queues than CPUs doesn't make sense. */
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+
+	igb_set_interrupt_capability(adapter);
+
+	if (igb_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	igb_irq_disable(adapter);
+
+	set_bit(__IGB_DOWN, &adapter->state);
+	return 0;
+}
+
+
+
+/**
+ * igb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int igb_open(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__IGB_TESTING, &adapter->state))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+	err = igb_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = igb_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/*
+	 * TODO: We currently don't power up/down the PHY
+	 */
+	/* e1000_power_up_phy(adapter); */
+
+	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) {
+		igb_update_mng_vlan(adapter);
+	}
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to setup our
+	 * clean_rx handler before we do so.  */
+	igb_configure(adapter);
+
+	err = igb_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* From here on the code is the same as igb_up() */
+	clear_bit(__IGB_DOWN, &adapter->state);
+
+	netif_poll_enable(netdev);
+
+	igb_configure_lli(adapter);
+	igb_irq_enable(adapter);
+
+	/* Clear any pending interrupts. */
+	E1000_READ_REG(&adapter->hw, E1000_ICR);
+	/* Fire a link status change interrupt to start the watchdog. */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+
+	return E1000_SUCCESS;
+
+err_req_irq:
+	igb_release_hw_control(adapter);
+	/* e1000_power_down_phy(adapter); */
+	igb_free_all_rx_resources(adapter);
+err_setup_rx:
+	igb_free_all_tx_resources(adapter);
+err_setup_tx:
+	igb_reset(adapter);
+
+	return err;
+}
+
+/**
+ * igb_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int igb_close(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+	igb_down(adapter);
+
+	igb_free_irq(adapter);
+
+	igb_free_all_tx_resources(adapter);
+	igb_free_all_rx_resources(adapter);
+
+	/* kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it) */
+	if ((adapter->hw.mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	     !(adapter->vlgrp &&
+	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+	}
+
+	return 0;
+}
+
+/**
+ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int igb_setup_tx_resources(struct igb_adapter *adapter,
+                           struct igb_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct igb_buffer) * tx_ring->count;
+	tx_ring->buffer_info = vmalloc(size);
+	if (!tx_ring->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(tx_ring->buffer_info, 0, size);
+
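+	/* the extra u32 past the descriptors appears to reserve space for the
+	 * DMA head write-back word programmed via TDWBAL in igb_configure_tx() */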
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
+	                + sizeof(u32);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	                                     &tx_ring->dma);
+
+	if (!tx_ring->desc) {
+		vfree(tx_ring->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	tx_ring->adapter = adapter;
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	spin_lock_init(&tx_ring->tx_clean_lock);
+	return 0;
+}
+
+/**
+ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_tx_resources(adapter,
+							&adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+	u64 tdba, tdwba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl, offset;
+	u32 txdctl;
+	int i;
+
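+	/* per-queue Tx registers are spaced 0x100 bytes apart on the 82575 */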
+	offset = 0x100;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *ring = &(adapter->tx_ring[i]);
+		writel(ring->count * sizeof(struct e1000_tx_desc),
+			(hw->hw_addr + E1000_REGISTER(hw, E1000_TDLEN) +
+			(offset * i)));
+		tdba = ring->dma;
+		writel((tdba & 0x00000000ffffffffULL),
+		       (hw->hw_addr + E1000_REGISTER(hw, E1000_TDBAL) +
+		        (offset * i)));
+		writel((tdba >> 32),
+		       (hw->hw_addr + E1000_REGISTER(hw, E1000_TDBAH) +
+		        (offset * i)));
+
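+		/* the head write-back address points just past the descriptor
+		 * ring; with write-back enabled the hardware DMAs the consumed
+		 * head index there, so the driver does not have to read TDH
+		 * across the bus */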
+		tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
+		tdwba |= 1; /* enable head wb */
+		writel((tdwba & 0x00000000ffffffffULL),
+		       (hw->hw_addr + E1000_REGISTER(hw, E1000_TDWBAL0) +
+		        (offset * i)));
+		writel((tdwba >> 32),
+		       (hw->hw_addr + E1000_REGISTER(hw, E1000_TDWBAH0) +
+		        (offset * i)));
+
+		ring->head = E1000_REGISTER(hw, E1000_TDH) + (offset * i);
+		ring->tail = E1000_REGISTER(hw, E1000_TDT) + (offset * i);
+		writel(0, hw->hw_addr + ring->tail);
+		writel(0, hw->hw_addr + ring->head);
+		txdctl = readl(hw->hw_addr + E1000_REGISTER(hw, E1000_TXDCTL) +
+		               (offset * i));
+		/* enable burst write back of (at least 1) completed
+		 * descriptor, if this is not done no bursts are done
+		 * and the part writes back each descriptor with an RS
+		 * bit set, one at a time */
+#define IGB_TXDCTL_WB_SHIFT 16
+		txdctl |= (1 << IGB_TXDCTL_WB_SHIFT);
+		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+		writel(txdctl, (hw->hw_addr + E1000_REGISTER(hw, E1000_TXDCTL) +
+		                (offset * i)));
+	}
+
+	/* Use the default values for the Tx Inter Packet Gap (IPG) timer */
+
+	/* Program the Transmit Control Register */
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
+
+	/* Enable transmits */
+	tctl |= E1000_TCTL_EN;
+
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int igb_setup_rx_resources(struct igb_adapter *adapter,
+                           struct igb_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct igb_buffer) * rx_ring->count;
+	rx_ring->buffer_info = vmalloc(size);
+	if (!rx_ring->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rx_ring->buffer_info, 0, size);
+
+	desc_len = sizeof(union e1000_adv_rx_desc);
+
+	/* Round up to nearest 4K */
+
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+	                                     &rx_ring->dma);
+
+	if (!rx_ring->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		vfree(rx_ring->buffer_info);
+		return -ENOMEM;
+	}
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->pending_skb = NULL;
+
+	rx_ring->adapter = adapter;
+	rx_ring->netdev->priv = rx_ring;
+	rx_ring->netdev->poll = adapter->netdev->poll;
+	rx_ring->netdev->weight = 64;
+	set_bit(__LINK_STATE_START, &rx_ring->netdev->state);
+
+	return 0;
+}
+
+/**
+ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		dev_hold(adapter->rx_ring[i].netdev);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_rx_resources(adapter,
+							&adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * igb_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_rctl(struct igb_adapter *adapter)
+{
+	u32 rctl;
+	u32 srrctl = 0;
+	int i;
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* disable the stripping of CRC because it breaks
+	 * BMC firmware connected over SMBUS
+	rctl |= E1000_RCTL_SECRC;
+	*/
+
+	rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+	if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
+		/* Setup buffer sizes */
+		rctl &= ~E1000_RCTL_SZ_4096;
+		rctl |= E1000_RCTL_BSEX;
+		switch (adapter->rx_buffer_len) {
+		case IGB_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case IGB_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case IGB_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case IGB_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case IGB_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case IGB_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case IGB_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+		}
+	} else {
+		rctl &= ~E1000_RCTL_BSEX;
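+		/* the SRRCTL packet buffer size field is expressed in 1 KB
+		 * units, hence the right shift by E1000_SRRCTL_BSIZEPKT_SHIFT */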
+		srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	}
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+	/* 82575 and greater support packet-split where the protocol
+	 * header is placed in skb->data and the packet data is
+	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+	 * In the case of a non-split, skb->data is linearly filled,
+	 * followed by the page buffers.  Therefore, skb->data is
+	 * sized to hold the largest protocol header.
+	 */
+	/* allocations using alloc_page take too long for regular MTU
+	 * so only enable packet split for jumbo frames */
+	if (rctl & E1000_RCTL_LPE) {
+		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
+		srrctl = adapter->rx_ps_hdr_size <<
+		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+		/* buffer size is ALWAYS one page */
+		srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+	} else {
+		adapter->rx_ps_hdr_size = 0;
+		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		int offset = 0x100;
+		writel(srrctl, (adapter->hw.hw_addr +
+		                E1000_REGISTER(&adapter->hw, E1000_SRRCTL0) +
+		                (i * offset)));
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+}
+
+/**
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+	u64 rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rdlen, rctl, rxcsum;
+	u32 rxdctl;
+	int i;
+	u32 offset;
+
+	/* disable receives while setting up the descriptors */
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	E1000_WRITE_FLUSH(hw);
+	mdelay(10);
+
+	rdlen = adapter->rx_ring[0].count * sizeof(union e1000_adv_rx_desc);
+
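+	/* itr_setting above 3 means a fixed interrupts/sec rate was
+	 * requested; convert it to the ITR register's 256 ns units */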
+	if (adapter->itr_setting > 3)
+		E1000_WRITE_REG(hw, E1000_ITR,
+	                        1000000000 / (adapter->itr * 256));
+
+	offset = 0x100;
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		rdba = ring->dma;
+		writel((rdba & 0x00000000ffffffffULL),
+			(hw->hw_addr + E1000_REGISTER(hw, E1000_RDBAL) +
+			(offset * i)));
+		writel((rdba >> 32),
+		       (hw->hw_addr + E1000_REGISTER(hw, E1000_RDBAH) +
+		        (offset * i)));
+		writel(rdlen, (hw->hw_addr + E1000_REGISTER(hw, E1000_RDLEN) +
+		               (offset * i)));
+		writel(0, (hw->hw_addr + E1000_REGISTER(hw, E1000_RDH) +
+		           (offset * i)));
+		writel(0, (hw->hw_addr + E1000_REGISTER(hw, E1000_RDT) +
+		           (offset * i)));
+		ring->head = E1000_REGISTER(hw, E1000_RDH) + (offset * i);
+		ring->tail = E1000_REGISTER(hw, E1000_RDT) + (offset * i);
+		rxdctl = readl(hw->hw_addr +
+		               E1000_REGISTER(hw, E1000_RXDCTL) +
+		               (i * offset));
+
+		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+		rxdctl &= 0xFFF00000;
+		rxdctl |= IGB_RX_PTHRESH;
+		rxdctl |= IGB_RX_HTHRESH << 8;
+		rxdctl |= IGB_RX_WTHRESH << 16;
+		writel(rxdctl, (hw->hw_addr +
+		                E1000_REGISTER(hw, E1000_RXDCTL) +
+		                (i * offset)));
+	}
+
+	{
+		/* Enable Receive Checksum Offload for TCP and UDP */
+		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		if (adapter->rx_csum == 1) {
+			rxcsum |= E1000_RXCSUM_TUOFL;
+
+			/* Enable IPv4 payload checksum for UDP fragments
+			 * Must be used in conjunction with packet-split. */
+			if (adapter->rx_ps_hdr_size)
+				rxcsum |= E1000_RXCSUM_IPPCSE;
+		} else {
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
+		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+	}
+
+	if (adapter->vlgrp)
+		E1000_WRITE_REG(hw, E1000_RLPML,
+		                hw->mac.max_frame_size + VLAN_TAG_SIZE);
+	else
+		E1000_WRITE_REG(hw, E1000_RLPML, hw->mac.max_frame_size);
+
+	/* Enable Receives */
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/**
+ * igb_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void igb_free_tx_resources(struct igb_adapter *adapter,
+                                  struct igb_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	igb_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * igb_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void igb_free_all_tx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
+                                           struct igb_buffer *buffer_info)
+{
+	if (buffer_info->dma) {
+		pci_unmap_page(adapter->pdev,
+				buffer_info->dma,
+				buffer_info->length,
+				PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
+	}
+	if (buffer_info->skb) {
+		dev_kfree_skb_any(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	buffer_info->time_stamp = 0;
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * igb_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void igb_clean_tx_ring(struct igb_adapter *adapter,
+                              struct igb_ring *tx_ring)
+{
+	struct igb_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	if (!tx_ring->buffer_info)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		igb_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct igb_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->head);
+	writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * igb_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void igb_free_rx_resources(struct igb_adapter *adapter,
+                                  struct igb_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	igb_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * igb_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void igb_free_all_rx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		igb_free_rx_resources(adapter, &adapter->rx_ring[i]);
+		dev_put(adapter->rx_ring[i].netdev);
+	}
+}
+
+/**
+ * igb_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void igb_clean_rx_ring(struct igb_adapter *adapter,
+                              struct igb_ring *rx_ring)
+{
+	struct igb_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	if (!rx_ring->buffer_info)
+		return;
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->dma) {
+			if (adapter->rx_ps_hdr_size) {
+				pci_unmap_single(pdev, buffer_info->dma,
+				                 adapter->rx_ps_hdr_size,
+				                 PCI_DMA_FROMDEVICE);
+			} else {
+				pci_unmap_single(pdev, buffer_info->dma,
+				                 adapter->rx_buffer_len,
+				                 PCI_DMA_FROMDEVICE);
+			}
+			buffer_info->dma = 0;
+		}
+
+		if (buffer_info->skb) {
+			dev_kfree_skb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+		if (buffer_info->page) {
+			pci_unmap_page(pdev, buffer_info->page_dma,
+			               PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			put_page(buffer_info->page);
+			buffer_info->page = NULL;
+			buffer_info->page_dma = 0;
+		}
+	}
+
+	/* there also may be some cached data from a chained receive */
+	if (rx_ring->pending_skb) {
+		dev_kfree_skb(rx_ring->pending_skb);
+		rx_ring->pending_skb = NULL;
+	}
+
+	size = sizeof(struct igb_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->head);
+	writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * igb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igb_set_mac(struct net_device *netdev, void *p)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+	return 0;
+}
+
+/**
+ * igb_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_multi(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct dev_mc_list *mc_ptr;
+	u8  *mta_list;
+	u32 rctl;
+	int i;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+	if (netdev->flags & IFF_PROMISC)
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+	else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= E1000_RCTL_MPE;
+		rctl &= ~E1000_RCTL_UPE;
+	} else
+		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	if (netdev->mc_count) {
+		mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
+		if (!mta_list)
+			return;
+
+		/* The shared function expects a packed array of only addresses. */
+		mc_ptr = netdev->mc_list;
+
+		for (i = 0; i < netdev->mc_count; i++) {
+			if (!mc_ptr)
+				break;
+			memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+			mc_ptr = mc_ptr->next;
+		}
+		e1000_mc_addr_list_update(hw, mta_list, i, 1, mac->rar_entry_count);
+		kfree(mta_list);
+	} else {
+		e1000_mc_addr_list_update(hw, NULL, 0, 1, mac->rar_entry_count);
+	}
+
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+static void igb_update_phy_info(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
+	e1000_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igb_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igb_watchdog(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *)data;
+	/* Do the rest outside of interrupt context */
+	schedule_work(&adapter->watchdog_task);
+}
+
+static void igb_watchdog_task(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_ring *tx_ring = adapter->tx_ring;
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	u32 link;
+	s32 ret_val;
+
+	if ((netif_carrier_ok(netdev)) &&
+	    (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU))
+		goto link_up;
+
+	ret_val = e1000_check_for_link(&adapter->hw);
+	if ((ret_val == E1000_ERR_PHY) &&
+	    (adapter->hw.phy.type == e1000_phy_igp_3) &&
+	    (E1000_READ_REG(&adapter->hw, E1000_CTRL) &
+	     E1000_PHY_CTRL_GBE_DISABLE)) {
+		DPRINTK(LINK, INFO,
+		        "Gigabit has been disabled, downgrading speed\n");
+	}
+
+	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+	    !(E1000_READ_REG(&adapter->hw, E1000_TXCW) & E1000_TXCW_ANE))
+		link = mac->serdes_has_link;
+	else
+		link = E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		                      E1000_STATUS_LU;
+
+	if (link) {
+		if (!netif_carrier_ok(netdev)) {
+			u32 ctrl;
+			e1000_get_speed_and_duplex(&adapter->hw,
+			                           &adapter->link_speed,
+			                           &adapter->link_duplex);
+
+			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
+			        "Flow Control: %s\n",
+			        adapter->link_speed,
+			        adapter->link_duplex == FULL_DUPLEX ?
+			        "Full Duplex" : "Half Duplex",
+			        ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+			        E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+			        E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+			/* tweak tx_queue_len according to speed/duplex and
+			 * adjust the timeout factor */
+			netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 14;
+				break;
+			case SPEED_100:
+				netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+
+			if (!test_bit(__IGB_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+				          round_jiffies(jiffies + 2 * HZ));
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+			if (!test_bit(__IGB_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+				          round_jiffies(jiffies + 2 * HZ));
+		}
+	}
+
+link_up:
+	igb_update_stats(adapter);
+
+	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+	adapter->gorcl_old = adapter->stats.gorcl;
+	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+	adapter->gotcl_old = adapter->stats.gotcl;
+
+	e1000_update_adaptive(&adapter->hw);
+
+	if (!netif_carrier_ok(netdev)) {
+		if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+		}
+	}
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	tx_ring->detect_tx_hung = 1;
+
+	/* Reset the timer */
+	if (!test_bit(__IGB_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer,
+		          round_jiffies(jiffies + 2 * HZ));
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
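+
+/* Each latency range maps to a target interrupt rate in igb_set_itr():
+ * lowest_latency -> 70000, low_latency -> 20000, bulk_latency -> 4000 ints/s */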
+
+/**
+ * igb_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      This functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see igb_param.c).
+ *      NOTE:  These calculations are only valid when operating in a single-
+ *             queue environment.
+ **/
+static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
+                                   int packets, int bytes)
+{
+	unsigned int retval = itr_setting;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		/* handle TSO and jumbo frames */
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512)) {
+			retval = low_latency;
+		}
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* this if handles the TSO accounting */
+			if (bytes/packets > 8000) {
+				retval = bulk_latency;
+			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
+				retval = bulk_latency;
+			} else if ((packets > 35)) {
+				retval = lowest_latency;
+			}
+		} else if (bytes/packets > 2000) {
+			retval = bulk_latency;
+		} else if (packets <= 2 && bytes < 512) {
+			retval = lowest_latency;
+		}
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35) {
+				retval = low_latency;
+			}
+		} else if (bytes < 6000) {
+			retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register, int rx_only)
+{
+	u16 current_itr;
+	u32 new_itr = adapter->itr;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (adapter->link_speed != SPEED_1000) {
+		current_itr = 0;
+		new_itr = 4000;
+		goto set_itr_now;
+	}
+
+	adapter->rx_itr = igb_update_itr(adapter,
+	                            adapter->rx_itr,
+	                            adapter->rx_ring->total_packets,
+	                            adapter->rx_ring->total_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+		adapter->rx_itr = low_latency;
+
+	if (!rx_only) {
+		adapter->tx_itr = igb_update_itr(adapter,
+		                            adapter->tx_itr,
+		                            adapter->tx_ring->total_packets,
+		                            adapter->tx_ring->total_bytes);
+		/* conservative mode (itr 3) eliminates the lowest_latency setting */
+		if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+			adapter->tx_itr = low_latency;
+
+		current_itr = max(adapter->rx_itr, adapter->tx_itr);
+	} else {
+		current_itr = adapter->rx_itr;
+	}
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 70000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+		new_itr = 4000;
+		break;
+	default:
+		break;
+	}
+
+set_itr_now:
+	if (new_itr != adapter->itr) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing */
+		new_itr = new_itr > adapter->itr ?
+		             min(adapter->itr + (new_itr >> 2), new_itr) :
+		             new_itr;
+		/* Don't write the value here; it resets the adapter's
+		 * internal timer, and causes us to delay far longer than
+		 * we should between interrupts.  Instead, we write the ITR
+		 * value at the beginning of the next interrupt so the timing
+		 * ends up being correct.
+		 */
+		adapter->itr = new_itr;
+		adapter->set_itr = 1;
+	}
+
+	return;
+}
+
+#define IGB_TX_FLAGS_CSUM		0x00000001
+#define IGB_TX_FLAGS_VLAN		0x00000002
+#define IGB_TX_FLAGS_TSO		0x00000004
+#define IGB_TX_FLAGS_IPV4		0x00000008
+#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT	16
+
+static inline int igb_tso_adv(struct igb_adapter *adapter,
+                              struct igb_ring *tx_ring,
+                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+	struct e1000_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	int err;
+	struct igb_buffer *buffer_info;
+	u32 info = 0, tu_cmd = 0;
+	u32 mss_l4len_idx, l4len;
+	*hdr_len = 0;
+
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
+
+	l4len = skb->h.th->doff << 2;
+	*hdr_len += l4len;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		skb->nh.iph->tot_len = 0;
+		skb->nh.iph->check = 0;
+		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
+						      skb->nh.iph->daddr, 0,
+						      IPPROTO_TCP,
+						      0);
+	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+		skb->nh.ipv6h->payload_len = 0;
+		skb->h.th->check = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+						    &skb->nh.ipv6h->daddr,
+						    0, IPPROTO_TCP, 0);
+	}
+
+	i = tx_ring->next_to_use;
+
+	buffer_info = &tx_ring->buffer_info[i];
+	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
+	/* VLAN MACLEN IPLEN */
+	if (tx_flags & IGB_TX_FLAGS_VLAN)
+		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+	info |= ((skb->nh.raw - skb->data) << E1000_ADVTXD_MACLEN_SHIFT);
+	*hdr_len += (skb->nh.raw - skb->data);
+	info |= skb->h.raw - skb->nh.raw;
+	*hdr_len += skb->h.raw - skb->nh.raw;
+	context_desc->vlan_macip_lens = cpu_to_le32(info);
+
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
+	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+
+	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+
+	/* MSS L4LEN IDX */
+	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
+	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+
+	/* Context index must be unique per ring.  Luckily, so is the interrupt
+	 * mask value. */
+	mss_l4len_idx |= tx_ring->eims_value >> 4;
+
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+	context_desc->seqnum_seed = 0;
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->dma = 0;
+	if (++i == tx_ring->count) i = 0;
+	tx_ring->next_to_use = i;
+
+	return 1;
+}
+
+static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
+                                        struct igb_ring *tx_ring,
+                                        struct sk_buff *skb, u32 tx_flags)
+{
+	struct e1000_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	struct igb_buffer *buffer_info;
+	u32 info = 0, tu_cmd = 0;
+
+	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
+	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
+		i = tx_ring->next_to_use;
+		buffer_info = &tx_ring->buffer_info[i];
+		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		if (tx_flags & IGB_TX_FLAGS_VLAN)
+			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+		info |= ((skb->nh.raw - skb->data) << E1000_ADVTXD_MACLEN_SHIFT);
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			info |= skb->h.raw - skb->nh.raw;
+
+		context_desc->vlan_macip_lens = cpu_to_le32(info);
+
+		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb->protocol == htons(ETH_P_IP))
+				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
+			if (skb->sk && (skb->sk->sk_protocol == IPPROTO_TCP))
+				tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+		}
+
+		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+		context_desc->seqnum_seed = 0;
+		context_desc->mss_l4len_idx =
+		                          cpu_to_le32(tx_ring->eims_value >> 4);
+
+		buffer_info->time_stamp = jiffies;
+		buffer_info->dma = 0;
+
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+#define IGB_MAX_TXD_PWR	16
+#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
+
+static inline int igb_tx_map_adv(struct igb_adapter *adapter,
+                                 struct igb_ring *tx_ring,
+                                 struct sk_buff *skb)
+{
+	struct igb_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int count = 0, i;
+	unsigned int f;
+
+	i = tx_ring->next_to_use;
+
+	buffer_info = &tx_ring->buffer_info[i];
+	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
+	buffer_info->length = len;
+	/* set time_stamp *before* dma to help avoid a possible race */
+	buffer_info->time_stamp = jiffies;
+	buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
+	                                  PCI_DMA_TODEVICE);
+	count++;
+	if (++i == tx_ring->count) i = 0;
+
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+
+		buffer_info = &tx_ring->buffer_info[i];
+		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
+		buffer_info->length = len;
+		buffer_info->time_stamp = jiffies;
+		buffer_info->dma = pci_map_page(adapter->pdev,
+		                                frag->page,
+		                                frag->page_offset,
+		                                len,
+		                                PCI_DMA_TODEVICE);
+
+		count++;
+		if (++i == tx_ring->count) i = 0;
+	}
+
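+	/* attach the skb to the last descriptor's buffer_info so it is only
+	 * freed once the entire chain has been transmitted */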
+	i = (i == 0) ? tx_ring->count - 1 : i - 1;
+	tx_ring->buffer_info[i].skb = skb;
+
+	return count;
+}
+
+static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
+                                    struct igb_ring *tx_ring,
+                                    int tx_flags, int count, u32 paylen,
+                                    u8 hdr_len)
+{
+	union e1000_adv_tx_desc *tx_desc = NULL;
+	struct igb_buffer *buffer_info;
+	u32 olinfo_status = 0, cmd_type_len;
+	unsigned int i;
+
+	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
+	                E1000_ADVTXD_DCMD_DEXT);
+
+	if (tx_flags & IGB_TX_FLAGS_VLAN)
+		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+
+	if (tx_flags & IGB_TX_FLAGS_TSO) {
+		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+
+		/* insert tcp checksum */
+		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+		/* insert ip checksum */
+		if (tx_flags & IGB_TX_FLAGS_IPV4)
+			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+
+	} else if (tx_flags & IGB_TX_FLAGS_CSUM)
+		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+	if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+	                IGB_TX_FLAGS_VLAN))
+		olinfo_status |= tx_ring->eims_value >> 4;
+
+	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
+
+	i = tx_ring->next_to_use;
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->read.cmd_type_len =
+			cpu_to_le32(cmd_type_len | buffer_info->length);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		if (++i == tx_ring->count) i = 0;
+	}
+
+	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+	/* we need this if more than one processor can write to our tail
+	 * at a time; it synchronizes IO on IA64/Altix systems */
+	mmiowb();
+}
+
+static int __igb_maybe_stop_tx(struct net_device *netdev,
+                               struct igb_ring *tx_ring, int size)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (IGB_DESC_UNUSED(tx_ring) < size)
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int igb_maybe_stop_tx(struct net_device *netdev,
+                             struct igb_ring *tx_ring, int size)
+{
+	if (IGB_DESC_UNUSED(tx_ring) >= size)
+		return 0;
+	return __igb_maybe_stop_tx(netdev, tx_ring, size);
+}
+
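+/* upper bound on the number of data descriptors needed to map S bytes,
+ * given the IGB_MAX_DATA_PER_TXD (64KB) limit per descriptor */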
+#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
+
+static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
+                                   struct net_device *netdev,
+                                   struct igb_ring *tx_ring)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	unsigned int tx_flags = 0;
+	unsigned int len;
+	u8 hdr_len = 0;
+	int tso = 0;
+
+	len = skb_headlen(skb);
+
+	if (test_bit(__IGB_DOWN, &adapter->state)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* need: 1 descriptor per page,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for skb->data,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time */
+	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+		/* this is a hard error */
+		return NETDEV_TX_BUSY;
+	}
+
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		tx_flags |= IGB_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+	}
+
+	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
+	                                      &hdr_len) : 0;
+
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (tso) {
+		tx_flags |= IGB_TX_FLAGS_TSO;
+	} else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags)) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			tx_flags |= IGB_TX_FLAGS_CSUM;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IGB_TX_FLAGS_IPV4;
+
+	igb_tx_queue_adv(adapter, tx_ring, tx_flags,
+	                 igb_tx_map_adv(adapter, tx_ring, skb),
+	                 skb->len, hdr_len);
+
+	netdev->trans_start = jiffies;
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+
+	return NETDEV_TX_OK;
+}
+
+static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_ring *tx_ring = &adapter->tx_ring[0];
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
+}
+
+/**
+ * igb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void igb_tx_timeout(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	adapter->tx_timeout_count++;
+	schedule_work(&adapter->reset_task);
+	E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->eims_enable_mask &
+		~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
+}
+
+static void igb_reset_task(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	igb_reinit_locked(adapter);
+}
+
+/**
+ * igb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *
+igb_get_stats(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * igb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igb_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETHERNET_FCS_SIZE;
+
+	if ((max_frame < ETH_ZLEN + ETHERNET_FCS_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+		DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+		return -EINVAL;
+	}
+
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+	/* igb_down has a dependency on max_frame_size */
+	adapter->hw.mac.max_frame_size = max_frame;
+	if (netif_running(netdev))
+		igb_down(adapter);
+
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more, this pushes us to allocate from the next
+	 * larger slab size.
+	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 */
+
+	if (max_frame <= IGB_RXBUFFER_256)
+		adapter->rx_buffer_len = IGB_RXBUFFER_256;
+	else if (max_frame <= IGB_RXBUFFER_512)
+		adapter->rx_buffer_len = IGB_RXBUFFER_512;
+	else if (max_frame <= IGB_RXBUFFER_1024)
+		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+	else if (max_frame <= IGB_RXBUFFER_2048)
+		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+	else
+		adapter->rx_buffer_len = IGB_RXBUFFER_4096;
+
+	/* adjust allocation if LPE protects us, and we aren't using SBP */
+	if ((max_frame == ETH_FRAME_LEN + ETHERNET_FCS_SIZE) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
+		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+	        netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev))
+		igb_up(adapter);
+	else
+		igb_reset(adapter);
+
+	clear_bit(__IGB_RESETTING, &adapter->state);
+
+	return 0;
+}
+
+/**
+ * igb_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+void igb_update_stats(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	u16 phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+	/*
+	 * Prevent stats update while adapter is being reset, or if the pci
+	 * connection is down.
+	 */
+	if (adapter->link_speed == 0)
+		return;
+	if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+		return;
+
+	adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+	adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
+	adapter->stats.gorcl += E1000_READ_REG(hw, E1000_GORCL);
+	adapter->stats.gorch += E1000_READ_REG(hw, E1000_GORCH);
+	adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
+	adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
+	adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
+
+	adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
+	adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
+	adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
+	adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
+	adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+	adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+	adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
+	adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
+
+	adapter->stats.mpc += E1000_READ_REG(hw, E1000_MPC);
+	adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
+	adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
+	adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
+	adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
+	adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
+	adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
+	adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+	adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+	adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
+	adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+	adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+	adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
+	adapter->stats.gotcl += E1000_READ_REG(hw, E1000_GOTCL);
+	adapter->stats.gotch += E1000_READ_REG(hw, E1000_GOTCH);
+	adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
+	adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
+	adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
+	adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
+	adapter->stats.torl += E1000_READ_REG(hw, E1000_TORL);
+	adapter->stats.torh += E1000_READ_REG(hw, E1000_TORH);
+	adapter->stats.totl += E1000_READ_REG(hw, E1000_TOTL);
+	adapter->stats.toth += E1000_READ_REG(hw, E1000_TOTH);
+	adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
+
+	adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+	adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+	adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+	adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+	adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+	adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+
+	adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
+	adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+	/* used for adaptive IFS */
+
+	hw->mac.tx_packet_delta = E1000_READ_REG(hw, E1000_TPT);
+	adapter->stats.tpt += hw->mac.tx_packet_delta;
+	hw->mac.collision_delta = E1000_READ_REG(hw, E1000_COLC);
+	adapter->stats.colc += hw->mac.collision_delta;
+
+	adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+	adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+	adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+	adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+	adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+	adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
+	adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+	adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+	adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+	adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+	adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+	adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+	adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+	adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+	/* Fill out the OS statistics structure */
+	adapter->net_stats.rx_packets = adapter->stats.gprc;
+	adapter->net_stats.tx_packets = adapter->stats.gptc;
+	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
+	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
+	adapter->net_stats.multicast = adapter->stats.mprc;
+	adapter->net_stats.collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/* RLEC on some newer hardware can be incorrect so build
+	 * our own version based on RUC and ROC */
+	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+		adapter->stats.crcerrs + adapter->stats.algnerrc +
+		adapter->stats.ruc + adapter->stats.roc +
+		adapter->stats.cexterr;
+	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
+	                                      adapter->stats.roc;
+	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
+	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	adapter->net_stats.tx_errors = adapter->stats.ecol +
+	                               adapter->stats.latecol;
+	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
+	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
+	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	/* Phy Stats */
+	if (hw->media_type == e1000_media_type_copper) {
+		if ((adapter->link_speed == SPEED_1000) &&
+		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+			adapter->phy_stats.idle_errors += phy_tmp;
+		}
+	}
+
+	/* Management Stats */
+	adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
+	adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
+	adapter->stats.mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
+}
+
+static irqreturn_t igb_msix_other(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = data;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 eicr;
+	/* disable interrupts from the "other" bit, avoid re-entry */
+	E1000_WRITE_REG(hw, E1000_EIMC, E1000_EIMS_OTHER);
+
+	eicr = E1000_READ_REG(hw, E1000_EICR);
+
+	if (eicr & E1000_EIMS_OTHER) {
+		u32 icr = E1000_READ_REG(hw, E1000_ICR);
+		/* reading ICR causes bit 31 of EICR to be cleared */
+		if (!(icr & E1000_ICR_LSC))
+			goto no_link_interrupt;
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__IGB_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+no_link_interrupt:
+	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
+	E1000_WRITE_REG(hw, E1000_EIMS, E1000_EIMS_OTHER);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t igb_msix_rx(int irq, void *data, struct pt_regs *regs)
+{
+	struct igb_ring *rxr = data;
+	struct igb_adapter *adapter = rxr->adapter;
+	if (!rxr->itr_val)
+		E1000_WRITE_REG(&adapter->hw, E1000_EIMC, rxr->eims_value);
+
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
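+	/* adapter->itr is in interrupts/sec and the ITR register counts in
+	 * 256 ns units, hence the 1000000000 / (itr * 256) conversion */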
+	if (adapter->set_itr) {
+		E1000_WRITE_REG(&adapter->hw, rxr->itr_register,
+		        1000000000 / (adapter->itr * 256));
+		adapter->set_itr = 0;
+	}
+	if (netif_rx_schedule_prep(rxr->netdev)) {
+		rxr->total_bytes = 0;
+		rxr->total_packets = 0;
+		if (rxr->buddy) {
+			rxr->buddy->total_bytes = 0;
+			rxr->buddy->total_packets = 0;
+		}
+		__netif_rx_schedule(rxr->netdev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * igb_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t igb_intr_msi(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = data;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = E1000_READ_REG(hw, E1000_ICR);
+
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
+	if (adapter->set_itr) {
+		E1000_WRITE_REG(&adapter->hw, E1000_ITR,
+		        1000000000 / (adapter->itr * 256));
+		adapter->set_itr = 0;
+	}
+	/* read ICR disables interrupts using IAM */
+	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+		hw->mac.get_link_status = 1;
+		if (!test_bit(__IGB_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (netif_rx_schedule_prep(netdev)) {
+		adapter->tx_ring->total_bytes = 0;
+		adapter->tx_ring->total_packets = 0;
+		adapter->rx_ring->total_bytes = 0;
+		adapter->rx_ring->total_packets = 0;
+		__netif_rx_schedule(netdev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * igb_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = data;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = E1000_READ_REG(hw, E1000_ICR);
+	u32 eicr = 0;
+	if (!icr)
+		return IRQ_NONE;  /* Not our interrupt */
+
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
+	if (adapter->set_itr) {
+		E1000_WRITE_REG(&adapter->hw, E1000_ITR,
+		        1000000000 / (adapter->itr * 256));
+		adapter->set_itr = 0;
+	}
+
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if (!(icr & E1000_ICR_INT_ASSERTED))
+		return IRQ_NONE;
+
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write */
+	eicr = E1000_READ_REG(hw, E1000_EICR);
+
+	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__IGB_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (netif_rx_schedule_prep(netdev)) {
+		adapter->tx_ring->total_bytes = 0;
+		adapter->rx_ring->total_bytes = 0;
+		adapter->tx_ring->total_packets = 0;
+		adapter->rx_ring->total_packets = 0;
+		__netif_rx_schedule(netdev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * igb_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ **/
+static int igb_clean(struct net_device *poll_dev, int *budget)
+{
+	struct igb_adapter *adapter;
+	int work_to_do = min(*budget, poll_dev->quota);
+	int tx_clean_complete = 1, work_done = 0;
+	int i;
+
+	/* Must NOT use netdev_priv macro here. */
+	adapter = poll_dev->priv;
+
+	/* Keep link state information with original netdev */
+	if (!netif_carrier_ok(poll_dev))
+		goto quit_polling;
+
+	/* igb_clean is called per-cpu.  This lock protects tx_ring[i] from
+	 * being cleaned by multiple cpus simultaneously.  A failure obtaining
+	 * the lock means tx_ring[i] is currently being cleaned anyway. */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) {
+			tx_clean_complete &= igb_clean_tx_irq(adapter,
+			                                &adapter->tx_ring[i]);
+			spin_unlock(&adapter->tx_ring[i].tx_clean_lock);
+		}
+	}
+
+	/* work_done accumulates across all rx queues, so charge the budget
+	 * once after every queue has been polled to avoid double-counting */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i], &work_done,
+		                     work_to_do / adapter->num_rx_queues);
+
+	*budget -= work_done;
+	poll_dev->quota -= work_done;
+
+	/* If no Tx and not enough Rx work done, exit the polling mode */
+	if ((tx_clean_complete && (work_done < work_to_do)) ||
+	    !netif_running(poll_dev)) {
+quit_polling:
+		if (adapter->itr_setting & 3)
+			igb_set_itr(adapter, E1000_ITR, 0);
+		netif_rx_complete(poll_dev);
+		if (!test_bit(__IGB_DOWN, &adapter->state))
+			igb_irq_enable(adapter);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int igb_clean_rx_ring_msix(struct net_device *netdev, int *budget)
+{
+	struct igb_ring *rxr = netdev->priv;
+	struct igb_adapter *adapter = rxr->adapter;
+	struct net_device *real_netdev = adapter->netdev;
+	int tx_clean_complete = 1;
+	int work_to_do = min(*budget, netdev->quota);
+	int work_done = 0;
+
+	/* Keep link state information with original netdev */
+	if (!netif_carrier_ok(real_netdev))
+		goto quit_polling;
+
+	igb_clean_rx_irq_adv(adapter, rxr, &work_done, work_to_do);
+
+	if (rxr->buddy)
+		tx_clean_complete = igb_clean_tx_irq(adapter, rxr->buddy);
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+	/* If not enough Rx work done, exit the polling mode */
+	if ((tx_clean_complete && (work_done == 0)) ||
+	    !netif_running(real_netdev)) {
+quit_polling:
+		if (adapter->itr_setting & 3)
+			igb_set_itr(adapter, rxr->itr_register, 0);
+		netif_rx_complete(netdev);
+
+		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims_value);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * igb_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * returns 1 if ring is completely cleaned
+ **/
+static bool igb_clean_tx_irq(struct igb_adapter *adapter,
+                                  struct igb_ring *tx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc;
+	struct igb_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	u32 head, oldhead;
+	unsigned int count = 0;
+	bool cleaned = 0;
+	bool retval = 1;
+	unsigned int total_bytes = 0, total_packets = 0;
+
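+	/* the hardware writes the index of the last consumed descriptor back
+	 * to host memory just past the end of the ring, so read the head from
+	 * there instead of polling the TDH register */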
+	head = *(volatile u32 *)((struct e1000_tx_desc *)tx_ring->desc
+	                                                 + tx_ring->count);
+	head = le32_to_cpu(head);
+	i = tx_ring->next_to_clean;
+	while (1) {
+		while (i != head) {
+			cleaned = 1;
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			skb = buffer_info->skb;
+
+			if (skb) {
+				unsigned int segs, bytecount;
+				/* gso_segs is currently only valid for tcp */
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+				            skb->len;
+				total_packets += segs;
+				total_bytes += bytecount;
+			}
+
+			igb_unmap_and_free_tx_resource(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			if (++i == tx_ring->count) i = 0;
+
+			if (count++ == IGB_MAX_TX_CLEAN) {
+				retval = 0;
+				goto done_cleaning;
+			}
+		}
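+		/* re-read the write-back head; if it advanced while we were
+		 * cleaning, keep going, otherwise we are done */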
+		oldhead = head;
+		head = *(volatile u32 *)((struct e1000_tx_desc *)tx_ring->desc +
+		                         tx_ring->count);
+		head = le32_to_cpu(head);
+		if (head == oldhead)
+			goto done_cleaning;
+	}  /* while (1) */
+
+done_cleaning:
+	tx_ring->next_to_clean = i;
+
+	if (unlikely(cleaned &&
+	             netif_carrier_ok(netdev) &&
+	             IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_queue_stopped(netdev) &&
+		    !(test_bit(__IGB_DOWN, &adapter->state))) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (tx_ring->detect_tx_hung) {
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		tx_ring->detect_tx_hung = 0;
+		if (tx_ring->buffer_info[i].time_stamp &&
+		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+		               (adapter->tx_timeout_factor * HZ))
+		    && !(E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		         E1000_STATUS_TXOFF)) {
+
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"  head (WB)            <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  jiffies              <%lx>\n"
+					"  desc.status          <%x>\n",
+				(unsigned long)(tx_ring - adapter->tx_ring),
+				readl(adapter->hw.hw_addr + tx_ring->head),
+				readl(adapter->hw.hw_addr + tx_ring->tail),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				head,
+				tx_ring->buffer_info[i].time_stamp,
+				jiffies,
+				tx_desc->upper.fields.status);
+			netif_stop_queue(netdev);
+		}
+	}
+	tx_ring->total_bytes += total_bytes;
+	tx_ring->total_packets += total_packets;
+	return retval;
+}
+
+/**
+ * igb_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ **/
+static void igb_receive_skb(struct igb_adapter *adapter, u8 status, u16 vlan,
+                            struct sk_buff *skb)
+{
+	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) {
+		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+		                         le16_to_cpu(vlan) &
+		                         E1000_RXD_SPC_VLAN_MASK);
+	} else
+		netif_receive_skb(skb);
+}
+
+static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+                                       u32 status_err, struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
+	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
+		return;
+	/* TCP/UDP checksum error bit is set */
+	if (status_err &
+	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	adapter->hw_csum_good++;
+}
+
+static bool igb_clean_rx_irq_adv(struct igb_adapter *adapter,
+                                      struct igb_ring *rx_ring,
+                                      int *work_done, int work_to_do)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	union e1000_adv_rx_desc *rx_desc, *next_rxd;
+	struct igb_buffer *buffer_info, *next_buffer;
+	struct sk_buff *skb;
+	unsigned int i, j;
+	u32 length, hlen, staterr;
+	bool cleaned = 0;
+	int cleaned_count = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+		buffer_info = &rx_ring->buffer_info[i];
+
+		/* HW will not DMA in data larger than the given buffer, even
+		 * if it parses the (NFS, of course) header to be larger.  In
+		 * that case, it fills the header buffer and spills the rest
+		 * into the page.
+		 */
+		hlen = le16_to_cpu((rx_desc->wb.lower.lo_dword.hdr_info &
+		  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT);
+		if (hlen > adapter->rx_ps_hdr_size)
+			hlen = adapter->rx_ps_hdr_size;
+
+		length = le16_to_cpu(rx_desc->wb.upper.length);
+		cleaned = 1;
+		cleaned_count++;
+
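+		/* pick up a packet left over from the previous pass, otherwise
+		 * start a new skb from this descriptor's header buffer */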
+		if (rx_ring->pending_skb != NULL) {
+			skb = rx_ring->pending_skb;
+			rx_ring->pending_skb = NULL;
+			j = rx_ring->pending_skb_page;
+		} else {
+			skb = buffer_info->skb;
+			prefetch(skb->data - NET_IP_ALIGN);
+			buffer_info->skb = NULL;
+			if (hlen) {
+				pci_unmap_single(pdev, buffer_info->dma,
+						 adapter->rx_ps_hdr_size +
+						   NET_IP_ALIGN,
+						 PCI_DMA_FROMDEVICE);
+				skb_put(skb, hlen);
+			} else {
+				pci_unmap_single(pdev, buffer_info->dma,
+						 adapter->rx_buffer_len +
+						   NET_IP_ALIGN,
+						 PCI_DMA_FROMDEVICE);
+				skb_put(skb, length);
+				goto send_up;
+			}
+			j = 0;
+		}
+
+		while (length) {
+			pci_unmap_page(pdev, buffer_info->page_dma,
+				PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			buffer_info->page_dma = 0;
+			skb_fill_page_desc(skb, j, buffer_info->page,
+						0, length);
+			buffer_info->page = NULL;
+
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+			rx_desc->wb.upper.status_error = 0;
+			if (staterr & E1000_RXD_STAT_EOP) {
+				break;
+			}
+
+			j++;
+			cleaned_count++;
+			if (++i == rx_ring->count) i = 0;
+
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			length = le16_to_cpu(rx_desc->wb.upper.length);
+			if (!(staterr & E1000_RXD_STAT_DD)) {
+				rx_ring->pending_skb = skb;
+				rx_ring->pending_skb_page = j;
+				goto out;
+			}
+		}
+send_up:
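+		/* trim the trailing 4-byte Ethernet CRC before handing the
+		 * skb up the stack */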
+		pskb_trim(skb, skb->len - 4);
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
+		prefetch(next_rxd);
+		next_buffer = &rx_ring->buffer_info[i];
+
+		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		total_bytes += skb->len;
+		total_packets++;
+
+		igb_rx_checksum_adv(adapter, staterr, skb);
+
+		skb->protocol = eth_type_trans(skb, netdev);
+
+		igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb);
+
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.upper.status_error = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers_adv(adapter, rx_ring,
+			                         cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+out:
+	rx_ring->next_to_clean = i;
+	cleaned_count = IGB_DESC_UNUSED(rx_ring);
+
+	if (cleaned_count)
+		igb_alloc_rx_buffers_adv(adapter, rx_ring, cleaned_count);
+
+	rx_ring->total_packets += total_packets;
+	rx_ring->total_bytes += total_bytes;
+	return cleaned;
+}
+
+/**
+ * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void igb_alloc_rx_buffers_adv(struct igb_adapter *adapter,
+                                     struct igb_ring *rx_ring,
+                                     int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+
+		if (adapter->rx_ps_hdr_size && !buffer_info->page) {
+			buffer_info->page = alloc_page(GFP_ATOMIC);
+			if (!buffer_info->page) {
+				adapter->alloc_rx_buff_failed++;
+				goto no_buffers;
+			}
+			buffer_info->page_dma =
+				pci_map_page(pdev,
+				             buffer_info->page,
+				             0, PAGE_SIZE,
+				             PCI_DMA_FROMDEVICE);
+		}
+
+		if (!buffer_info->skb) {
+			int bufsz;
+
+			if (adapter->rx_ps_hdr_size)
+				bufsz = adapter->rx_ps_hdr_size;
+			else
+				bufsz = adapter->rx_buffer_len;
+			bufsz += NET_IP_ALIGN;
+			skb = netdev_alloc_skb(netdev, bufsz);
+
+			if (!skb) {
+				adapter->alloc_rx_buff_failed++;
+				goto no_buffers;
+			}
+
+			/* Make buffer alignment 2 beyond a 16 byte boundary;
+			 * this will result in a 16 byte aligned IP header after
+			 * the 14 byte MAC header is removed
+			 */
+			skb_reserve(skb, NET_IP_ALIGN);
+
+			buffer_info->skb = skb;
+			buffer_info->dma = pci_map_single(pdev, skb->data,
+			                                  bufsz,
+			                                  PCI_DMA_FROMDEVICE);
+
+		}
+		/* Refresh the desc even if buffer_addrs didn't change because
+		 * each write-back erases this info. */
+		if (adapter->rx_ps_hdr_size) {
+			rx_desc->read.pkt_addr =
+			     cpu_to_le64(buffer_info->page_dma);
+			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
+		} else {
+			rx_desc->read.pkt_addr =
+			     cpu_to_le64(buffer_info->dma);
+			rx_desc->read.hdr_addr = 0;
+		}
+
+		if (++i == rx_ring->count) i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
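+		/* the tail is written with the index of the last refilled
+		 * descriptor, i.e. one behind next_to_use */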
+		if (i-- == 0)
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+	}
+}
+
+static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	if (adapter->hw.media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->hw.phy.addr;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+				   &data->val_out))
+			return -EIO;
+		break;
+	case SIOCSMIIREG:
+	default:
+		return -EOPNOTSUPP;
+	}
+	return E1000_SUCCESS;
+}
+
+static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return igb_mii_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+	u16 cap_offset;
+
+	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (!cap_offset)
+		return -E1000_ERR_CONFIG;
+
+	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+	return E1000_SUCCESS;
+}
+
+static void igb_vlan_rx_register(struct net_device *netdev,
+                                 struct vlan_group *grp)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl, rctl;
+
+	igb_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_VME;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		/* enable VLAN receive filtering */
+		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+		rctl |= E1000_RCTL_VFE;
+		rctl &= ~E1000_RCTL_CFIEN;
+		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+		igb_update_mng_vlan(adapter);
+		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+		                adapter->hw.mac.max_frame_size + VLAN_TAG_SIZE);
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl &= ~E1000_CTRL_VME;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		/* disable VLAN filtering */
+		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+		rctl &= ~E1000_RCTL_VFE;
+		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
+			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+		}
+		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+		                adapter->hw.mac.max_frame_size);
+	}
+
+	if (!test_bit(__IGB_DOWN, &adapter->state))
+		igb_irq_enable(adapter);
+}
+
+static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+	/* add VID to filter table */
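+	/* the VFTA is an array of 128 32-bit registers: vid[11:5] selects
+	 * the register and vid[4:0] selects the bit within it */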
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta |= (1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+
+	igb_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+	if (!test_bit(__IGB_DOWN, &adapter->state))
+		igb_irq_enable(adapter);
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		igb_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta &= ~(1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void igb_restore_vlan(struct igb_adapter *adapter)
+{
+	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			igb_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+
+int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+
+	mac->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_10_HALF;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_10_FULL;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_100_HALF;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_100_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		mac->autoneg = 1;
+		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl, ctrl_ext, rctl, status;
+	u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+		igb_down(adapter);
+		igb_free_irq(adapter);
+	}
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
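+	/* if link is already up, don't bother arming the wake-on-link-change
+	 * filter */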
+	status = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		igb_setup_rctl(adapter);
+		igb_set_multi(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+			rctl |= E1000_RCTL_MPE;
+			E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+		}
+
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		/* advertise wake from D3Cold */
+		#define E1000_CTRL_ADVD3WUC 0x00100000
+		/* phy power management enable */
+		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+		ctrl |= E1000_CTRL_ADVD3WUC;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		if (adapter->hw.media_type == e1000_media_type_fiber ||
+		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
+			/* keep the laser running in D3 */
+			ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+			E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
+		}
+
+		/* Allow time for pending master requests to run */
+		e1000_disable_pcie_master(&adapter->hw);
+
+		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, wufc);
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+		pci_enable_wake(pdev, PCI_D3cold, 1);
+	} else {
+		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, 0);
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+	}
+
+	igb_release_manageability(adapter);
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if (adapter->en_mng_pt) {
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+		pci_enable_wake(pdev, PCI_D3cold, 1);
+	}
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	igb_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int igb_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "igb: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(netdev)) {
+		err = igb_request_irq(adapter);
+		if (err)
+			return err;
+	}
+
+	/* e1000_power_up_phy(adapter); */
+	igb_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	igb_init_manageability(adapter);
+
+	if (netif_running(netdev))
+		igb_up(adapter);
+
+	netif_device_attach(netdev);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver. */
+	igb_get_hw_control(adapter);
+
+	return 0;
+}
+#endif
+
+static void igb_shutdown(struct pci_dev *pdev)
+{
+	igb_suspend(pdev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void igb_netpoll(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	int i;
+	int work_done = 0, work_to_do = adapter->netdev->weight;
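+	/* use the device's NAPI weight as the rx cleanup budget for each ring */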
+
+	igb_irq_disable(adapter);
+	adapter->flags |= FLAG_IGB_IN_NETPOLL;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		igb_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i],
+		                     &work_done, work_to_do);
+	}
+	adapter->flags &= ~FLAG_IGB_IN_NETPOLL;
+	igb_irq_enable(adapter);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/**
+ * igb_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+                                              pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev))
+		igb_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the igb_resume routine.
+ */
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	if (pci_enable_device(pdev)) {
+		DPRINTK(PROBE, ERR,
+		        "Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	igb_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the igb_resume routine.
+ */
+static void igb_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	igb_init_manageability(adapter);
+
+	if (netif_running(netdev)) {
+		if (igb_up(adapter)) {
+			DPRINTK(PROBE, INFO,
+			        "igb_up failed after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver. */
+	igb_get_hw_control(adapter);
+
+}
+
+s32 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size)
+{
+	hw->dev_spec = kzalloc(size, GFP_KERNEL);
+
+	if (!hw->dev_spec)
+		return -ENOMEM;
+
+	return E1000_SUCCESS;
+}
+
+void e1000_free_dev_spec_struct(struct e1000_hw *hw)
+{
+	kfree(hw->dev_spec);
+}
+
+/* igb_main.c */
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/Makefile	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,36 @@
+################################################################################
+#
+# Intel PRO/1000 Linux driver
+# Copyright(c) 1999 - 2007 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/1000 ethernet driver
+#
+
+obj-$(CONFIG_IGB) += igb.o
+
+igb-objs := e1000_82575.o e1000_api.o e1000_mac.o e1000_manage.o \
+	    e1000_nvm.o e1000_phy.o igb_ethtool.o igb_main.o igb_param.o
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_defines.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,1362 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO      0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+#define E1000_WUS_ARP          E1000_WUFC_ARP
+#define E1000_WUS_IPV4         E1000_WUFC_IPV4
+#define E1000_WUS_IPV6         E1000_WUFC_IPV6
+#define E1000_WUS_FLX0         E1000_WUFC_FLX0
+#define E1000_WUS_FLX1         E1000_WUFC_FLX1
+#define E1000_WUS_FLX2         E1000_WUFC_FLX2
+#define E1000_WUS_FLX3         E1000_WUFC_FLX3
+#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_CANC           0x04000000 /* Interrupt delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT   16
+#define E1000_I2CCMD_REG_ADDR         0x00FF0000
+#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
+#define E1000_I2CCMD_PHY_ADDR         0x07000000
+#define E1000_I2CCMD_OPCODE_READ      0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
+#define E1000_I2CCMD_RESET            0x10000000
+#define E1000_I2CCMD_READY            0x20000000
+#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
+#define E1000_I2CCMD_ERROR            0x80000000
+#define E1000_MAX_SGMII_PHY_REG_ADDR  255
+#define E1000_I2CCMD_PHY_TIMEOUT      200
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_ENABLE_MASK                 0x00000007
+#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000 /* Enable Neighbor Discovery
+                                             * Filtering */
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
+                                                    * filtering */
+#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
+                                             * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
+                                                    * filtering */
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
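+/* With the defaults above (256, 4096, 4096, 0) this packs to
+ * (256 >> 7) | (4096 >> 2) | (4096 << 6) | (0 << 14) = 0x00040402 */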
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_10            0
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
+#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
+#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
+#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
+#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_10          0
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
+#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
+#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
+#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
+#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion by NVM */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define PHY_FORCE_TIME   20
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG      ( ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE       4
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_SPD_EN             0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K  0x0008    /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+#define E1000_PBS_24K E1000_PBA_24K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurred */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+                                              still to be processed. */
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100 microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2 millisecond intervals we wait to acquire MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_CC         0x10000000        /* Receive config change */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                           E1000_GCR_RXDSCW_NO_SNOOP      | \
+                           E1000_GCR_RXDSCR_NO_SNOOP      | \
+                           E1000_GCR_TXD_NO_SNOOP         | \
+                           E1000_GCR_TXDSCW_NO_SNOOP      | \
+                           E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New page received from LP */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type
+                                         * (0-small, 1-large) */
+#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+
+#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
+#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_NVM_RW_REG_DATA   16   /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_VERSION                0x0005
+#define NVM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define NVM_PHY_CLASS_WORD         0x0007
+#define NVM_INIT_CONTROL1_REG      0x000A
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_FLASH_VERSION          0x0032
+#define NVM_CHECKSUM_REG           0x003F
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+#define NVM_WORD0F_ANE              0x0800
+#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
+#define NVM_WORD0F_LPLU             0x0001
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
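+
+/* Illustrative sketch (not part of the original header): checksum validation
+ * reads words 0 through NVM_CHECKSUM_REG, sums them, and compares the total
+ * against NVM_SUM.  The e1000_read_nvm() API and E1000_ERR_NVM code are
+ * assumed here for illustration only:
+ *
+ *	u16 i, word, checksum = 0;
+ *	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ *		if (e1000_read_nvm(hw, i, 1, &word))
+ *			return -E1000_ERR_NVM;
+ *		checksum += word;
+ *	}
+ *	if (checksum != (u16)NVM_SUM)
+ *		return -E1000_ERR_NVM;
+ */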
+
+#define NVM_MAC_ADDR_OFFSET        0
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_RESERVED_WORD          0xFFFF
+#define NVM_PHY_CLASS_A            0x8000
+#define NVM_SERDES_AMPLITUDE_MASK  0x000F
+#define NVM_SIZE_MASK              0x1C00
+#define NVM_SIZE_SHIFT             10
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+#define NVM_SWDPIO_EXT_SHIFT       4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE  0x6  /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5  /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7  /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE  0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE  0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+#define NVM_STATUS_WEN_SPI         0x02
+#define NVM_STATUS_BP0_SPI         0x04
+#define NVM_STATUS_BP1_SPI         0x08
+#define NVM_STATUS_WPEN_SPI        0x80
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+                              (ID_LED_OFF1_OFF2 <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER        0xE6
+#define PCIX_STATUS_REGISTER_LO      0xE8
+#define PCIX_STATUS_REGISTER_HI      0xEA
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+#define PCIX_STATUS_LO_FUNC_MASK     0x7
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1011_I_REV_4     0x04
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define M88_VENDOR           0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
+                                                * 0=CLK125 toggling
+                                                */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
+                                                *  100BASE-TX/10BASE-T:
+                                                *  MDI Mode
+                                                */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
+                                                * all speeds.
+                                                */
+#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
+                                        /* 1=Enable Extended 10BASE-T distance
+                                         * (Lower 10BASE-T RX Threshold)
+                                         * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+                                        /* 1=5-Bit interface in 100BASE-TX
+                                         * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+                                            * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000 /* 1=Lost lock detect enabled.
+                                              * Will assert lost lock and bring
+                                              * link down if idle not seen
+                                              * within 1ms in 1000BASE-T
+                                              */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
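+/* Worked example (illustrative, not in the original source):
+ * GG82563_REG(193, 16) expands to (193 << 5) | 16 == 0x1830, i.e. page 193,
+ * register offset 16. */
+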
+#define GG82563_MIN_ALT_REG       30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/igb_regtest.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,86 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool register test data */
+struct igb_reg_test {
+	u16 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x100 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+/* default register test */
+static struct igb_reg_test reg_test_82575[] = {
+	{ E1000_FCAL,	1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_FCAH,	1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_FCT,	1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_VET,	1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDBAL,	4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH,	4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN,	4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	/* Enable all four RX queues before testing. */
+	{ E1000_RXDCTL,	4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+	/* RDH is read-only for 82575, only test RDT. */
+	{ E1000_RDT0,	4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RXDCTL,	4, WRITE_NO_TEST, 0, 0 },
+	{ E1000_FCRTH,	1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+	{ E1000_FCTTV,	1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TIPG,	1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+	{ E1000_TDBAL,	4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH,	4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN,	4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	{ E1000_RCTL,	1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RCTL,	1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
+	{ E1000_RCTL,	1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
+	{ E1000_TCTL,	1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_TXCW,	1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
+	{ E1000_RA,	16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA,	16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,	128, TABLE32_TEST,   0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0 }
+};
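+
+/* Illustrative sketch (not part of the original file): a PATTERN_TEST entry
+ * is exercised by writing a few test patterns, masked by 'write', to each
+ * element of the register array and checking the masked read-back.  The
+ * register accessors, 'hw_addr' mapping, and 0x100 array spacing below are
+ * assumptions of this note:
+ *
+ *	static const u32 patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5,
+ *	                                0x00000000, 0xFFFFFFFF };
+ *	for (i = 0; i < test->array_len; i++)
+ *		for (p = 0; p < 4; p++) {
+ *			u32 wr = patterns[p] & test->write;
+ *			writel(wr, hw_addr + test->reg + (i * 0x100));
+ *			rd = readl(hw_addr + test->reg + (i * 0x100));
+ *			if ((rd & test->mask) != (wr & test->mask))
+ *				return 1;
+ *		}
+ */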
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_manage.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,246 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "e1000_api.h"
+#include "e1000_manage.h"
+
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to the buffer to checksum
+ *  @length: length of the buffer, in bytes
+ *
+ *  Calculates the checksum over the given buffer of the specified length
+ *  and returns it.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8  sum = 0;
+
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8) (0 - sum);
+}
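+
+/* Usage note (illustrative, not in the original source): the return value is
+ * the two's complement of the byte sum, so if the checksum field was zero
+ * while summing, storing the result back into that field makes the whole
+ * block sum to zero modulo 256, which is how such blocks are typically
+ * verified. */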
+
+/**
+ *  e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS on success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the host interface is enabled for command
+ *  operation and also checks whether the previous command has completed.
+ *  It busy-waits while a previous command is still in progress.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+	u32 hicr;
+	s32 ret_val = E1000_SUCCESS;
+	u8  i;
+
+
+	/* Check that the host interface is enabled. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	if ((hicr & E1000_HICR_EN) == 0) {
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+	/* check the previous command is completed */
+	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+		hicr = E1000_READ_REG(hw, E1000_HICR);
+		if (!(hicr & E1000_HICR_C))
+			break;
+		mdelay(1);
+	}
+
+	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_mng_write_cmd_header_generic - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Calculates the checksum and then writes the command header.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                     struct e1000_host_mng_command_header *hdr)
+{
+	u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+
+	/* Write the whole command header structure with new checksum. */
+
+	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+	length >>= 2;
+	/* Write the relevant command block into the ram area. */
+	for (i = 0; i < length; i++) {
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+		                            *((u32 *) hdr + i));
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mng_host_if_write_generic - Writes to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  Writes the buffer contents to the host interface at the given offset.
+ *  It handles alignment so the writes are done as efficient 32-bit accesses
+ *  and accumulates the byte sum of the buffer into the *sum parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                    u16 length, u16 offset, u8 *sum)
+{
+	u8 *tmp;
+	u8 *bufptr = buffer;
+	u32 data = 0;
+	s32 ret_val = E1000_SUCCESS;
+	u16 remaining, i, j, prev_bytes;
+
+
+	/* 'sum' accumulates only the raw data bytes; it is not the checksum */
+
+	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	tmp = (u8 *)&data;
+	prev_bytes = offset & 0x3;
+	offset >>= 2;
+
+	if (prev_bytes) {
+		data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+		for (j = prev_bytes; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+		length -= j - prev_bytes;
+		offset++;
+	}
+
+	remaining = length & 0x3;
+	length -= remaining;
+
+	/* Calculate length in DWORDs */
+	length >>= 2;
+
+	/* The device driver writes the relevant command block into the
+	 * ram area. */
+	for (i = 0; i < length; i++) {
+		for (j = 0; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+	}
+	if (remaining) {
+		for (j = 0; j < sizeof(u32); j++) {
+			if (j < remaining)
+				*(tmp + j) = *bufptr++;
+			else
+				*(tmp + j) = 0;
+
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_enable_mng_pass_thru - Enable processing of ARPs
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies whether the hardware should allow ARPs to be processed by
+ *  the host.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = 0;
+
+
+	if (!hw->mac.asf_firmware_present)
+		goto out;
+
+	manc = E1000_READ_REG(hw, E1000_MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+		goto out;
+
+	if (hw->mac.arc_subsystem_valid == 1) {
+		fwsm = E1000_READ_REG(hw, E1000_FWSM);
+		factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = 1;
+			goto out;
+		}
+	} else {
+		if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = 1;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/igb.h	2007-08-07 17:36:23.000000000 -0400
@@ -0,0 +1,308 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include "igb_compat.h"
+#include "e1000_api.h"
+#include "e1000_82575.h"
+
+struct igb_adapter;
+
+#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)
+
+#define PFX "igb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
+
+/* Interrupt defines */
+#define IGB_MAX_TX_CLEAN 72
+
+#define IGB_MIN_DYN_ITR 3000
+#define IGB_MAX_DYN_ITR 96000
+#define IGB_START_ITR 6000
+
+#define IGB_DYN_ITR_PACKET_THRESHOLD 2
+#define IGB_DYN_ITR_LENGTH_LOW 200
+#define IGB_DYN_ITR_LENGTH_HIGH 1000
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD                  256
+#define IGB_MIN_TXD                       80
+#define IGB_MAX_TXD                     4096
+
+#define IGB_DEFAULT_RXD                  256
+#define IGB_MIN_RXD                       80
+#define IGB_MAX_RXD                     4096
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES                  4
+#define IGB_MAX_TX_QUEUES                  4
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ *           descriptors available in its onboard memory.
+ *           Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ *           available in host memory.
+ *           If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ *           descriptors until either it has this many to write back, or the
+ *           ITR timer expires.
+ */
+#define IGB_RX_PTHRESH                    16
+#define IGB_RX_HTHRESH                     8
+#define IGB_RX_WTHRESH                     1
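+/* Illustrative sketch only: the main driver is expected to fold these values
+ * into the per-queue RXDCTL register, roughly as below.  The field positions
+ * shown here are an assumption of this note, not defined in this header:
+ *
+ *	rxdctl |= IGB_RX_PTHRESH;
+ *	rxdctl |= IGB_RX_HTHRESH << 8;
+ *	rxdctl |= IGB_RX_WTHRESH << 16;
+ */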
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_128   128    /* Used for packet split */
+#define IGB_RXBUFFER_256   256    /* Used for packet split */
+#define IGB_RXBUFFER_512   512
+#define IGB_RXBUFFER_1024  1024
+#define IGB_RXBUFFER_2048  2048
+#define IGB_RXBUFFER_4096  4096
+#define IGB_RXBUFFER_8192  8192
+#define IGB_RXBUFFER_16384 16384
+
+/* Packet Buffer allocations */
+#define IGB_PBA_BYTES_SHIFT 0xA
+#define IGB_TX_HEAD_ADDR_SHIFT 7
+#define IGB_PBA_TX_MASK 0xFFFF0000
+
+#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define IGB_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IGB_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define IGB_EEPROM_APME         0x0400
+
+#ifndef IGB_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define IGB_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#define IGB_MNG_VLAN_NONE -1
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct igb_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+
+	union {
+		/* TX */
+		struct {
+			unsigned long time_stamp;
+			u32 length;
+		};
+		/* RX */
+		struct {
+			struct page *page;
+			u64 page_dma;
+		};
+	};
+};
+
+struct igb_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct igb_ring {
+	struct igb_adapter *adapter; /* backlink */
+	void *desc;         /* pointer to the descriptor ring memory */
+	dma_addr_t dma;     /* physical address of the descriptor ring */
+	unsigned int size;  /* length of descriptor ring in bytes */
+	u16 count;          /* number of descriptors in the ring */
+	u16 next_to_use;    /* index to next free descriptor */
+	u16 next_to_clean;  /* index to next processed descriptor */
+	u16 head, tail;     /* indices to ring head/tail */
+
+	u16 itr_register;
+	u32 itr_val;
+	u32 eims_value;
+
+	struct igb_buffer *buffer_info;
+
+	union {
+		/* TX */
+		struct {
+			spinlock_t tx_clean_lock;
+			bool detect_tx_hung;
+			char name[IFNAMSIZ + 5];
+		};
+		/* RX */
+		struct {
+			struct sk_buff *pending_skb;
+			int pending_skb_page;
+			struct net_device *netdev;
+			struct igb_ring *buddy;
+		};
+	};
+
+	int cpu;
+
+	unsigned int total_bytes;
+	unsigned int total_packets;
+};
+
+#define IGB_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
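+/* Worked example (illustrative, not in the original source): with
+ * count = 256, next_to_use = 250 and next_to_clean = 10, IGB_DESC_UNUSED
+ * yields 256 + 10 - 250 - 1 = 15 free descriptors; one slot is always left
+ * unused so a full ring can be told apart from an empty one. */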
+
+#define E1000_RX_DESC_ADV(R, i)	    \
+	(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
+#define E1000_TX_DESC_ADV(R, i)	    \
+	(&(((union e1000_adv_tx_desc *)((R).desc))[i]))
+#define E1000_TX_CTXTDESC_ADV(R, i)	    \
+	(&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+
+#define MAX_MSIX_COUNT 10
+/* board specific private data structure */
+
+struct igb_adapter {
+	/* TX */
+	struct igb_ring *tx_ring;        /* One per active queue */
+	int num_tx_queues;
+	unsigned int restart_queue;
+	unsigned long tx_queue_len;
+	u32 txd_cmd;
+	u32 gotcl;
+	u64 gotcl_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+
+	/* RX */
+	struct igb_ring *rx_ring;        /* One per active queue */
+	int num_rx_queues;
+	u32 alloc_rx_buff_failed;
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u64 gorcl_old;
+	u32 gorcl;
+	u16 rx_ps_hdr_size;
+	bool rx_csum;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+	/* timers, tasks */
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+	struct timer_list blink_timer;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+
+	struct vlan_group *vlgrp;
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+	int set_itr;
+
+	u8 fc_autoneg;
+	u8 tx_timeout_factor;
+	unsigned long led_status;
+
+
+	u32 test_icr;
+	struct igb_ring test_tx_ring;
+	struct igb_ring test_rx_ring;
+
+	int msg_enable;
+	struct msix_entry *msix_entries;
+	u32 eims_enable_mask;
+	u32 lli_port;
+	u32 lli_size;
+
+	/* to not mess up cache alignment, always add to the bottom */
+	unsigned long state;
+	unsigned int flags;
+	u32 eeprom_wol;
+};
+
+enum e1000_state_t {
+	__IGB_TESTING,
+	__IGB_RESETTING,
+	__IGB_DOWN
+};
+
+#define FLAG_IGB_HAS_MSI     (1 << 0)
+#define FLAG_IGB_LLI_PUSH    (1 << 1)
+#define FLAG_IGB_IN_NETPOLL  (1 << 2)
+
+extern char igb_driver_name[];
+extern const char igb_driver_version[];
+
+extern int igb_up(struct igb_adapter *);
+extern void igb_down(struct igb_adapter *);
+extern void igb_reinit_locked(struct igb_adapter *);
+extern void igb_reset(struct igb_adapter *);
+extern int igb_set_spd_dplx(struct igb_adapter *, u16 );
+extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
+extern void igb_update_stats(struct igb_adapter *);
+
+#endif /* _IGB_H_ */
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/igb_compat.h	2007-08-07 17:35:33.000000000 -0400
@@ -0,0 +1,22 @@
+#ifndef __IGB_COMPAT_H__
+#define __IGB_COMPAT_H__
+
+#include <linux/if_vlan.h>
+
+typedef unsigned int bool;
+
+#define ETH_FCS_LEN               4
+
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+						       int vlan_id)
+{
+	return vg->vlan_devices[vlan_id];
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+					 struct net_device *dev)
+{
+	/* Store the device (or NULL) for this VLAN id, matching the newer
+	 * kernel API this shim emulates. */
+	vg->vlan_devices[vlan_id] = dev;
+}
+
+#endif /* __IGB_COMPAT_H__ */
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_82575.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,1310 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * e1000_82575
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+
+static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82575(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
+static void e1000_release_phy_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
+static void e1000_release_nvm_82575(struct e1000_hw *hw);
+static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+                                         u16 *duplex);
+static s32  e1000_init_hw_82575(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                           u16 *data);
+static void e1000_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index);
+static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw);
+static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+                                            u32 offset, u16 data);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static s32  e1000_configure_pcs_link_82575(struct e1000_hw *hw);
+static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+                                                 u16 *speed, u16 *duplex);
+static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
+
+struct e1000_dev_spec_82575 {
+	bool sgmii_active;
+};
+
+/**
+ *  e1000_init_phy_params_82575 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (hw->media_type != e1000_media_type_copper) {
+		phy->type                = e1000_phy_none;
+		goto out;
+	}
+
+	phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us      = 100;
+
+	func->acquire_phy        = e1000_acquire_phy_82575;
+	func->check_reset_block  = e1000_check_reset_block_generic;
+	func->commit_phy         = e1000_phy_sw_reset_generic;
+	func->get_cfg_done       = e1000_get_cfg_done_82575;
+	func->release_phy        = e1000_release_phy_82575;
+
+	if (e1000_sgmii_active_82575(hw) == 1) {
+		func->reset_phy          = e1000_phy_hw_reset_sgmii_82575;
+		func->read_phy_reg       = e1000_read_phy_reg_sgmii_82575;
+		func->write_phy_reg      = e1000_write_phy_reg_sgmii_82575;
+	} else {
+		func->reset_phy          = e1000_phy_hw_reset_generic;
+		func->read_phy_reg       = e1000_read_phy_reg_igp;
+		func->write_phy_reg      = e1000_write_phy_reg_igp;
+	}
+
+	/* Set phy->phy_addr and phy->id. */
+	ret_val = e1000_get_phy_id_82575(hw);
+
+	/* Verify phy id and set remaining function pointers */
+	switch (phy->id) {
+	case M88E1111_I_PHY_ID:
+		phy->type                = e1000_phy_m88;
+		func->check_polarity     = e1000_check_polarity_m88;
+		func->get_phy_info       = e1000_get_phy_info_m88;
+		func->get_cable_length   = e1000_get_cable_length_m88;
+		func->force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy->type                = e1000_phy_igp_3;
+		func->check_polarity     = e1000_check_polarity_igp;
+		func->get_phy_info       = e1000_get_phy_info_igp;
+		func->get_cable_length   = e1000_get_cable_length_igp_2;
+		func->force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+		func->set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
+		func->set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+
+	nvm->opcode_bits        = 8;
+	nvm->delay_usec         = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	nvm->type               = e1000_nvm_eeprom_spi;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+	                  E1000_EECD_SIZE_EX_SHIFT);
+
+	/* Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
+	nvm->word_size	= 1 << size;
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_nvm_82575;
+	func->read_nvm          = e1000_read_nvm_eerd;
+	func->release_nvm       = e1000_release_nvm_82575;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->valid_led_default = e1000_valid_led_default_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+	func->write_nvm         = e1000_write_nvm_spi;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82575 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	struct e1000_dev_spec_82575 *dev_spec;
+	u32 ctrl, ctrl_ext;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82575);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+	if (ret_val)
+		goto out;
+
+	dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
+
+	/* Set media type */
+	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
+	 * based on the EEPROM. We cannot rely upon device ID. There
+	 * is no distinguishable difference between fiber and internal
+	 * SerDes mode on the 82575. There can be an external PHY attached
+	 * on the SGMII interface. For this, we'll set sgmii_active to 1.
+	 */
+	hw->media_type = e1000_media_type_copper;
+	dev_spec->sgmii_active = 0;
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	if ((ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) ==
+	    E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES) {
+		hw->media_type = e1000_media_type_internal_serdes;
+	} else if (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII) {
+		dev_spec->sgmii_active = 1;
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_I2C_ENA));
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = 1;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+	                ? 1 : 0;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pcie_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82575;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82575;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface link setup */
+	func->setup_physical_interface =
+	        (hw->media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82575
+	                : e1000_setup_fiber_serdes_link_82575;
+	/* check for link */
+	func->check_for_link = e1000_check_for_link_82575;
+	/* receive address register setting */
+	func->rar_set = e1000_rar_set_82575;
+	/* multicast address update */
+	func->mc_addr_list_update = e1000_mc_addr_list_update_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_82575;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82575 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+
+	hw->func.init_mac_params = e1000_init_mac_params_82575;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82575;
+	hw->func.init_phy_params = e1000_init_phy_params_82575;
+}
+
+/**
+ *  e1000_acquire_phy_82575 - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+{
+	u16 mask;
+
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+
+	return e1000_acquire_swfw_sync_82575(hw, mask);
+}
+
+/**
+ *  e1000_release_phy_82575 - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+	u16 mask;
+
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	e1000_release_swfw_sync_82575(hw, mask);
+}
+
+/**
+ *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the serial gigabit media independent
+ *  interface and stores the retrieved information in data.
+ **/
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                          u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+
+
+	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+		return -E1000_ERR_PARAM;
+	}
+
+	/* Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+	          (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+	          (E1000_I2CCMD_OPCODE_READ));
+
+	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C read completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		return -E1000_ERR_PHY;
+	}
+
+	/* Need to byte-swap the 16-bit value. */
+	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the serial gigabit
+ *  media independent interface.
+ **/
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                           u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+	u16 phy_data_swapped;
+
+
+	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+		return -E1000_ERR_PARAM;
+	}
+
+	/* Swap the data bytes for the I2C interface */
+	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+	/* Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+	          (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+	          E1000_I2CCMD_OPCODE_WRITE |
+	          phy_data_swapped);
+
+	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C write completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		return -E1000_ERR_PHY;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieves the PHY address and ID for PHYs which do and do not use the
+ *  sgmii interface.
+ **/
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val = E1000_SUCCESS;
+	u16 phy_id;
+
+
+	/* For SGMII PHYs, we try the list of possible addresses until
+	 * we find one that works.  For non-SGMII PHYs
+	 * (e.g. integrated copper PHYs), an address of 1 should
+	 * work.  The result of this function should mean phy->phy_addr
+	 * and phy->id are set correctly.
+	 */
+	if (e1000_sgmii_active_82575(hw) == 0) {
+		phy->addr = 1;
+		ret_val = e1000_get_phy_id(hw);
+		goto out;
+	}
+
+	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
+	 * Therefore, we need to test 1-7
+	 */
+	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+		ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+		if (ret_val == E1000_SUCCESS) {
+			/* At the time of this writing, the M88 part is
+			 * the only supported SGMII PHY product. */
+			if (phy_id == M88_VENDOR)
+				break;
+		}
+	}
+
+	/* A valid PHY type couldn't be found. */
+	if (phy->addr == 8) {
+		phy->addr = 0;
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	ret_val = e1000_get_phy_id(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+
+	/* This isn't a true "hard" reset, but is the only reset
+	 * available to us at this time.
+	 */
+
+
+	/* SFP documentation requires the following to configure the SFP module
+	 * to work on SGMII.  No further documentation is given.
+	 */
+	ret_val = e1000_write_phy_reg(hw, 0x1B, 0x8084);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_phy_commit(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: 1 to enable LPLU, 0 to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device's autonegotiation advertisement is 10, 10/100, or
+ *  10/100/1000 Mbps at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             &data);
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP01E1000_PHY_PORT_CONFIG,
+		                              data);
+		if (ret_val)
+			goto out;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained. */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_82575 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+
+	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_acquire_nvm_generic(hw);
+
+	if (ret_val)
+		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+
+	e1000_release_nvm_generic(hw);
+	e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = E1000_SUCCESS;
+	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+
+	while (i < timeout) {
+		if (e1000_get_hw_semaphore_generic(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/* Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask) */
+		e1000_put_hw_semaphore_generic(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+	e1000_put_hw_semaphore_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to release
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+
+	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+		; /* Empty */
+
+	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+	e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ *  e1000_get_cfg_done_82575 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: EEPROM-less silicon will fail trying to read
+ *  the config done bit, so the error is only logged and E1000_SUCCESS is
+ *  returned.  If an error were returned here, EEPROM-less silicon would
+ *  not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+
+	while (timeout) {
+		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+			break;
+		msleep(1);
+		timeout--;
+	}
+	if (!timeout) {
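+		/* The config done bit never set; EEPROM-less silicon hits
+		 * this case, so it is not treated as an error. */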
+	}
+
+	/* If EEPROM is not marked present, init the PHY manually */
+	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	    (hw->phy.type == e1000_phy_igp_3)) {
+		e1000_phy_init_script_igp3(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_82575 - Get link speed/duplex info
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  This is a wrapper function.  If using the serial gigabit media independent
+ *  interface, use pcs to retrieve the link speed and duplex information.
+ *  Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+                                        u16 *duplex)
+{
+	s32 ret_val;
+
+
+	if (hw->media_type != e1000_media_type_copper ||
+	    e1000_sgmii_active_82575(hw) == 1) {
+		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+		                                               duplex);
+	} else {
+		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+		                                                    duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_link_82575 - Check for link
+ *  @hw: pointer to the HW structure
+ *
+ *  If sgmii is enabled, then use the pcs register to determine link, otherwise
+ *  use the generic interface for determining link.
+ **/
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 speed, duplex;
+
+
+	/* SGMII link check is done through the PCS register. */
+	if ((hw->media_type != e1000_media_type_copper) ||
+	    (e1000_sgmii_active_82575(hw) == 1))
+		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+		                                               &duplex);
+	else
+		ret_val = e1000_check_for_copper_link_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Using the physical coding sub-layer (PCS), retrieve the current speed and
+ *  duplex, then store the values in the pointers provided.
+ **/
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
+                                                u16 *duplex)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 pcs;
+
+
+	/* Set up defaults for the return values of this function */
+	mac->serdes_has_link = 0;
+	*speed = 0;
+	*duplex = 0;
+
+	/* Read the PCS Status register for link state. In non-copper mode
+	 * the standard status register is not accurate, so the PCS status
+	 * register is used instead. */
+	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+	/* The link up bit determines when link is up on autoneg. The sync ok
+	 * gets set once both sides sync up and agree upon link. Stable link
+	 * can be determined by checking for both link up and link sync ok
+	 */
+	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+		mac->serdes_has_link = 1;
+
+		/* Detect and store PCS speed */
+		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+			*speed = SPEED_1000;
+		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+			*speed = SPEED_100;
+		} else {
+			*speed = SPEED_10;
+		}
+
+		/* Detect and store PCS duplex */
+		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+			*duplex = FULL_DUPLEX;
+		} else {
+			*duplex = HALF_DUPLEX;
+		}
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_rar_set_82575 - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+static void e1000_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+
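+	/* Writes to an out-of-range RAR index are silently ignored. */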
+	if (index < E1000_RAR_ENTRIES_82575) {
+		e1000_rar_set_generic(hw, addr, index);
+		goto out;
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_reset_hw_82575 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+	u32 ctrl, icr;
+	s32 ret_val;
+
+
+	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
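+		/* Failure to disable PCI-E master access is not treated as
+		 * fatal; continue with the reset anyway. */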
+	}
+
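+	/* Mask all interrupts and disable receive/transmit before resetting */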
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msleep(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
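+	/* Issue a global reset of the MAC by setting CTRL.RST */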
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val) {
+		/* When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+	}
+
+	/* If EEPROM is not present, run manual init scripts */
+	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+		e1000_reset_init_script_82575(hw);
+
+	/* Clear any pending interrupt events. */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82575 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		goto out;
+	}
+
+	/* Disabling VLAN filtering */
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82575(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82575 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link; once link is established, the collision distance and flow
+ *  control are configured.
+ **/
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+	u32 ctrl, led_ctrl;
+	s32  ret_val;
+	bool link;
+
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
+		ret_val = e1000_copper_link_setup_m88(hw);
+		break;
+	case e1000_phy_igp_3:
+		ret_val = e1000_copper_link_setup_igp(hw);
+		/* Setup activity LED */
+		led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
+		led_ctrl &= IGP_ACTIVITY_LED_MASK;
+		led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.autoneg) {
+		/* Setup autoneg and flow control advertisement
+		 * and perform autonegotiation. */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings. */
+		ret_val = e1000_phy_force_speed_duplex(hw);
+		if (ret_val) {
+			goto out;
+		}
+	}
+
+	ret_val = e1000_configure_pcs_link_82575(hw);
+	if (ret_val)
+		goto out;
+
+	/* Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw,
+	                                     COPPER_LINK_UP_LIMIT,
+	                                     10,
+	                                     &link);
+	if (ret_val)
+		goto out;
+
+	if (link) {
+		/* Config the MAC and PHY after link is up */
+		e1000_config_collision_dist_generic(hw);
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+	} else {
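+		/* No link was established; nothing more to configure */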
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures speed and duplex for fiber and serdes links.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 reg;
+
+
+	/* On the 82575, SerDes loopback mode persists until it is
+	 * explicitly turned off or a power cycle is performed.  A read to
+	 * the register does not indicate its status.  Therefore, we ensure
+	 * loopback mode is disabled during initialization.
+	 */
+	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+	/* Force link up, set 1gb, set both sw defined pins */
+	reg = E1000_READ_REG(hw, E1000_CTRL);
+	reg |= E1000_CTRL_SLU |
+	       E1000_CTRL_SPD_1000 |
+	       E1000_CTRL_FRCSPD |
+	       E1000_CTRL_SWDPIN0 |
+	       E1000_CTRL_SWDPIN1;
+	E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+	/* Set switch control to serdes energy detect */
+	reg = E1000_READ_REG(hw, E1000_CONNSW);
+	reg |= E1000_CONNSW_ENRGSRC;
+	E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+
+	/* The new SerDes mode allows the speed to be forced or autonegotiated
+	 * at 1gb. Autoneg should be the default for most drivers, since it is
+	 * the mode that is compatible with older link partners and switches.
+	 * However, both modes are supported by the hardware and some
+	 * drivers/tools.
+	 */
+	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+	if (hw->mac.autoneg) {
+		/* Set PCS register for autoneg */
+		reg |= E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
+		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
+		       E1000_PCS_LCTL_AN_ENABLE |     /* Enable Autoneg */
+		       E1000_PCS_LCTL_AN_RESTART;     /* Restart autoneg */
+	} else {
+		/* Set PCS register for forced speed */
+		reg |= E1000_PCS_LCTL_FLV_LINK_UP |   /* Force link up */
+		       E1000_PCS_LCTL_FSV_1000 |      /* Force 1000    */
+		       E1000_PCS_LCTL_FDV_FULL |      /* SerDes Full duplex */
+		       E1000_PCS_LCTL_FSD |           /* Force Speed */
+		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
+	}
+	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_configure_pcs_link_82575 - Configure PCS link
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ *  only used on copper connections where the serial gigabit media
+ *  independent interface (sgmii) is being used.  Configures the link
+ *  for auto-negotiation or forces speed/duplex.
+ **/
+static s32 e1000_configure_pcs_link_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg = 0;
+
+
+	if (hw->media_type != e1000_media_type_copper ||
+	    e1000_sgmii_active_82575(hw) == 0)
+		goto out;
+
+	/* For SGMII, we need to issue a PCS autoneg restart */
+	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+	/* AN time out should be disabled for SGMII mode */
+	reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+
+	if (mac->autoneg) {
+		/* Make sure forced speed and force link are not set */
+		reg &= ~(E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+		/* The PHY should be setup prior to calling this function.
+		 * All we need to do is restart autoneg and enable autoneg.
+		 */
+		reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE;
+	} else {
+		/* Set PCS register for forced speed */
+
+		/* Turn off bits for full duplex, speed, and autoneg */
+		reg &= ~(E1000_PCS_LCTL_FSV_1000 |
+		         E1000_PCS_LCTL_FSV_100 |
+		         E1000_PCS_LCTL_FDV_FULL |
+		         E1000_PCS_LCTL_AN_ENABLE);
+
+		/* Check for duplex first */
+		if (mac->forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
+			reg |= E1000_PCS_LCTL_FDV_FULL;
+
+		/* Now set speed */
+		if (mac->forced_speed_duplex & E1000_ALL_100_SPEED)
+			reg |= E1000_PCS_LCTL_FSV_100;
+
+		/* Force speed and force link */
+		reg |= E1000_PCS_LCTL_FSD |
+		       E1000_PCS_LCTL_FORCE_LINK |
+		       E1000_PCS_LCTL_FLV_LINK_UP;
+	}
+	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+out:
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_sgmii_active_82575 - Return sgmii state
+ *  @hw: pointer to the HW structure
+ *
+ *  82575 silicon has a serial gigabit media independent interface (sgmii)
+ *  which can be enabled for use in the embedded applications.  Simply
+ *  return the current state of the sgmii interface.
+ **/
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82575 *dev_spec;
+	bool ret_val;
+
+
+	if (hw->mac.type != e1000_82575) {
+		ret_val = 0;
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
+
+	ret_val = dev_spec->sgmii_active;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_init_script_82575 - Inits HW defaults after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Inits recommended HW defaults after a reset when there is no EEPROM
+ *  detected. This is only for the 82575.
+ **/
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
+{
+
+	if (hw->mac.type == e1000_82575) {
+		/* SerDes configuration via SERDESCTRL */
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
+
+		/* CCM configuration via CCMCTL register */
+		e1000_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
+		e1000_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
+
+		/* PCIe lanes configuration */
+		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
+		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
+		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
+		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+		/* PCIe PLL Configuration */
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
+		e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+	u32 temp;
+
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
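+	/* Reading these registers clears them; the values are discarded */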
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+
+	temp = E1000_READ_REG(hw, E1000_ICRXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICTXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQEC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQMTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+	temp = E1000_READ_REG(hw, E1000_CBTMPC);
+	temp = E1000_READ_REG(hw, E1000_HTDPMC);
+	temp = E1000_READ_REG(hw, E1000_CBRMPC);
+	temp = E1000_READ_REG(hw, E1000_RPTHC);
+	temp = E1000_READ_REG(hw, E1000_HGPTC);
+	temp = E1000_READ_REG(hw, E1000_HTCBDPC);
+	temp = E1000_READ_REG(hw, E1000_HGORCL);
+	temp = E1000_READ_REG(hw, E1000_HGORCH);
+	temp = E1000_READ_REG(hw, E1000_HGOTCL);
+	temp = E1000_READ_REG(hw, E1000_HGOTCH);
+	temp = E1000_READ_REG(hw, E1000_LENERRS);
+
+	/* This register should not be read in copper configurations */
+	if (hw->media_type == e1000_media_type_internal_serdes)
+		temp = E1000_READ_REG(hw, E1000_SCVPC);
+}
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_phy.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,1723 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "e1000_api.h"
+#include "e1000_phy.h"
+
+static s32  e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static void e1000_release_phy(struct e1000_hw *hw);
+static s32  e1000_acquire_phy(struct e1000_hw *hw);
+static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+                                               u16 *phy_ctrl);
+static s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+static s32  e1000_phy_setup_autoneg(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] =
+	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_m88_cable_length_table) / \
+                 sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_igp_2_cable_length_table) / \
+                 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ *  e1000_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+	u32 manc;
+
+
+	manc = E1000_READ_REG(hw, E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	       E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_id;
+
+
+	ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	udelay(20);
+	ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_reset_dsp_generic - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+static s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/* Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+	        (phy->addr << E1000_MDIC_PHY_SHIFT) |
+	        (E1000_MDIC_OP_READ));
+
+	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+	/* Poll the ready bit to see if the MDI read completed */
+	for (i = 0; i < 64; i++) {
+		udelay(50);
+		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	*data = (u16) mdic;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/* Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	mdic = (((u32)data) |
+	        (offset << E1000_MDIC_REG_SHIFT) |
+	        (phy->addr << E1000_MDIC_PHY_SHIFT) |
+	        (E1000_MDIC_OP_WRITE));
+
+	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+	/* Poll the ready bit to see if the MDI write completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		udelay(5);
+		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000_write_phy_reg_mdic(hw,
+						   IGP01E1000_PHY_PAGE_SELECT,
+						   (u16)offset);
+		if (ret_val) {
+			e1000_release_phy(hw);
+			goto out;
+		}
+	}
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+					  MAX_PHY_REG_ADDRESS & offset,
+					  data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000_write_phy_reg_mdic(hw,
+						   IGP01E1000_PHY_PAGE_SELECT,
+						   (u16)offset);
+		if (ret_val) {
+			e1000_release_phy(hw);
+			goto out;
+		}
+	}
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+					   MAX_PHY_REG_ADDRESS & offset,
+					   data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
+ *  and downshift values are also set.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+
+	if (phy->reset_disable) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/* Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+		break;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/* Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	if (phy->revision < E1000_REVISION_4) {
+		/* Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = e1000_read_phy_reg(hw,
+		                             M88E1000_EXT_PHY_SPEC_CTRL,
+		                             &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == E1000_REVISION_2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = e1000_write_phy_reg(hw,
+		                             M88E1000_EXT_PHY_SPEC_CTRL,
+		                             phy_data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Commit the changes. */
+	ret_val = e1000_phy_commit(hw);
+	if (ret_val) {
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHYs.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	if (phy->reset_disable) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_phy_hw_reset(hw);
+	if (ret_val) {
+		goto out;
+	}
+
+	/* Wait 15ms for MAC to configure PHY from NVM settings. */
+	msleep(15);
+
+	/* The NVM settings will configure LPLU in D3 for
+	 * non-IGP1 PHYs. */
+	if (phy->type == e1000_phy_igp) {
+		/* disable lplu d3 during driver init */
+		ret_val = e1000_set_d3_lplu_state(hw, 0);
+		if (ret_val) {
+			goto out;
+		}
+	}
+
+	/* disable lplu d0 during driver init */
+	ret_val = e1000_set_d0_lplu_state(hw, 0);
+	if (ret_val) {
+		goto out;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		goto out;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/* When the autonegotiation advertisement is 1000Mbps only,
+		 * disable SmartSpeed and enable Auto Master/Slave
+		 * resolution as the hardware default. */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				goto out;
+		}
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
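+			/* fall through */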
+		default:
+			break;
+		}
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter,
+ *  then configures the PHY to advertise the full capability.  Sets up the PHY
+ *  to autoneg and restarts the negotiation process with the link partner.  If
+ *  wait_for_link is set, wait for autoneg to complete before exiting.
+ **/
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+
+	/* Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	ret_val = e1000_phy_setup_autoneg(hw);
+	if (ret_val) {
+		goto out;
+	}
+
+	/* Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	/* Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, in a callback routine)?
+	 */
+	if (phy->wait_for_link) {
+		ret_val = e1000_wait_autoneg(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	hw->mac.get_link_status = 1;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register.  If the PHY is already set up for auto-negotiation, return
+ *  successful.  Otherwise, set up the advertisement and flow control
+ *  registers to the appropriate values for the desired auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = e1000_read_phy_reg(hw,
+		                            PHY_1000T_CTRL,
+		                            &mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/* First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+	                         NWAY_AR_100TX_HD_CAPS |
+	                         NWAY_AR_10T_FD_CAPS   |
+	                         NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF) {
+	}
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/* Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not support receiving pause frames).
+	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *          in the EEPROM is used.
+	 */
+	switch (hw->mac.fc) {
+	case e1000_fc_none:
+		/* Flow control (RX & TX) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/* RX Flow control is enabled, and TX Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		/* Since there really isn't a way to advertise that we are
+		 * capable of RX Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric RX PAUSE.  Later
+		 * (in e1000_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/* TX Flow control is enabled, and RX Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/* Flow control (both RX and TX) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		ret_val = e1000_write_phy_reg(hw,
+		                              PHY_1000T_CTRL,
+		                              mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  successful if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+
+	udelay(1);
+
+	if (phy->wait_for_link) {
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
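+			/* Link was not established; fall through and try
+			 * once more below. */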
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on TX must be set.  Return successful upon
+ *  successful completion, else return corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+
+	/* Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+	if (phy->wait_for_link) {
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			/* We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000_write_phy_reg(hw,
+			                              M88E1000_PHY_PAGE_SELECT,
+			                              0x001d);
+			if (ret_val)
+				goto out;
+			ret_val = e1000_phy_reset_dsp_generic(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+
+	/* Turn off flow control when forcing speed/duplex */
+	mac->fc = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+	}
+
+	e1000_config_collision_dist_generic(hw);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+/**
+ *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP02E1000_PHY_POWER_MGMT,
+		                             data);
+		if (ret_val)
+			goto out;
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained. */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP01E1000_PHY_PORT_CONFIG,
+		                              data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+
+	switch (phy->type) {
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = 0;
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask) ? 1 : 0;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+
+	/* Polarity is determined based on the speed of
+	 * our connection. */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/* This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg_generic - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                               u32 usec_interval, bool *success)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+
+	for (i = 0; i < iterations; i++) {
+		/* Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
+	*success = (i < iterations) ? 1 : 0;
+
+	return ret_val;
+}
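+
+#if 0
+/* Editorial usage sketch, not part of the original Intel submission: one
+ * plausible way a caller could poll for link with the helper above, waiting
+ * up to roughly one second in 10 ms steps.  The helper name and the
+ * iteration/interval values are illustrative assumptions only.
+ */
+static s32 igb_example_wait_for_link(struct e1000_hw *hw)
+{
+	bool link = 0;
+	s32 ret_val;
+
+	/* 100 polls x 10000 usec = ~1 second worst case */
+	ret_val = e1000_phy_has_link_generic(hw, 100, 10000, &link);
+	if (ret_val)
+		return ret_val;
+
+	return link ? E1000_SUCCESS : -E1000_ERR_PHY;
+}
+#endif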
+
+/**
+ *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+	        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
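+
+#if 0
+/* Editorial illustration, not part of the original Intel submission: how the
+ * register value above becomes a min/max/midpoint length.  The table below
+ * is a placeholder standing in for e1000_m88_cable_length_table, and the
+ * helper name is hypothetical.
+ */
+static u16 igb_example_m88_cable_length(u16 pssr)
+{
+	static const u16 example_table[] = { 0, 50, 80, 110, 140, 140 };
+	u16 index = (pssr & M88E1000_PSSR_CABLE_LENGTH) >>
+	            M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+	/* e.g. index 2 -> min 80 m, max 110 m, midpoint (80 + 110) / 2 = 95 m */
+	return (example_table[index] + example_table[index + 1]) / 2;
+}
+#endif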
+
+/**
+ *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain values, the value can be used
+ *  as an index into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+	                                                 {IGP02E1000_PHY_AGC_A,
+	                                                  IGP02E1000_PHY_AGC_B,
+	                                                  IGP02E1000_PHY_AGC_C,
+	                                                  IGP02E1000_PHY_AGC_D};
+
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length. */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+		                IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+	              e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+	                         (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
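+
+#if 0
+/* Editorial illustration, not part of the original Intel submission: the
+ * averaging performed above with four hypothetical per-channel table
+ * lookups of 40, 50, 55 and 90 meters.  The extremes (40 and 90) are
+ * dropped, the remaining pair averages to 52, and the reported range is
+ * then 52 +/- IGP02E1000_AGC_RANGE meters.  The helper name is hypothetical.
+ */
+static u16 igb_example_igp2_average(void)
+{
+	u16 ch[IGP02E1000_PHY_CHANNEL_NUM] = { 40, 50, 55, 90 };
+	u16 sum = ch[0] + ch[1] + ch[2] + ch[3];	/* 235 */
+
+	/* drop min (40) and max (90), average the rest: 105 / 2 = 52 */
+	return (sum - 40 - 90) / (IGP02E1000_PHY_CHANNEL_NUM - 2);
+}
+#endif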
+
+/**
+ *  e1000_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+
+	if (hw->media_type != e1000_media_type_copper) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+	                           ? 1
+	                           : 0;
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? 1 : 0;
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = 1;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? 1 : 0;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_sw_reset_generic - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register,
+ *  setting the reset bit, and writing the register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl;
+
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	E1000_WRITE_FLUSH(hw);
+
+	udelay(phy->reset_delay_us);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	udelay(150);
+
+	e1000_release_phy(hw);
+
+	ret_val = e1000_get_phy_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/* Internal function pointers */
+
+/**
+ *  e1000_get_phy_cfg_done - Generic PHY configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if the silicon family did not implement a family-specific
+ *  get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->func.get_cfg_done != NULL)
+		return hw->func.get_cfg_done(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_phy - Generic release PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return if the silicon family does not require a semaphore when accessing the
+ *  PHY.
+ **/
+static void e1000_release_phy(struct e1000_hw *hw)
+{
+	if (hw->func.release_phy != NULL)
+		hw->func.release_phy(hw);
+}
+
+/**
+ *  e1000_acquire_phy - Generic acquire PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if the silicon family does not require a semaphore when
+ *  accessing the PHY.
+ **/
+static s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_phy != NULL)
+		return hw->func.acquire_phy(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  When the silicon family has not implemented a forced speed/duplex
+ *  function for the PHY, simply return E1000_SUCCESS.
+ **/
+s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	if (hw->func.force_speed_duplex != NULL)
+		return hw->func.force_speed_duplex(hw);
+	else
+		return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1000_write_phy_reg(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1000_write_phy_reg(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1000_write_phy_reg(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1000_write_phy_reg(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to TX amplitude in Giga mode */
+	e1000_write_phy_reg(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1000_write_phy_reg(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1000_write_phy_reg(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1000_write_phy_reg(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1000_write_phy_reg(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1000_write_phy_reg(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1000_write_phy_reg(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1000_write_phy_reg(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1000_write_phy_reg(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1000_write_phy_reg(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1000_write_phy_reg(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1000_write_phy_reg(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1000_write_phy_reg(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1000_write_phy_reg(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1000_write_phy_reg(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1000_write_phy_reg(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1000_write_phy_reg(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1000_write_phy_reg(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1000_write_phy_reg(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1000_write_phy_reg(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1000_write_phy_reg(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1000_write_phy_reg(hw, 0x1798, 0xD008);
+	/* Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1000_write_phy_reg(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1000_write_phy_reg(hw, 0x187A, 0x0800);
+	/* Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1000_write_phy_reg(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1000_write_phy_reg(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1000_write_phy_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1000_write_phy_reg(hw, 0x0000, 0x1340);
+
+	return E1000_SUCCESS;
+}
+
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_phy.h	2007-08-07 17:37:27.000000000 -0400
@@ -0,0 +1,163 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+#include "igb_compat.h"
+
+enum e1000_ms_type {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+};
+
+s32  e1000_check_downshift_generic(struct e1000_hw *hw);
+s32  e1000_check_polarity_m88(struct e1000_hw *hw);
+s32  e1000_check_polarity_igp(struct e1000_hw *hw);
+s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32  e1000_copper_link_autoneg(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32  e1000_get_phy_id(struct e1000_hw *hw);
+s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
+s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
+s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                                u32 usec_interval, bool *success);
+s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+#define E1000_MAX_PHY_ADDR                4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
+#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define IGP4_PHY_PAGE_SELECT              22   /* Page Select for IGP 4 */
+#define IGP_PAGE_SHIFT                    5
+#define PHY_REG_MASK                      0x1F
+
+#define IGP4_WUC_PAGE                     800
+#define IGP4_WUC_ADDRESS_OPCODE           0x11
+#define IGP4_WUC_DATA_OPCODE              0x12
+#define IGP4_WUC_ENABLE_PAGE              769
+#define IGP4_WUC_ENABLE_REG               17
+#define IGP4_WUC_ENABLE_BIT               (1 << 2)
+#define IGP4_WUC_HOST_WU_BIT              (1 << 4)
+
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+#define IGP01E1000_GMII_FLEX_SPD          0x0010 /* Enable flexible speed
+                                                  * on link-up */
+#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0008
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define IGP03E1000_PHY_MISC_CTRL          0x1B
+#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
+#define E1000_KMRNCTRLSTA_REN             0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED    0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
+#define IFE_PSC_FORCE_POLARITY             0x0020
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE            0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_manage.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,76 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                    u16 length, u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr);
+
+enum e1000_mng_mode {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG    0x20000000
+
+#define E1000_FWSM_MODE_MASK  0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE                  0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+
+#define E1000_HICR_EN              0x01  /* Enable bit - RO */
+#define E1000_HICR_C               0x02  /* Driver sets this bit when done
+                                          * to put command in RAM */
+#define E1000_HICR_SV              0x04  /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET        0x80
+
+#define E1000_IAMT_SIGNATURE  0x544D4149 /* Intel(R) Active Management
+                                          * Technology signature */
+
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_mac.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,1416 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+
+#include "e1000_api.h"
+#include "e1000_mac.h"
+
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
+static s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+
+/**
+ *  e1000_remove_device_generic - Free device specific structure
+ *  @hw: pointer to the HW structure
+ *
+ *  If a device specific structure was allocated, this function will
+ *  free it.
+ **/
+void e1000_remove_device_generic(struct e1000_hw *hw)
+{
+
+	/* Freeing the dev_spec member of e1000_hw structure */
+	e1000_free_dev_spec_struct(hw);
+}
+
+/**
+ *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 status;
+	u16 pcie_link_status, pci_header_type;
+
+
+	bus->type = e1000_bus_type_pci_express;
+	bus->speed = e1000_bus_speed_2500;
+
+	ret_val = e1000_read_pcie_cap_reg(hw,
+	                                  PCIE_LINK_STATUS,
+	                                  &pcie_link_status);
+	if (ret_val)
+		bus->width = e1000_bus_width_unknown;
+	else
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+		                                PCIE_LINK_WIDTH_MASK) >>
+		                               PCIE_LINK_WIDTH_SHIFT);
+
+	e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		bus->func = (status & E1000_STATUS_FUNC_MASK)
+		            >> E1000_STATUS_FUNC_SHIFT;
+	} else {
+		bus->func = 0;
+	}
+
+	return E1000_SUCCESS;
+}
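+
+#if 0
+/* Editorial illustration, not part of the original Intel submission: the
+ * negotiated width sits in a bit field of the PCIe Link Status register and
+ * is extracted with the same mask/shift used above; for a link that trained
+ * to x4 the extracted value would be 4.  The helper name is hypothetical.
+ */
+static u16 igb_example_pcie_width(u16 pcie_link_status)
+{
+	return (pcie_link_status & PCIE_LINK_WIDTH_MASK) >>
+	       PCIE_LINK_WIDTH_SHIFT;
+}
+#endif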
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_init_rx_addrs_generic - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: number of receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+
+
+	/* Setup the receive address */
+
+	e1000_rar_set_generic(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	for (i = 1; i < rar_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_rar_set_generic - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+		   ((u32) addr[1] << 8) |
+		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	if (!hw->mac.disable_av)
+		rar_high |= E1000_RAH_AV;
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
+	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+}
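+
+#if 0
+/* Editorial illustration, not part of the original Intel submission: the
+ * byte packing above for a hypothetical MAC address 00:A0:C9:12:34:56
+ * yields RAL = 0x12C9A000 and RAH = 0x00005634 (before the Address Valid
+ * bit is OR'd in).  The helper name is hypothetical.
+ */
+static void igb_example_rar_packing(u32 *rar_low, u32 *rar_high)
+{
+	u8 addr[6] = { 0x00, 0xA0, 0xC9, 0x12, 0x34, 0x56 };
+
+	*rar_low  = (u32)addr[0] | ((u32)addr[1] << 8) |
+	            ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
+	*rar_high = (u32)addr[4] | ((u32)addr[5] << 8);
+	/* *rar_low == 0x12C9A000, *rar_high == 0x00005634 */
+}
+#endif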
+
+/**
+ *  e1000_mta_set_generic - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table address is a register array of 32-bit registers.
+ *  The hash_value is used to determine what register the bit is in, the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta;
+
+	/* The MTA is a register array of 32-bit registers. It is
+	 * treated like an array of (32*mta_reg_count) bits.  We want to
+	 * set bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
+	 * mask to bits 31:5 of the hash value which gives us the
+	 * register we're modifying.  The hash bit within that register
+	 * is determined by the lower 5 bits of the hash value.
+	 */
+	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+	hash_bit = hash_value & 0x1F;
+
+	mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+	E1000_WRITE_FLUSH(hw);
+}
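+
+#if 0
+/* Editorial illustration, not part of the original Intel submission: for a
+ * hash value of 0x563 and 128 MTA registers, the code above selects bit 3
+ * of MTA register 43, since (0x563 >> 5) & 0x7F == 43 and 0x563 & 0x1F == 3.
+ * The helper name is hypothetical.
+ */
+static void igb_example_mta_position(u32 hash_value, u32 mta_reg_count,
+                                     u32 *reg, u32 *bit)
+{
+	*reg = (hash_value >> 5) & (mta_reg_count - 1);
+	*bit = hash_value & 0x1F;
+}
+#endif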
+
+/**
+ *  e1000_mc_addr_list_update_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.
+ **/
+void e1000_mc_addr_list_update_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count,
+                                       u32 rar_used_count, u32 rar_count)
+{
+	u32 hash_value;
+	u32 i;
+
+
+	/* Load the first set of multicast addresses into the exact
+	 * filters (RAR).  If there are not enough to fill the RAR
+	 * array, clear the filters.
+	 */
+	for (i = rar_used_count; i < rar_count; i++) {
+		if (mc_addr_count) {
+			e1000_rar_set(hw, mc_addr_list, i);
+			mc_addr_count--;
+			mc_addr_list += ETH_ALEN;
+		} else {
+			E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
+			E1000_WRITE_FLUSH(hw);
+			E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
+			E1000_WRITE_FLUSH(hw);
+		}
+	}
+
+	/* Clear the old settings from the MTA */
+	for (i = 0; i < hw->mac.mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Load any remaining multicast addresses into the hash table. */
+	for (; mc_addr_count > 0; mc_addr_count--) {
+		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+		e1000_mta_set(hw, hash_value);
+		mc_addr_list += ETH_ALEN;
+	}
+}
+
+/**
+ *  e1000_hash_mc_addr_generic - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  e1000_mta_set_generic()
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask. */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/* The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 */
+	/* For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB                 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+	                          (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
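+
+#if 0
+/* Editorial sketch, not part of the original Intel submission: the case-0
+ * hash from the worked example above (address 01:AA:00:12:34:56, 128 MTA
+ * registers) computed stand-alone.  The mask is 0xFFF, bit_shift works out
+ * to 4, and the result is 0x563 as documented.  The helper name is
+ * hypothetical.
+ */
+static u32 igb_example_mc_hash_case0(void)
+{
+	u8 mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
+	u32 hash_mask = (128 * 32) - 1;		/* 0xFFF */
+	u8 bit_shift = 0;
+
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;			/* ends at 4 */
+
+	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
+	                    (((u16)mc_addr[5]) << bit_shift));
+}
+#endif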
+
+/**
+ *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+	u32 temp;
+
+
+	temp = E1000_READ_REG(hw, E1000_CRCERRS);
+	temp = E1000_READ_REG(hw, E1000_SYMERRS);
+	temp = E1000_READ_REG(hw, E1000_MPC);
+	temp = E1000_READ_REG(hw, E1000_SCC);
+	temp = E1000_READ_REG(hw, E1000_ECOL);
+	temp = E1000_READ_REG(hw, E1000_MCC);
+	temp = E1000_READ_REG(hw, E1000_LATECOL);
+	temp = E1000_READ_REG(hw, E1000_COLC);
+	temp = E1000_READ_REG(hw, E1000_DC);
+	temp = E1000_READ_REG(hw, E1000_SEC);
+	temp = E1000_READ_REG(hw, E1000_RLEC);
+	temp = E1000_READ_REG(hw, E1000_XONRXC);
+	temp = E1000_READ_REG(hw, E1000_XONTXC);
+	temp = E1000_READ_REG(hw, E1000_XOFFRXC);
+	temp = E1000_READ_REG(hw, E1000_XOFFTXC);
+	temp = E1000_READ_REG(hw, E1000_FCRUC);
+	temp = E1000_READ_REG(hw, E1000_GPRC);
+	temp = E1000_READ_REG(hw, E1000_BPRC);
+	temp = E1000_READ_REG(hw, E1000_MPRC);
+	temp = E1000_READ_REG(hw, E1000_GPTC);
+	temp = E1000_READ_REG(hw, E1000_GORCL);
+	temp = E1000_READ_REG(hw, E1000_GORCH);
+	temp = E1000_READ_REG(hw, E1000_GOTCL);
+	temp = E1000_READ_REG(hw, E1000_GOTCH);
+	temp = E1000_READ_REG(hw, E1000_RNBC);
+	temp = E1000_READ_REG(hw, E1000_RUC);
+	temp = E1000_READ_REG(hw, E1000_RFC);
+	temp = E1000_READ_REG(hw, E1000_ROC);
+	temp = E1000_READ_REG(hw, E1000_RJC);
+	temp = E1000_READ_REG(hw, E1000_TORL);
+	temp = E1000_READ_REG(hw, E1000_TORH);
+	temp = E1000_READ_REG(hw, E1000_TOTL);
+	temp = E1000_READ_REG(hw, E1000_TOTH);
+	temp = E1000_READ_REG(hw, E1000_TPR);
+	temp = E1000_READ_REG(hw, E1000_TPT);
+	temp = E1000_READ_REG(hw, E1000_MPTC);
+	temp = E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ *  e1000_check_for_copper_link_generic - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+
+	/* We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/* First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = 0;
+
+	/* Check if there was DownShift, must be checked
+	 * immediately after link-up */
+	e1000_check_downshift_generic(hw);
+
+	/* If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000_config_collision_dist_generic(hw);
+
+	/* Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_generic - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	/* In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	ret_val = e1000_set_default_fc_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/* We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	mac->original_fc = mac->fc;
+
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/* Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+	E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, mac->fc_pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_collision_dist_generic - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+static s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u32 fcrtl = 0, fcrth = 0;
+
+
+	/* Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (mac->fc & e1000_fc_tx_pause) {
+		/* We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = mac->fc_low_water;
+		if (mac->fc_send_xon)
+			fcrtl |= E1000_FCRTL_XONE;
+
+		fcrth = mac->fc_high_water;
+	}
+	E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+	E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+
+	if (mac->fc != e1000_fc_default)
+		goto out;
+
+	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		goto out;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		mac->fc = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		mac->fc = e1000_fc_tx_pause;
+	else
+		mac->fc = e1000_fc_full;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The switch statement below enables/disables flow control
+	 * according to the "mac->fc" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+
+	switch (mac->fc) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
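+
+#if 0
+/* Editorial sketch, not part of the original Intel submission: the PAUSE /
+ * ASM_DIR resolution table documented in
+ * e1000_config_fc_after_link_up_generic() below, condensed into one helper.
+ * The final fall-through is simplified to e1000_fc_none; the driver code
+ * below additionally falls back to Rx-only pause when a receive capability
+ * had been advertised.  The helper name and the plain u32 return type are
+ * assumptions made to keep the sketch self-contained.
+ */
+static u32 igb_example_resolve_fc(u16 adv, u16 lp, u32 requested_fc)
+{
+	if ((adv & NWAY_AR_PAUSE) && (lp & NWAY_LPAR_PAUSE))
+		return (requested_fc == e1000_fc_full) ? e1000_fc_full
+		                                       : e1000_fc_rx_pause;
+	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+	    (lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+		return e1000_fc_tx_pause;
+	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+	    !(lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+		return e1000_fc_rx_pause;
+	return e1000_fc_none;
+}
+#endif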
+
+/**
+ *  e1000_config_fc_after_link_up_generic - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->media_type == e1000_media_type_fiber ||
+		    hw->media_type == e1000_media_type_internal_serdes)
+			ret_val = e1000_force_mac_fc_generic(hw);
+	} else {
+		if (hw->media_type == e1000_media_type_copper)
+			ret_val = e1000_force_mac_fc_generic(hw);
+	}
+
+	if (ret_val) {
+		goto out;
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+	 * flow control configured.
+	 */
+	if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
+		/* Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE))
+			goto out;
+
+		/* The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto-Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+					    &mii_nway_adv_reg);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+					    &mii_nway_lp_ability_reg);
+		if (ret_val)
+			goto out;
+
+		/* Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 */
+		/* Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/* Now we need to check if the user selected Rx ONLY
+			 * of PAUSE frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (mac->original_fc == e1000_fc_full)
+				mac->fc = e1000_fc_full;
+			else
+				mac->fc = e1000_fc_rx_pause;
+		}
+		/* For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		          (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+		          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+		          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			mac->fc = e1000_fc_tx_pause;
+		}
+		/* For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+		         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+		         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			mac->fc = e1000_fc_rx_pause;
+		}
+		/* Per the IEEE spec, at this point flow control should be
+		 * disabled.  However, we want to consider that we could
+		 * be connected to a legacy switch that doesn't advertise
+		 * desired flow control, but can be forced on the link
+		 * partner.  So if we advertised no flow control, that is
+		 * what we will resolve to.  If we advertised some kind of
+		 * receive capability (Rx Pause Only or Full Flow Control)
+		 * and the link partner advertised none, we will configure
+		 * ourselves to enable Rx Flow Control only.  We can do
+		 * this safely for two reasons:  If the link partner really
+		 * didn't want flow control enabled, and we enable Rx, no
+		 * harm done since we won't be receiving any PAUSE frames
+		 * anyway.  If the intent on the link partner was to have
+		 * flow control enabled, then by us enabling RX only, we
+		 * can at least receive pause frames and process them.
+		 * This is a good idea because in most cases, since we are
+		 * predominantly a server NIC, we will more often be asked
+		 * to delay transmission of packets than to ask our link
+		 * partner to pause transmission of frames.
+		 */
+		else if ((mac->original_fc == e1000_fc_none ||
+		          mac->original_fc == e1000_fc_tx_pause) ||
+		         mac->fc_strict_ieee) {
+			mac->fc = e1000_fc_none;
+		} else {
+			mac->fc = e1000_fc_rx_pause;
+		}
+
+		/* Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			goto out;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			mac->fc = e1000_fc_none;
+
+		/* Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = e1000_force_mac_fc_generic(hw);
+		if (ret_val) {
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+	u32 status;
+
+
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	if (status & E1000_STATUS_SPEED_1000) {
+		*speed = SPEED_1000;
+	} else if (status & E1000_STATUS_SPEED_100) {
+		*speed = SPEED_100;
+	} else {
+		*speed = SPEED_10;
+	}
+
+	if (status & E1000_STATUS_FD) {
+		*duplex = FULL_DUPLEX;
+	} else {
+		*duplex = HALF_DUPLEX;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = E1000_SUCCESS;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+
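+	/* The semaphore is taken in two stages: first the software SMBI
+	 * bit, then the software/firmware SWESMBI bit.  Both must be owned
+	 * before the PHY or NVM can be accessed.
+	 */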
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_generic(hw);
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_generic - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+
+	swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ *  e1000_get_auto_rd_done_generic - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+	s32 i = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+			break;
+		msleep(1);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_generic - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set it to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_id_led_init_generic - Initialize ID LED settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the default ID LED settings from the NVM and use them to build the
+ *  LEDCTL mode1 and mode2 values used to turn the LEDs on and off.
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+
+	ret_val = hw->func.valid_led_default(hw, &data);
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
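+	/* Each 4-bit field of the ID LED word describes one LED; map it
+	 * onto the corresponding byte of the LEDCTL mode1/mode2 values.
+	 */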
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (hw->func.setup_led != e1000_setup_led_generic) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if (hw->media_type == e1000_media_type_fiber) {
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		hw->mac.ledctl_default = ledctl;
+		/* Turn off LED0 */
+		ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+		            E1000_LEDCTL_LED0_BLINK |
+		            E1000_LEDCTL_LED0_MODE_MASK);
+		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+		           E1000_LEDCTL_LED0_MODE_SHIFT);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	} else if (hw->media_type == e1000_media_type_copper) {
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (hw->func.cleanup_led != e1000_cleanup_led_generic) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+
+	if (hw->media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/* set the blink bit for each LED that's "on" (0x0E)
+		 * in ledctl_mode2 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+				                 (i * 8));
+	}
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+
+	switch (hw->media_type) {
+	case e1000_media_type_fiber:
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+		break;
+	default:
+		break;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+
+	switch (hw->media_type) {
+	case e1000_media_type_fiber:
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_disable_pcie_master_generic - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 (E1000_SUCCESS) if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
+ *  caused the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	while (timeout) {
+		if (!(E1000_READ_REG(hw, E1000_STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		udelay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+
+	if (!mac->adaptive_ifs) {
+		goto out;
+	}
+
+	if (!mac->ifs_params_forced) {
+		mac->current_ifs_val = 0;
+		mac->ifs_min_val = IFS_MIN;
+		mac->ifs_max_val = IFS_MAX;
+		mac->ifs_step_size = IFS_STEP;
+		mac->ifs_ratio = IFS_RATIO;
+	}
+
+	mac->in_ifs_mode = 0;
+	E1000_WRITE_REG(hw, E1000_AIT, 0);
+out:
+	return;
+}
+
+/**
+ *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+
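+	/* Increase the AIT throttle while collisions are high relative to
+	 * transmitted packets, and reset it once traffic settles down.
+	 */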
+	if (!mac->adaptive_ifs) {
+		goto out;
+	}
+
+	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+			mac->in_ifs_mode = 1;
+			if (mac->current_ifs_val < mac->ifs_max_val) {
+				if (!mac->current_ifs_val)
+					mac->current_ifs_val = mac->ifs_min_val;
+				else
+					mac->current_ifs_val +=
+						mac->ifs_step_size;
+				E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
+			}
+		}
+	} else {
+		if (mac->in_ifs_mode &&
+		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+			mac->current_ifs_val = 0;
+			mac->in_ifs_mode = 0;
+			E1000_WRITE_REG(hw, E1000_AIT, 0);
+		}
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when not using auto-negotiation, MDI/MDIx is correctly
+ *  set; without auto-negotiation, MDI/MDIx is forced to MDI mode only.
+ **/
+s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+
+	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+		hw->phy.mdix = 1;
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these
+ *  and they all have the format address << 8 | data and bit 31 is polled for
+ *  completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                      u32 offset, u8 data)
+{
+	u32 i, regvalue = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+
+	/* Set up the address and data */
+	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+	E1000_WRITE_REG(hw, reg, regvalue);
+
+	/* Poll the ready bit to see if the write completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		udelay(5);
+		regvalue = E1000_READ_REG(hw, reg);
+		if (regvalue & E1000_GEN_CTL_READY)
+			break;
+	}
+	if (!(regvalue & E1000_GEN_CTL_READY)) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/igb_ethtool.c	2007-08-07 17:44:45.000000000 -0400
@@ -0,0 +1,1659 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for igb */
+
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "igb.h"
+#include "igb_regtest.h"
+
+struct igb_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
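+/* IGB_STAT expands to the size/offset pair used to locate each statistic
+ * within struct igb_adapter. */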
+#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
+		      offsetof(struct igb_adapter, m)
+static const struct igb_stats igb_gstrings_stats[] = {
+	{ "rx_packets", IGB_STAT(stats.gprc) },
+	{ "tx_packets", IGB_STAT(stats.gptc) },
+	{ "rx_bytes", IGB_STAT(stats.gorcl) },
+	{ "tx_bytes", IGB_STAT(stats.gotcl) },
+	{ "rx_broadcast", IGB_STAT(stats.bprc) },
+	{ "tx_broadcast", IGB_STAT(stats.bptc) },
+	{ "rx_multicast", IGB_STAT(stats.mprc) },
+	{ "tx_multicast", IGB_STAT(stats.mptc) },
+	{ "rx_errors", IGB_STAT(net_stats.rx_errors) },
+	{ "tx_errors", IGB_STAT(net_stats.tx_errors) },
+	{ "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
+	{ "multicast", IGB_STAT(stats.mprc) },
+	{ "collisions", IGB_STAT(stats.colc) },
+	{ "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
+	{ "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
+	{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
+	{ "rx_missed_errors", IGB_STAT(stats.mpc) },
+	{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
+	{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", IGB_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", IGB_STAT(stats.latecol) },
+	{ "tx_deferred_ok", IGB_STAT(stats.dc) },
+	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
+	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", IGB_STAT(restart_queue) },
+	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
+	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
+	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", IGB_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
+	{ "rx_header_split", IGB_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", IGB_STAT(stats.mgptc) },
+	{ "rx_smbus", IGB_STAT(stats.mgprc) },
+	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
+};
+
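+/* Per-queue statistics are reported only when more than one Rx or Tx
+ * queue is in use. */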
+#define IGB_QUEUE_STATS_LEN \
+	((((((struct igb_adapter *)netdev->priv)->num_rx_queues > 1) ? \
+	  ((struct igb_adapter *)netdev->priv)->num_rx_queues : 0 ) + \
+	 (((((struct igb_adapter *)netdev->priv)->num_tx_queues > 1) ? \
+	  ((struct igb_adapter *)netdev->priv)->num_tx_queues : 0 ))) * \
+	(sizeof(struct igb_queue_stats) / sizeof(u64)))
+#define IGB_GLOBAL_STATS_LEN	\
+	sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
+#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
+static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
+
+static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (hw->media_type == e1000_media_type_copper) {
+
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+		                   SUPPORTED_10baseT_Full |
+		                   SUPPORTED_100baseT_Half |
+		                   SUPPORTED_100baseT_Full |
+		                   SUPPORTED_1000baseT_Full|
+		                   SUPPORTED_Autoneg |
+		                   SUPPORTED_TP);
+		if (hw->phy.type == e1000_phy_ife)
+			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_TP;
+
+		if (hw->mac.autoneg == 1) {
+			ecmd->advertising |= ADVERTISED_Autoneg;
+			/* the e1000 autoneg seems to match ethtool nicely */
+			ecmd->advertising |= hw->phy.autoneg_advertised;
+		}
+
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = hw->phy.addr;
+	} else {
+		ecmd->supported   = (SUPPORTED_1000baseT_Full |
+				     SUPPORTED_FIBRE |
+				     SUPPORTED_Autoneg);
+
+		ecmd->advertising = (ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg);
+
+		ecmd->port = PORT_FIBRE;
+	}
+
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU) {
+
+		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+		                                   &adapter->link_duplex);
+		ecmd->speed = adapter->link_speed;
+
+		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
+		 *          and HALF_DUPLEX != DUPLEX_HALF */
+
+		if (adapter->link_duplex == FULL_DUPLEX)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	return 0;
+}
+
+static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		hw->mac.autoneg = 1;
+		if (hw->media_type == e1000_media_type_fiber)
+			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
+			                             ADVERTISED_FIBRE |
+			                             ADVERTISED_Autoneg;
+		else
+			hw->phy.autoneg_advertised = ecmd->advertising |
+			                             ADVERTISED_TP |
+			                             ADVERTISED_Autoneg;
+		ecmd->advertising = hw->phy.autoneg_advertised;
+	} else
+		if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+			clear_bit(__IGB_RESETTING, &adapter->state);
+			return -EINVAL;
+		}
+
+	/* reset the link */
+
+	if (netif_running(adapter->netdev)) {
+		igb_down(adapter);
+		igb_up(adapter);
+	} else
+		igb_reset(adapter);
+
+	clear_bit(__IGB_RESETTING, &adapter->state);
+	return 0;
+}
+
+static void igb_get_pauseparam(struct net_device *netdev,
+                               struct ethtool_pauseparam *pause)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	pause->autoneg =
+		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	if (hw->mac.fc == e1000_fc_rx_pause)
+		pause->rx_pause = 1;
+	else if (hw->mac.fc == e1000_fc_tx_pause)
+		pause->tx_pause = 1;
+	else if (hw->mac.fc == e1000_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int igb_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	adapter->fc_autoneg = pause->autoneg;
+
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (pause->rx_pause && pause->tx_pause)
+		hw->mac.fc = e1000_fc_full;
+	else if (pause->rx_pause && !pause->tx_pause)
+		hw->mac.fc = e1000_fc_rx_pause;
+	else if (!pause->rx_pause && pause->tx_pause)
+		hw->mac.fc = e1000_fc_tx_pause;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		hw->mac.fc = e1000_fc_none;
+
+	hw->mac.original_fc = hw->mac.fc;
+
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		if (netif_running(adapter->netdev)) {
+			igb_down(adapter);
+			igb_up(adapter);
+		} else
+			igb_reset(adapter);
+	} else
+		retval = ((hw->media_type == e1000_media_type_fiber) ?
+			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+	clear_bit(__IGB_RESETTING, &adapter->state);
+	return retval;
+}
+
+static u32 igb_get_rx_csum(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	return adapter->rx_csum;
+}
+
+static int igb_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	adapter->rx_csum = data;
+
+	return 0;
+}
+
+static u32 igb_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int igb_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int igb_set_tso(struct net_device *netdev, u32 data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	if (data)
+		netdev->features |= NETIF_F_TSO;
+	else
+		netdev->features &= ~NETIF_F_TSO;
+
+	if (data)
+		netdev->features |= NETIF_F_TSO6;
+	else
+		netdev->features &= ~NETIF_F_TSO6;
+
+	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+	return 0;
+}
+
+static u32 igb_get_msglevel(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void igb_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int igb_get_regs_len(struct net_device *netdev)
+{
+#define IGB_REGS_LEN 32
+	return IGB_REGS_LEN * sizeof(u32);
+}
+
+static void igb_get_regs(struct net_device *netdev,
+	                 struct ethtool_regs *regs, void *p)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, IGB_REGS_LEN * sizeof(u32));
+
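+	/* Tag the dump with a format version plus the chip revision and
+	 * device id. */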
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = E1000_READ_REG(hw, E1000_CTRL);
+	regs_buff[1]  = E1000_READ_REG(hw, E1000_STATUS);
+
+	regs_buff[2]  = E1000_READ_REG(hw, E1000_RCTL);
+	regs_buff[3]  = E1000_READ_REG(hw, E1000_RDLEN);
+	regs_buff[4]  = E1000_READ_REG(hw, E1000_RDH);
+	regs_buff[5]  = E1000_READ_REG(hw, E1000_RDT);
+	regs_buff[6]  = E1000_READ_REG(hw, E1000_RDTR);
+
+	regs_buff[7]  = E1000_READ_REG(hw, E1000_TCTL);
+	regs_buff[8]  = E1000_READ_REG(hw, E1000_TDLEN);
+	regs_buff[9]  = E1000_READ_REG(hw, E1000_TDH);
+	regs_buff[10] = E1000_READ_REG(hw, E1000_TDT);
+	regs_buff[11] = E1000_READ_REG(hw, E1000_TIDV);
+
+	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
+	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	regs_buff[13] = (u32)phy_data; /* cable length */
+	regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+	regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+	regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+	regs_buff[18] = regs_buff[13]; /* cable polarity */
+	regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+	regs_buff[20] = regs_buff[17]; /* polarity correction */
+	/* phy receive errors */
+	regs_buff[22] = adapter->phy_stats.receive_errors;
+	regs_buff[23] = regs_buff[13]; /* mdix mode */
+	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
+	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
+}
+
+static int igb_get_eeprom_len(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	return adapter->hw.nvm.word_size * 2;
+}
+
+static int igb_get_eeprom(struct net_device *netdev,
+                          struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	int first_word, last_word;
+	int ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+	eeprom_buff = kmalloc(sizeof(u16) *
+			(last_word - first_word + 1), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	if (hw->nvm.type == e1000_nvm_eeprom_spi)
+		ret_val = e1000_read_nvm(hw, first_word,
+		                         last_word - first_word + 1,
+		                         eeprom_buff);
+	else {
+		for (i = 0; i < last_word - first_word + 1; i++)
+			if ((ret_val = e1000_read_nvm(hw, first_word + i, 1,
+			                              &eeprom_buff[i])))
+				break;
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+			eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int igb_set_eeprom(struct net_device *netdev,
+                          struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	void *ptr;
+	int max_len, first_word, last_word, ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = hw->nvm.word_size * 2;
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (void *)eeprom_buff;
+
+	if (eeprom->offset & 1) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		ret_val = e1000_read_nvm(hw, first_word, 1,
+					    &eeprom_buff[0]);
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+		/* need read/modify/write of last changed EEPROM word */
+		/* only the first byte of the word is being modified */
+		ret_val = e1000_read_nvm(hw, last_word, 1,
+		                  &eeprom_buff[last_word - first_word]);
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_word - first_word + 1; i++)
+		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+	ret_val = e1000_write_nvm(hw, first_word,
+	                          last_word - first_word + 1, eeprom_buff);
+
+	/* Update the checksum if the write touched the checksummed
+	 * first part of the EEPROM */
+	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
+		e1000_update_nvm_checksum(hw);
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void igb_get_drvinfo(struct net_device *netdev,
+                            struct ethtool_drvinfo *drvinfo)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	char firmware_version[32];
+	u16 eeprom_data;
+
+	strncpy(drvinfo->driver,  igb_driver_name, 32);
+	strncpy(drvinfo->version, igb_driver_version, 32);
+
+	/* EEPROM image version # is reported as firmware version # for
+	 * 82575 controllers */
+	e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
+	sprintf(firmware_version, "%d.%d-%d",
+		(eeprom_data & 0xF000) >> 12,
+		(eeprom_data & 0x0FF0) >> 4,
+		eeprom_data & 0x000F);
+
+	strncpy(drvinfo->fw_version, firmware_version, 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = IGB_STATS_LEN;
+	drvinfo->testinfo_len = IGB_TEST_LEN;
+	drvinfo->regdump_len = igb_get_regs_len(netdev);
+	drvinfo->eedump_len = igb_get_eeprom_len(netdev);
+}
+
+static void igb_get_ringparam(struct net_device *netdev,
+                              struct ethtool_ringparam *ring)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_ring *tx_ring = adapter->tx_ring;
+	struct igb_ring *rx_ring = adapter->rx_ring;
+
+	ring->rx_max_pending = IGB_MAX_RXD;
+	ring->tx_max_pending = IGB_MAX_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int igb_set_ringparam(struct net_device *netdev,
+                             struct ethtool_ringparam *ring)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_buffer *old_buf;
+	struct igb_buffer *old_rx_buf;
+	void *old_desc;
+	int i, err;
+	u32 new_rx_count, new_tx_count, old_size;
+	dma_addr_t old_dma;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
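+	/* Clamp the requested counts to the supported range and round them
+	 * to the required descriptor multiple. */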
+	new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
+	new_rx_count = min(new_rx_count, (u32) IGB_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
+	new_tx_count = min(new_tx_count, (u32) IGB_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring->count) &&
+	    (new_rx_count == adapter->rx_ring->count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (netif_running(adapter->netdev))
+		igb_down(adapter);
+
+	/*
+	 * We can't just free everything and then setup again,
+	 * because the ISRs in MSI-X mode get passed pointers
+	 * to the tx and rx ring structs.
+	 */
+	if (new_tx_count != adapter->tx_ring->count) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			/* Save existing descriptor ring */
+			old_buf = adapter->tx_ring[i].buffer_info;
+			old_desc = adapter->tx_ring[i].desc;
+			old_size = adapter->tx_ring[i].size;
+			old_dma = adapter->tx_ring[i].dma;
+			/* Try to allocate a new one */
+			adapter->tx_ring[i].buffer_info = NULL;
+			adapter->tx_ring[i].desc = NULL;
+			adapter->tx_ring[i].count = new_tx_count;
+			err = igb_setup_tx_resources(adapter,
+						&adapter->tx_ring[i]);
+			if (err) {
+				/* Restore the old one so at least
+				   the adapter still works, even if
+				   we failed the request */
+				adapter->tx_ring[i].buffer_info = old_buf;
+				adapter->tx_ring[i].desc = old_desc;
+				adapter->tx_ring[i].size = old_size;
+				adapter->tx_ring[i].dma = old_dma;
+				goto err_setup;
+			}
+			/* Free the old buffer manually */
+			vfree(old_buf);
+			pci_free_consistent(adapter->pdev, old_size,
+			                    old_desc, old_dma);
+		}
+	}
+
+	if (new_rx_count != adapter->rx_ring->count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+
+			old_rx_buf = adapter->rx_ring[i].buffer_info;
+			old_desc = adapter->rx_ring[i].desc;
+			old_size = adapter->rx_ring[i].size;
+			old_dma = adapter->rx_ring[i].dma;
+
+			adapter->rx_ring[i].buffer_info = NULL;
+			adapter->rx_ring[i].desc = NULL;
+			adapter->rx_ring[i].dma = 0;
+			adapter->rx_ring[i].count = new_rx_count;
+			err = igb_setup_rx_resources(adapter,
+			                             &adapter->rx_ring[i]);
+			if (err) {
+				adapter->rx_ring[i].buffer_info = old_rx_buf;
+				adapter->rx_ring[i].desc = old_desc;
+				adapter->rx_ring[i].size = old_size;
+				adapter->rx_ring[i].dma = old_dma;
+				goto err_setup;
+			}
+
+			vfree(old_rx_buf);
+			pci_free_consistent(adapter->pdev, old_size, old_desc,
+			                    old_dma);
+		}
+	}
+
+	err = 0;
+err_setup:
+	if (netif_running(adapter->netdev))
+		igb_up(adapter);
+
+	clear_bit(__IGB_RESETTING, &adapter->state);
+	return err;
+}
+
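+/* REG_PATTERN_TEST writes each test pattern (masked by the writable bits W)
+ * to register R, reads it back, and compares the result under mask M;
+ * REG_SET_AND_CHECK performs a single write/read-back check.  On a mismatch
+ * the failing register offset is stored in *data and the calling function
+ * returns 1. */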
+#define REG_PATTERN_TEST(R, M, W)                                              \
+{                                                                              \
+	u32 _pat, _value;                                                      \
+	u32 _test[] =                                                          \
+		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};              \
+	for (_pat = 0; _pat < ARRAY_SIZE(_test); _pat++) {                     \
+		writel((_test[_pat] & W), (adapter->hw.hw_addr + R));          \
+		_value = readl(adapter->hw.hw_addr + R);                       \
+		if (_value != (_test[_pat] & W & M)) {                         \
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
+			        "0x%08X expected 0x%08X\n",                    \
+			        R, _value, (_test[_pat] & W & M));             \
+			*data = R;                                             \
+			return 1;                                              \
+		}                                                              \
+	}                                                                      \
+}
+
+#define REG_SET_AND_CHECK(R, M, W)                                             \
+{                                                                              \
+	u32 _value;                                                             \
+	writel((W & M), (adapter->hw.hw_addr + R));                            \
+	_value = readl(adapter->hw.hw_addr + R);                               \
+	if ((W & M) != (_value & M)) {                                         \
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
+		        "expected 0x%08X\n", R, (_value & M), (W & M));        \
+		*data = R;                                                     \
+		return 1;                                                      \
+	}                                                                      \
+}
+
+static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
+{
+	struct igb_reg_test *test;
+	u32 value, before, after;
+	u32 i, toggle;
+
+	toggle = 0x7FFFF3FF;
+	test = reg_test_82575;
+
+	/* Because the status register is such a special case,
+	 * we handle it separately from the rest of the register
+	 * tests.  Some bits are read-only, some toggle, and some
+	 * are writable on newer MACs.
+	 */
+	before = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+	value = (E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle);
+	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, toggle);
+	after = E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle;
+	if (value != after) {
+		DPRINTK(DRV, ERR, "failed STATUS register test got: "
+		        "0x%08X expected: 0x%08X\n", after, value);
+		*data = 1;
+		return 1;
+	}
+	/* restore previous status */
+	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, before);
+
+	/* Perform the remainder of the register test, looping through
+	 * the test table until we either fail or reach the null entry.
+	 */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 0x100),
+						test->mask,
+						test->write);
+				break;
+			case SET_READ_TEST:
+				REG_SET_AND_CHECK(test->reg + (i * 0x100),
+						test->mask,
+						test->write);
+				break;
+			case WRITE_NO_TEST:
+				writel(test->write,
+				    (adapter->hw.hw_addr + test->reg)
+				        + (i * 0x100));
+				break;
+			case TABLE32_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 4),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_LO:
+				REG_PATTERN_TEST(test->reg + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_HI:
+				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			}
+		}
+		test++;
+	}
+
+	*data = 0;
+	return 0;
+}
+
+static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
+{
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
+
+	*data = 0;
+	/* Read and add up the contents of the EEPROM */
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
+			*data = 1;
+			break;
+		}
+		checksum += temp;
+	}
+
+	/* If the checksum is not correct return an error, else the test passed */
+	if ((checksum != (u16) NVM_SUM) && !(*data))
+		*data = 2;
+
+	return *data;
+}
+
+static irqreturn_t igb_test_intr(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = (struct net_device *) data;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	adapter->test_icr |= E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+	return IRQ_HANDLED;
+}
+
+static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 mask, i = 0, shared_int = 1;
+	u32 irq = adapter->pdev->irq;
+
+	*data = 0;
+
+	/* Hook up test interrupt handler just for this test */
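+	/* MSI-X interrupts are not tested yet; with MSI the handler is
+	 * requested exclusively, and with legacy interrupts a non-shared
+	 * handler is probed first before falling back to a shared one. */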
+	if (adapter->msix_entries) {
+		/* NOTE: we don't test MSI-X interrupts here, yet */
+		return 0;
+	} else if (adapter->flags & FLAG_IGB_HAS_MSI) {
+		shared_int = 0;
+		if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
+			*data = 1;
+			return -1;
+		}
+	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
+	                        netdev->name, netdev)) {
+		shared_int = 0;
+	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
+	                       netdev->name, netdev)) {
+		*data = 1;
+		return -1;
+	}
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
+
+	/* Disable all the interrupts */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Test each interrupt */
+	for (; i < 10; i++) {
+		/* Interrupt to test */
+		mask = 1 << i;
+
+		if (!shared_int) {
+			/* Disable the interrupt to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
+			                ~mask & 0x00007FFF);
+			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
+			                ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/* Enable the interrupt to be reported in
+		 * the cause register and then force the same
+		 * interrupt and see if one gets posted.  If
+		 * an interrupt was not posted to the bus, the
+		 * test failed.
+		 */
+		adapter->test_icr = 0;
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, mask);
+		E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
+		msleep(10);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+
+		if (!shared_int) {
+			/* Disable the other interrupts to be reported in
+			 * the cause register and then force the other
+			 * interrupts and see if any get posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
+			                ~mask & 0x00007FFF);
+			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
+			                ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
+static void igb_free_desc_rings(struct igb_adapter *adapter)
+{
+	struct igb_ring *tx_ring = &adapter->test_tx_ring;
+	struct igb_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i;
+
+	if (tx_ring->desc && tx_ring->buffer_info) {
+		for (i = 0; i < tx_ring->count; i++) {
+			struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
+			if (buf->dma)
+				pci_unmap_single(pdev, buf->dma, buf->length,
+				                 PCI_DMA_TODEVICE);
+			if (buf->skb)
+				dev_kfree_skb(buf->skb);
+		}
+	}
+
+	if (rx_ring->desc && rx_ring->buffer_info) {
+		for (i = 0; i < rx_ring->count; i++) {
+			struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
+			if (buf->dma)
+				pci_unmap_single(pdev, buf->dma,
+				                 IGB_RXBUFFER_2048,
+				                 PCI_DMA_FROMDEVICE);
+			if (buf->skb)
+				dev_kfree_skb(buf->skb);
+		}
+	}
+
+	if (tx_ring->desc) {
+		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+		                    tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+	if (rx_ring->desc) {
+		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+		                    rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+
+	kfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+	kfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	return;
+}
+
+static int igb_setup_desc_rings(struct igb_adapter *adapter)
+{
+	struct igb_ring *tx_ring = &adapter->test_tx_ring;
+	struct igb_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 rctl;
+	int size, i, ret_val;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+
+	if (!tx_ring->count)
+		tx_ring->count = IGB_DEFAULT_TXD;
+
+	size = tx_ring->count * sizeof(struct igb_buffer);
+	tx_ring->buffer_info = kzalloc(size, GFP_KERNEL);
+	if (!tx_ring->buffer_info) {
+		ret_val = 1;
+		goto err_nomem;
+	}
+
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	                                     &tx_ring->dma);
+	if (!tx_ring->desc) {
+		ret_val = 2;
+		goto err_nomem;
+	}
+	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
+
+	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL,
+			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH, ((u64) tx_ring->dma >> 32));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN,
+			tx_ring->count * sizeof(struct e1000_tx_desc));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDH, 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_TDT, 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+			E1000_TCTL_PSP | E1000_TCTL_EN |
+			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+	for (i = 0; i < tx_ring->count; i++) {
+		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+		struct sk_buff *skb;
+		unsigned int skb_size = 1024;
+
+		skb = alloc_skb(skb_size, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 3;
+			goto err_nomem;
+		}
+		skb_put(skb, skb_size);
+		tx_ring->buffer_info[i].skb = skb;
+		tx_ring->buffer_info[i].length = skb->len;
+		tx_ring->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, skb->len,
+				       PCI_DMA_TODEVICE);
+		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+		tx_desc->lower.data = cpu_to_le32(skb->len);
+		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+						   E1000_TXD_CMD_IFCS |
+						   E1000_TXD_CMD_RS);
+		tx_desc->upper.data = 0;
+	}
+
+	/* Setup Rx descriptor ring and Rx buffers */
+
+	if (!rx_ring->count)
+		rx_ring->count = IGB_DEFAULT_RXD;
+
+	size = rx_ring->count * sizeof(struct igb_buffer);
+	rx_ring->buffer_info = kzalloc(size, GFP_KERNEL);
+	if (!rx_ring->buffer_info) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+	                                     &rx_ring->dma);
+	if (!rx_ring->desc) {
+		ret_val = 5;
+		goto err_nomem;
+	}
+	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL,
+			((u64) rx_ring->dma & 0xFFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH, ((u64) rx_ring->dma >> 32));
+	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN, rx_ring->size);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDH, 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDT, 0);
+	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	E1000_WRITE_REG(&adapter->hw, E1000_SRRCTL0, 0);
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+		struct sk_buff *skb;
+
+		skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 6;
+			goto err_nomem;
+		}
+		skb_reserve(skb, NET_IP_ALIGN);
+		rx_ring->buffer_info[i].skb = skb;
+		rx_ring->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048,
+				       PCI_DMA_FROMDEVICE);
+		rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
+		memset(skb->data, 0x00, skb->len);
+	}
+
+	return 0;
+
+err_nomem:
+	igb_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static void igb_phy_disable_receiver(struct igb_adapter *adapter)
+{
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
+	e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
+	e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
+	e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
+}
+
+static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
+{
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
+
+	adapter->hw.mac.autoneg = 0;
+
+	if (adapter->hw.phy.type == e1000_phy_m88) {
+		/* Auto-MDI/MDIX Off */
+		e1000_write_phy_reg(&adapter->hw,
+				    M88E1000_PHY_SPEC_CTRL, 0x0808);
+		/* reset to update Auto-MDI/MDIX */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x9140);
+		/* autoneg off */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8140);
+	} else if (adapter->hw.phy.type == e1000_phy_gg82563)
+		e1000_write_phy_reg(&adapter->hw,
+		                    GG82563_PHY_KMRN_MODE_CTRL,
+		                    0x1CC);
+
+	ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+
+	if (adapter->hw.phy.type == e1000_phy_ife) {
+		/* force 100, set loopback */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x6100);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	} else {
+		/* force 1000, set loopback */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x4140);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	}
+
+	if (adapter->hw.media_type == e1000_media_type_copper &&
+	    adapter->hw.phy.type == e1000_phy_m88)
+		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+	else {
+		/* Set the ILOS bit on the fiber NIC if half duplex link is
+		 * detected. */
+		stat_reg = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+		if ((stat_reg & E1000_STATUS_FD) == 0)
+			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so that the PHY does not begin to
+	 * auto-negotiate when a cable is reconnected to the NIC.
+	 */
+	if (adapter->hw.phy.type == e1000_phy_m88)
+		igb_phy_disable_receiver(adapter);
+
+	udelay(500);
+
+	return 0;
+}
+
+static int igb_set_phy_loopback(struct igb_adapter *adapter)
+{
+	return igb_integrated_phy_loopback(adapter);
+}
+
+static int igb_setup_loopback_test(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (hw->media_type == e1000_media_type_fiber ||
+	    hw->media_type == e1000_media_type_internal_serdes) {
+		rctl = E1000_READ_REG(hw, E1000_RCTL);
+		rctl |= E1000_RCTL_LBM_TCVR;
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+		return 0;
+	} else if (hw->media_type == e1000_media_type_copper) {
+		return igb_set_phy_loopback(adapter);
+	}
+
+	return 7;
+}
+
+static void igb_loopback_cleanup(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+	u16 phy_reg;
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	hw->mac.autoneg = 1;
+	if (hw->phy.type == e1000_phy_gg82563)
+		e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
+	e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+	if (phy_reg & MII_CR_LOOPBACK) {
+		phy_reg &= ~MII_CR_LOOPBACK;
+		e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+		e1000_phy_commit(hw);
+	}
+}
+
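+/* The loopback test frame is 0xFF in the first half and 0xAA in the second,
+ * with 0xBE/0xAF marker bytes that igb_check_lbtest_frame() looks for on
+ * receive. */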
+static void igb_create_lbtest_frame(struct sk_buff *skb,
+                                    unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+static int igb_run_loopback_test(struct igb_adapter *adapter)
+{
+	struct igb_ring *tx_ring = &adapter->test_tx_ring;
+	struct igb_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val = 0;
+	unsigned long time;
+
+	E1000_WRITE_REG(&adapter->hw, E1000_RDT, rx_ring->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring.
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop.
+	 */
+
+	if (rx_ring->count <= tx_ring->count)
+		lc = ((tx_ring->count / 64) * 2) + 1;
+	else
+		lc = ((rx_ring->count / 64) * 2) + 1;
+
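+	/* k walks the Tx test ring as frames are sent; l walks the Rx test
+	 * ring as they are checked on receive. */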
+	k = l = 0;
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
+			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
+			                        1024);
+			pci_dma_sync_single_for_device(pdev,
+				tx_ring->buffer_info[k].dma,
+				tx_ring->buffer_info[k].length,
+				PCI_DMA_TODEVICE);
+			if (unlikely(++k == tx_ring->count)) k = 0;
+		}
+		E1000_WRITE_REG(&adapter->hw, E1000_TDT, k);
+		msleep(200);
+		time = jiffies; /* set the start time for the receive */
+		good_cnt = 0;
+		do { /* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+			                rx_ring->buffer_info[l].dma,
+			                IGB_RXBUFFER_2048,
+			                PCI_DMA_FROMDEVICE);
+
+			ret_val = igb_check_lbtest_frame(
+			                     rx_ring->buffer_info[l].skb, 1024);
+			if (!ret_val)
+				good_cnt++;
+			if (unlikely(++l == rx_ring->count))
+				l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives; if it is
+			 * exceeded, break out and flag an error
+			 */
+		} while (good_cnt < 64 && jiffies < (time + 20));
+		if (good_cnt != 64) {
+			ret_val = 13; /* ret_val is the same as mis-compare */
+			break;
+		}
+		if (jiffies >= (time + 20)) {
+			ret_val = 14; /* error code for time out error */
+			break;
+		}
+	} /* end loop count loop */
+	return ret_val;
+}
+
+static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
+{
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+	*data = igb_setup_desc_rings(adapter);
+	if (*data)
+		goto out;
+	*data = igb_setup_loopback_test(adapter);
+	if (*data)
+		goto err_loopback;
+	*data = igb_run_loopback_test(adapter);
+	igb_loopback_cleanup(adapter);
+
+err_loopback:
+	igb_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+static int igb_link_test(struct igb_adapter *adapter, u64 *data)
+{
+	*data = 0;
+	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
+		int i = 0;
+		adapter->hw.mac.serdes_has_link = 0;
+
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
+		do {
+			e1000_check_for_link(&adapter->hw);
+			if (adapter->hw.mac.serdes_has_link == 1)
+				return *data;
+			msleep(20);
+		} while (i++ < 3750);
+
+		*data = 1;
+	} else {
+		e1000_check_for_link(&adapter->hw);
+		if (adapter->hw.mac.autoneg)
+			msleep(4000);
+
+		if (!(E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		      E1000_STATUS_LU))
+			*data = 1;
+	}
+	return *data;
+}
+
+static int igb_diag_test_count(struct net_device *netdev)
+{
+	return IGB_TEST_LEN;
+}
+
+static void igb_diag_test(struct net_device *netdev,
+                          struct ethtool_test *eth_test, u64 *data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	u16 autoneg_advertised;
+	u8 forced_speed_duplex, autoneg;
+	bool if_running = netif_running(netdev);
+
+	set_bit(__IGB_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		/* save speed, duplex, autoneg settings */
+		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+		autoneg = adapter->hw.mac.autoneg;
+
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (igb_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			igb_reset(adapter);
+
+		if (igb_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		igb_reset(adapter);
+		if (igb_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		igb_reset(adapter);
+		if (igb_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		igb_reset(adapter);
+		if (igb_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* restore speed, duplex, autoneg settings */
+		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+		adapter->hw.mac.autoneg = autoneg;
+
+		/* force this routine to wait until autoneg complete/timeout */
+		adapter->hw.phy.wait_for_link = 1;
+		igb_reset(adapter);
+		adapter->hw.phy.wait_for_link = 0;
+
+		clear_bit(__IGB_TESTING, &adapter->state);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
+		/* Online tests */
+		if (igb_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__IGB_TESTING, &adapter->state);
+	}
+	msleep_interruptible(4 * 1000);
+}
+
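+/* Returns 1 (and clears wol->supported) when this port cannot generate wake
+ * events; returns 0 for ports where WoL is allowed.
+ */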
+static int igb_wol_exclusion(struct igb_adapter *adapter,
+                             struct ethtool_wolinfo *wol)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 1; /* fail by default */
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82575EB_COPPER:
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+		/* Wake events not supported on port B */
+		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non-excluded adapter ports */
+		retval = 0;
+		break;
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		/* WoL not supported */
+		wol->supported = 0;
+		break;
+	default:
+		/* Dual-port cards only support WoL on port A from now on,
+		 * unless it was enabled in the EEPROM for port B, so
+		 * exclude FUNC_1 ports from having WoL enabled.
+		 */
+		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+		    !adapter->eeprom_wol) {
+			wol->supported = 0;
+			break;
+		}
+
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+	                 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	/* igb_wol_exclusion() sets ->supported = 0 and returns 1 if WoL is
+	 * not supported by this hardware
+	 */
+	if (igb_wol_exclusion(adapter, wol))
+		return;
+
+	/* apply any specific unsupported masks here */
+	switch (adapter->hw.device_id) {
+	default:
+		break;
+	}
+
+	if (adapter->wol & E1000_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & E1000_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & E1000_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & E1000_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+
+	return;
+}
+
+static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (igb_wol_exclusion(adapter, wol))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
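+	/* apply any device-specific restrictions on the requested wake-up
+	 * options here
+	 */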
+	switch (hw->device_id) {
+	default:
+		break;
+	}
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= E1000_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= E1000_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= E1000_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define IGB_ID_INTERVAL		(HZ/4)
+
+/* bit defines for adapter->led_status */
+#define IGB_LED_ON		0
+
+static void igb_led_blink_callback(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
+
+	if (test_and_change_bit(IGB_LED_ON, &adapter->led_status))
+		e1000_led_off(&adapter->hw);
+	else
+		e1000_led_on(&adapter->hw);
+
+	mod_timer(&adapter->blink_timer, jiffies + IGB_ID_INTERVAL);
+}
+
+static int igb_phys_id(struct net_device *netdev, u32 data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
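+	/* clamp an unspecified or oversized blink duration to the longest
+	 * schedulable timeout
+	 */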
+	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
+		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+
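+	/* IFE PHYs are blinked from a software timer; other PHYs blink the
+	 * LED in hardware via e1000_blink_led()
+	 */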
+	if (hw->phy.type == e1000_phy_ife) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = igb_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long) adapter;
+		}
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+		e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+	} else {
+		e1000_blink_led(hw);
+		msleep_interruptible(data * 1000);
+	}
+
+	e1000_led_off(hw);
+	clear_bit(IGB_LED_ON, &adapter->led_status);
+	e1000_cleanup_led(hw);
+
+	return 0;
+}
+
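+/* Restart link negotiation by reinitializing the interface if it is up. */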
+static int igb_nway_reset(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	if (netif_running(netdev))
+		igb_reinit_locked(adapter);
+	return 0;
+}
+
+static int igb_get_stats_count(struct net_device *netdev)
+{
+	return IGB_STATS_LEN;
+}
+
+static void igb_get_ethtool_stats(struct net_device *netdev,
+                                  struct ethtool_stats *stats, u64 *data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+#if defined(CONFIG_IGB_MQ) || defined(CONFIG_IGB_MQ_RX)
+	u64 *queue_stat;
+	int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
+	int j;
+#endif
+	int i;
+
+	igb_update_stats(adapter);
+	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
+		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+}
+
+static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *igb_gstrings_test,
+			IGB_TEST_LEN*ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, igb_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
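+		/* per-queue statistic strings follow the global statistics */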
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+/*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static struct ethtool_ops igb_ethtool_ops = {
+	.get_settings           = igb_get_settings,
+	.set_settings           = igb_set_settings,
+	.get_drvinfo            = igb_get_drvinfo,
+	.get_regs_len           = igb_get_regs_len,
+	.get_regs               = igb_get_regs,
+	.get_wol                = igb_get_wol,
+	.set_wol                = igb_set_wol,
+	.get_msglevel           = igb_get_msglevel,
+	.set_msglevel           = igb_set_msglevel,
+	.nway_reset             = igb_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_eeprom_len         = igb_get_eeprom_len,
+	.get_eeprom             = igb_get_eeprom,
+	.set_eeprom             = igb_set_eeprom,
+	.get_ringparam          = igb_get_ringparam,
+	.set_ringparam          = igb_set_ringparam,
+	.get_pauseparam         = igb_get_pauseparam,
+	.set_pauseparam         = igb_set_pauseparam,
+	.get_rx_csum            = igb_get_rx_csum,
+	.set_rx_csum            = igb_set_rx_csum,
+	.get_tx_csum            = igb_get_tx_csum,
+	.set_tx_csum            = igb_set_tx_csum,
+	.get_sg                 = ethtool_op_get_sg,
+	.set_sg                 = ethtool_op_set_sg,
+	.get_tso                = ethtool_op_get_tso,
+	.set_tso                = igb_set_tso,
+	.self_test_count        = igb_diag_test_count,
+	.self_test              = igb_diag_test,
+	.get_strings            = igb_get_strings,
+	.phys_id                = igb_phys_id,
+	.get_stats_count        = igb_get_stats_count,
+	.get_ethtool_stats      = igb_get_ethtool_stats,
+	.get_perm_addr          = ethtool_op_get_perm_addr,
+};
+
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+}
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_regs.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,467 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR0    0x01680  /* Ext. Int. Throttling Rate Vector 0 - RW */
+#define E1000_EITR1    0x01684  /* Ext. Int. Throttling Rate Vector 1 - RW */
+#define E1000_EITR2    0x01688  /* Ext. Int. Throttling Rate Vector 2 - RW */
+#define E1000_EITR3    0x0168C  /* Ext. Int. Throttling Rate Vector 3 - RW */
+#define E1000_EITR4    0x01690  /* Ext. Int. Throttling Rate Vector 4 - RW */
+#define E1000_EITR5    0x01694  /* Ext. Int. Throttling Rate Vector 5 - RW */
+#define E1000_EITR6    0x01698  /* Ext. Int. Throttling Rate Vector 6 - RW */
+#define E1000_EITR7    0x0169C  /* Ext. Int. Throttling Rate Vector 7 - RW */
+#define E1000_EITR8    0x016A0  /* Ext. Int. Throttling Rate Vector 8 - RW */
+#define E1000_EITR9    0x016A4  /* Ext. Int. Throttling Rate Vector 9 - RW */
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap -RW */
+#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
+#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDFPCQ0  0x02430
+#define E1000_RDFPCQ1  0x02434
+#define E1000_RDFPCQ2  0x02438
+#define E1000_RDFPCQ3  0x0243C
+#define E1000_PBRTH    0x02458  /* PB RX Arbitration Threshold - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+#define E1000_SRRCTL0  0x0280C
+#define E1000_SRRCTL(_n) (0x280C + (_n << 8)) /* Split and Replication
+                                               * RX Control - RW */
+#define E1000_RDPUMB   0x025CC  /* DMA RX Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD   0x025D0  /* DMA RX Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD   0x025D4  /* DMA RX Descriptor uC Data Write - RW */
+#define E1000_RDPURD   0x025D8  /* DMA RX Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL  0x025DC  /* DMA RX Descriptor uC Control - RW */
+#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
+#define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
+#define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
+#define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL   0x02828  /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1  0x02928  /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ *
+ */
+#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
+#define E1000_RDBAH_REG(_n)   (E1000_RDBAH + (_n << 8))
+#define E1000_RDLEN_REG(_n)   (E1000_RDLEN + (_n << 8))
+#define E1000_RDH_REG(_n)     (E1000_RDH + (_n << 8))
+#define E1000_RDT_REG(_n)     (E1000_RDT + (_n << 8))
+#define E1000_RXDCTL_REG(_n)  (E1000_RXDCTL + (_n << 8))
+#define E1000_TDBAL_REG(_n)   (E1000_TDBAL + (_n << 8))
+#define E1000_TDBAH_REG(_n)   (E1000_TDBAH + (_n << 8))
+#define E1000_TDLEN_REG(_n)   (E1000_TDLEN + (_n << 8))
+#define E1000_TDH_REG(_n)     (E1000_TDH + (_n << 8))
+#define E1000_TDT_REG(_n)     (E1000_TDT + (_n << 8))
+#define E1000_TXDCTL_REG(_n)  (E1000_TXDCTL + (_n << 8))
+#define E1000_TARC_REG(_n)    (E1000_TARC0 + (_n << 8))
+#define E1000_RDBAL2         0x02A00 /* RX Descriptor Base Low Queue 2 - RW */
+#define E1000_RDBAH2         0x02A04 /* RX Descriptor Base High Queue 2 - RW */
+#define E1000_RDLEN2         0x02A08 /* RX Descriptor Length Queue 2 - RW */
+#define E1000_RDH2           0x02A10 /* RX Descriptor Head Queue 2 - RW */
+#define E1000_RDT2           0x02A18 /* RX Descriptor Tail Queue 2 - RW */
+#define E1000_RXDCTL2        0x02A28 /* RX Descriptor Control queue 2 - RW */
+#define E1000_RDBAL3         0x02B00 /* RX Descriptor Base Low Queue 3 - RW */
+#define E1000_RDBAH3         0x02B04 /* RX Descriptor Base High Queue 3 - RW */
+#define E1000_RDLEN3         0x02B08 /* RX Descriptor Length Queue 3 - RW */
+#define E1000_RDH3           0x02B10 /* RX Descriptor Head Queue 3 - RW */
+#define E1000_RDT3           0x02B18 /* RX Descriptor Tail Queue 3 - RW */
+#define E1000_RXDCTL3        0x02B28 /* RX Descriptor Control Queue 3 - RW */
+#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_TDPUMB   0x0357C  /* DMA TX Descriptor uC Mail Box - RW */
+#define E1000_TDPUAD   0x03580  /* DMA TX Descriptor uC Addr Command - RW */
+#define E1000_TDPUWD   0x03584  /* DMA TX Descriptor uC Data Write - RW */
+#define E1000_TDPURD   0x03588  /* DMA TX Descriptor uC Data  Read  - RW */
+#define E1000_TDPUCTL  0x0358C  /* DMA TX Descriptor uC Control - RW */
+#define E1000_DTXCTL   0x03590  /* DMA TX Control - RW */
+#define E1000_TDBAL    0x03800  /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH    0x03804  /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN    0x03808  /* TX Descriptor Length - RW */
+#define E1000_TDH      0x03810  /* TX Descriptor Head - RW */
+#define E1000_TDT      0x03818  /* TX Descriptor Tail - RW */
+#define E1000_TDBAL0   E1000_TDBAL /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH0   E1000_TDBAH /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN0   E1000_TDLEN /* TX Descriptor Length - RW */
+#define E1000_TDH0     E1000_TDH   /* TX Descriptor Head - RW */
+#define E1000_TDT0     E1000_TDT   /* TX Descriptor Tail - RW */
+#define E1000_TIDV     0x03820  /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
+#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDWBAL0         0x03838 /* TX Desc. WB Addr Low Queue 0 - RW */
+#define E1000_TDWBAH0         0x0383C /* TX Desc. WB Addr High Queue 0 - RW */
+#define E1000_TDWBAL_REG(_n) (E1000_TDWBAL0 + (_n << 8))
+#define E1000_TDWBAH_REG(_n) (E1000_TDWBAH0 + (_n << 8))
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
+#define E1000_TDWBAL1     0x03938  /* TX Descriptor WB Addr Low Queue 1 - RW */
+#define E1000_TDWBAH1     0x0393C  /* TX Descriptor WB Addr High Queue 1 - RW */
+#define E1000_TDBAL2      0x03A00  /* TX Descriptor Base Low Queue 2 - RW */
+#define E1000_TDBAH2      0x03A04  /* TX Descriptor Base High Queue 2 - RW */
+#define E1000_TDLEN2      0x03A08  /* TX Descriptor Length Queue 2 - RW */
+#define E1000_TDH2        0x03A10  /* TX Descriptor Head Queue 2 - RW */
+#define E1000_TDT2        0x03A18  /* TX Descriptor Tail Queue 2 - RW */
+#define E1000_TXDCTL2     0x03A28  /* TX Descriptor Control 2 - RW */
+#define E1000_TDWBAL2     0x03A38  /* TX Descriptor WB Addr Low Queue 2 - RW */
+#define E1000_TDWBAH2     0x03A3C  /* TX Descriptor WB Addr High Queue 2 - RW */
+#define E1000_TDBAL3      0x03B00  /* TX Descriptor Base Low Queue 3 - RW */
+#define E1000_TDBAH3      0x03B04  /* TX Descriptor Base High Queue 3 - RW */
+#define E1000_TDLEN3      0x03B08  /* TX Descriptor Length Queue 3 - RW */
+#define E1000_TDH3        0x03B10  /* TX Descriptor Head Queue 3 - RW */
+#define E1000_TDT3        0x03B18  /* TX Descriptor Tail Queue 3 - RW */
+#define E1000_TXDCTL3     0x03B28  /* TX Descriptor Control 3 - RW */
+#define E1000_TDWBAL3     0x03B38  /* TX Descriptor WB Addr Low Queue 3 - RW */
+#define E1000_TDWBAH3     0x03B3C  /* TX Descriptor WB Addr High Queue 3 - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker TX Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC      0x04044  /* Circuit Breaker RX Dropped Count */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker RX Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets TX Count */
+#define E1000_HTCBDPC     0x04124  /* Host TX Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* RX Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control */
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_PSRTYPE  0x05480  /* Packet Split Receive Type - RW */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VMD_CTL  0x0581C  /* VMDq Control - RW */
+#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA      0x0003C /* PHY address - RW */
+#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
+#define E1000_MSIXBM0   0x01600 /* MSI-X Allocation Register 0 - RW */
+#define E1000_MSIXBM1   0x01604 /* MSI-X Allocation Register 1 - RW */
+#define E1000_MSIXBM2   0x01608 /* MSI-X Allocation Register 2 - RW */
+#define E1000_MSIXBM3   0x0160C /* MSI-X Allocation Register 3 - RW */
+#define E1000_MSIXBM4   0x01610 /* MSI-X Allocation Register 4 - RW */
+#define E1000_MSIXBM5   0x01614 /* MSI-X Allocation Register 5 - RW */
+#define E1000_MSIXBM6   0x01618 /* MSI-X Allocation Register 6 - RW */
+#define E1000_MSIXBM7   0x0161C /* MSI-X Allocation Register 7 - RW */
+#define E1000_MSIXBM8   0x01620 /* MSI-X Allocation Register 8 - RW */
+#define E1000_MSIXBM9   0x01624 /* MSI-X Allocation Register 9 - RW */
+#define E1000_MSIXTADD0 0x0C000 /* MSI-X Table entry addr low reg 0 - RW */
+#define E1000_MSIXTADD1 0x0C010 /* MSI-X Table entry addr low reg 1 - RW */
+#define E1000_MSIXTADD2 0x0C020 /* MSI-X Table entry addr low reg 2 - RW */
+#define E1000_MSIXTADD3 0x0C030 /* MSI-X Table entry addr low reg 3 - RW */
+#define E1000_MSIXTADD4 0x0C040 /* MSI-X Table entry addr low reg 4 - RW */
+#define E1000_MSIXTADD5 0x0C050 /* MSI-X Table entry addr low reg 5 - RW */
+#define E1000_MSIXTADD6 0x0C060 /* MSI-X Table entry addr low reg 6 - RW */
+#define E1000_MSIXTADD7 0x0C070 /* MSI-X Table entry addr low reg 7 - RW */
+#define E1000_MSIXTADD8 0x0C080 /* MSI-X Table entry addr low reg 8 - RW */
+#define E1000_MSIXTADD9 0x0C090 /* MSI-X Table entry addr low reg 9 - RW */
+#define E1000_MSIXTUADD0 0x0C004 /* MSI-X Table entry addr upper reg 0 - RW */
+#define E1000_MSIXTUADD1 0x0C014 /* MSI-X Table entry addr upper reg 1 - RW */
+#define E1000_MSIXTUADD2 0x0C024 /* MSI-X Table entry addr upper reg 2 - RW */
+#define E1000_MSIXTUADD3 0x0C034 /* MSI-X Table entry addr upper reg 3 - RW */
+#define E1000_MSIXTUADD4 0x0C044 /* MSI-X Table entry addr upper reg 4 - RW */
+#define E1000_MSIXTUADD5 0x0C054 /* MSI-X Table entry addr upper reg 5 - RW */
+#define E1000_MSIXTUADD6 0x0C064 /* MSI-X Table entry addr upper reg 6 - RW */
+#define E1000_MSIXTUADD7 0x0C074 /* MSI-X Table entry addr upper reg 7 - RW */
+#define E1000_MSIXTUADD8 0x0C084 /* MSI-X Table entry addr upper reg 8 - RW */
+#define E1000_MSIXTUADD9 0x0C094 /* MSI-X Table entry addr upper reg 9 - RW */
+#define E1000_MSIXTMSG0 0x0C008 /* MSI-X Table entry message reg 0 - RW */
+#define E1000_MSIXTMSG1 0x0C018 /* MSI-X Table entry message reg 1 - RW */
+#define E1000_MSIXTMSG2 0x0C028 /* MSI-X Table entry message reg 2 - RW */
+#define E1000_MSIXTMSG3 0x0C038 /* MSI-X Table entry message reg 3 - RW */
+#define E1000_MSIXTMSG4 0x0C048 /* MSI-X Table entry message reg 4 - RW */
+#define E1000_MSIXTMSG5 0x0C058 /* MSI-X Table entry message reg 5 - RW */
+#define E1000_MSIXTMSG6 0x0C068 /* MSI-X Table entry message reg 6 - RW */
+#define E1000_MSIXTMSG7 0x0C078 /* MSI-X Table entry message reg 7 - RW */
+#define E1000_MSIXTMSG8 0x0C088 /* MSI-X Table entry message reg 8 - RW */
+#define E1000_MSIXTMSG9 0x0C098 /* MSI-X Table entry message reg 9 - RW */
+#define E1000_MSIXVCTRL0 0x0C00C /* MSI-X Table entry vector ctrl reg 0 - RW */
+#define E1000_MSIXVCTRL1 0x0C01C /* MSI-X Table entry vector ctrl reg 1 - RW */
+#define E1000_MSIXVCTRL2 0x0C02C /* MSI-X Table entry vector ctrl reg 2 - RW */
+#define E1000_MSIXVCTRL3 0x0C03C /* MSI-X Table entry vector ctrl reg 3 - RW */
+#define E1000_MSIXVCTRL4 0x0C04C /* MSI-X Table entry vector ctrl reg 4 - RW */
+#define E1000_MSIXVCTRL5 0x0C05C /* MSI-X Table entry vector ctrl reg 5 - RW */
+#define E1000_MSIXVCTRL6 0x0C06C /* MSI-X Table entry vector ctrl reg 6 - RW */
+#define E1000_MSIXVCTRL7 0x0C07C /* MSI-X Table entry vector ctrl reg 7 - RW */
+#define E1000_MSIXVCTRL8 0x0C08C /* MSI-X Table entry vector ctrl reg 8 - RW */
+#define E1000_MSIXVCTRL9 0x0C09C /* MSI-X Table entry vector ctrl reg 9 - RW */
+#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
+#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+
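+/* Register access helpers: the 82575 uses flat MMIO offsets, so
+ * E1000_REGISTER() performs no translation.
+ */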
+#define E1000_REGISTER(a, reg) (reg)
+
+#define E1000_TRANSLATE_REG(a, reg) (reg)
+
+#define E1000_WRITE_REG(a, reg, value) ( \
+    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
+
+#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
+
+#define E1000_WRITE_REG_IO(a, reg, offset) do { \
+    outl(reg, ((a)->io_base));                  \
+    outl(offset, ((a)->io_base + 4));      } while (0)
+
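+/* Reading STATUS flushes previously posted MMIO writes. */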
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
+
+#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
+
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/igb_param.c	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,735 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/netdevice.h>
+
+#include "igb.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IGB_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
+#define IGB_PARAM(X, desc) \
+	static int __devinitdata X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+	static int num_##X; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-4096
+ *
+ * Default Value: 256
+ */
+IGB_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-4096
+ *
+ * Default Value: 256
+ */
+IGB_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+IGB_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+IGB_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+IGB_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR                    3
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+/* LLIPort (Low Latency Interrupt TCP Port)
+ *
+ * Valid Range: 0 - 65535
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLIPort, "Low Latency Interrupt TCP Port");
+
+#define DEFAULT_LLIPORT                0
+#define MAX_LLIPORT               0xFFFF
+#define MIN_LLIPORT                    0
+
+/* LLIPush (Low Latency Interrupt on TCP Push flag)
+ *
+ * Valid Range: 0,1
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag");
+
+#define DEFAULT_LLIPUSH                0
+#define MAX_LLIPUSH                    1
+#define MIN_LLIPUSH                    0
+
+/* LLISize (Low Latency Interrupt on Packet Size)
+ *
+ * Valid Range: 0 - 1500
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLISize, "Low Latency Interrupt on Packet Size");
+
+#define DEFAULT_LLISIZE                0
+#define MAX_LLISIZE                 1500
+#define MIN_LLISIZE                    0
+
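+/* Describes one module parameter: how to validate it (enable, range, or
+ * list option) and its default value.
+ */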
+struct igb_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int  def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct igb_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
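+/* Validate a single option value against @opt; fall back to the default
+ * (and return -1) when the value is invalid.
+ */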
+static int __devinit igb_validate_option(int *value, struct igb_option *opt,
+                                         struct igb_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct igb_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+static void igb_check_fiber_options(struct igb_adapter *adapter);
+static void igb_check_copper_options(struct igb_adapter *adapter);
+
+/**
+ * igb_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit igb_check_options(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int bd = adapter->bd_number;
+	if (bd >= IGB_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct igb_option opt = {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(IGB_DEFAULT_TXD),
+			.def  = IGB_DEFAULT_TXD,
+			.arg  = { .r = { .min = IGB_MIN_TXD }}
+		};
+		struct igb_ring *tx_ring = adapter->tx_ring;
+		int i;
+		opt.arg.r.max = IGB_MAX_TXD;
+
+		if (num_TxDescriptors > bd) {
+			int count = TxDescriptors[bd];
+			igb_validate_option(&count, &opt, adapter);
+			tx_ring->count = count;
+			tx_ring->count = ALIGN(tx_ring->count,
+			                       REQ_TX_DESCRIPTOR_MULTIPLE);
+		} else {
+			tx_ring->count = opt.def;
+		}
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct igb_option opt = {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(IGB_DEFAULT_RXD),
+			.def  = IGB_DEFAULT_RXD,
+			.arg  = { .r = { .min = IGB_MIN_RXD }}
+		};
+		struct igb_ring *rx_ring = adapter->rx_ring;
+		int i;
+		opt.arg.r.max = IGB_MAX_RXD;
+
+		if (num_RxDescriptors > bd) {
+			int count = RxDescriptors[bd];
+			igb_validate_option(&count, &opt, adapter);
+			rx_ring->count = count;
+			rx_ring->count = ALIGN(rx_ring->count,
+			                       REQ_RX_DESCRIPTOR_MULTIPLE);
+		} else {
+			rx_ring->count = opt.def;
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		struct igb_option opt = {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_XsumRX > bd) {
+			int rx_csum = XsumRX[bd];
+			igb_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+	}
+	{ /* Flow Control */
+
+		struct igb_opt_list fc_list[] = {
+			{ e1000_fc_none,     "Flow Control Disabled" },
+			{ e1000_fc_rx_pause, "Flow Control Receive Only" },
+			{ e1000_fc_tx_pause, "Flow Control Transmit Only" },
+			{ e1000_fc_full,     "Flow Control Enabled" },
+			{ e1000_fc_default,  "Flow Control Hardware Default" }
+		};
+
+		struct igb_option opt = {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = e1000_fc_default,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+		if (num_FlowControl > bd) {
+			int fc = FlowControl[bd];
+			igb_validate_option(&fc, &opt, adapter);
+			hw->mac.original_fc = fc;
+			hw->mac.fc = fc;
+		} else {
+			hw->mac.original_fc = opt.def;
+			hw->mac.fc = opt.def;
+		}
+	}
+	{ /* Interrupt Throttling Rate */
+		struct igb_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+		if (num_InterruptThrottleRate > bd) {
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+				        opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = IGB_START_ITR;
+				break;
+			case 3:
+				DPRINTK(PROBE, INFO,
+				        "%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = IGB_START_ITR;
+				break;
+			default:
+				igb_validate_option(&adapter->itr, &opt,
+				        adapter);
+				/* Save the setting because the dynamic bits
+				 * change itr; clear the lower two bits since
+				 * they are used as control.
+				 */
+				adapter->itr_setting = adapter->itr & ~3;
+				break;
+			}
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 8000;
+		}
+	}
+	{ /* Low Latency Interrupt TCP Port*/
+		struct igb_option opt = {
+			.type = range_option,
+			.name = "Low Latency Interrupt TCP Port",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_LLIPORT),
+			.def  = DEFAULT_LLIPORT,
+			.arg  = { .r = { .min = MIN_LLIPORT,
+					 .max = MAX_LLIPORT }}
+		};
+
+		if (num_LLIPort > bd) {
+			adapter->lli_port = LLIPort[bd];
+			if (adapter->lli_port) {
+				igb_validate_option(&adapter->lli_port, &opt,
+				        adapter);
+			} else {
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+					opt.name);
+			}
+		} else {
+			adapter->lli_port = opt.def;
+		}
+	}
+	{ /* Low Latency Interrupt on Packet Size */
+		struct igb_option opt = {
+			.type = range_option,
+			.name = "Low Latency Interrupt on Packet Size",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_LLISIZE),
+			.def  = DEFAULT_LLISIZE,
+			.arg  = { .r = { .min = MIN_LLISIZE,
+					 .max = MAX_LLISIZE }}
+		};
+
+		if (num_LLISize > bd) {
+			adapter->lli_size = LLISize[bd];
+			if (adapter->lli_size) {
+				igb_validate_option(&adapter->lli_size, &opt,
+				        adapter);
+			} else {
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+					opt.name);
+			}
+		} else {
+			adapter->lli_size = opt.def;
+		}
+	}
+	{ /* Low Latency Interrupt on TCP Push flag */
+		struct igb_option opt = {
+			.type = enable_option,
+			.name = "Low Latency Interrupt on TCP Push flag",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+		if (num_LLIPush > bd) {
+			int lli_push = LLIPush[bd];
+			igb_validate_option(&lli_push, &opt, adapter);
+			if (lli_push)
+				adapter->flags |= FLAG_IGB_LLI_PUSH;
+		} else {
+			if (opt.def == OPTION_ENABLED)
+				adapter->flags |= FLAG_IGB_LLI_PUSH;
+		}
+	}
+
+	switch (hw->media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		igb_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		igb_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * igb_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit igb_check_fiber_options(struct igb_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+	if (num_Speed > bd) {
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+	if (num_Duplex > bd) {
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * igb_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit igb_check_copper_options(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int speed, dplx, an;
+	int bd = adapter->bd_number;
+
+	{ /* Speed */
+		struct igb_opt_list speed_list[] = {{          0, "" },
+						    {   SPEED_10, "" },
+						    {  SPEED_100, "" },
+						    { SPEED_1000, "" }};
+
+		struct igb_option opt = {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+		if (num_Speed > bd) {
+			speed = Speed[bd];
+			igb_validate_option(&speed, &opt, adapter);
+		} else {
+			speed = opt.def;
+		}
+	}
+	{ /* Duplex */
+		struct igb_opt_list dplx_list[] = {{           0, "" },
+						   { HALF_DUPLEX, "" },
+						   { FULL_DUPLEX, "" }};
+
+		struct igb_option opt = {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_reset_block(hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+		if (num_Duplex > bd) {
+			dplx = Duplex[bd];
+			igb_validate_option(&dplx, &opt, adapter);
+		} else {
+			dplx = opt.def;
+		}
+	}
+
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
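+		/*
+		 * The AutoNeg values accepted below form a bit mask of the
+		 * advertised link modes, as spelled out in the table that
+		 * follows: bit 0 = 10/Half, bit 1 = 10/Full, bit 2 = 100/Half,
+		 * bit 3 = 100/Full and bit 5 = 1000/Full (1000/Half is absent
+		 * because it is not a supported combination).
+		 */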
+		struct igb_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		struct igb_option opt = {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+		if (num_AutoNeg > bd) {
+			an = AutoNeg[bd];
+			igb_validate_option(&an, &opt, adapter);
+		} else {
+			an = opt.def;
+		}
+		hw->phy.autoneg_advertised = an;
+	}
+
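+	/*
+	 * The requested speed and duplex values are summed so that each
+	 * supported combination is handled by a single case label below;
+	 * case 0 means neither was specified and full speed/duplex
+	 * autonegotiation is used.
+	 */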
+	switch (speed + dplx) {
+	case 0:
+		hw->mac.autoneg = 1;
+		adapter->fc_autoneg = 1;
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		hw->mac.autoneg = 1;
+		adapter->fc_autoneg = 1;
+		hw->phy.autoneg_advertised = ADVERTISE_10_HALF |
+		                             ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		hw->mac.autoneg = 1;
+		adapter->fc_autoneg = 1;
+		hw->phy.autoneg_advertised = ADVERTISE_10_FULL |
+		                             ADVERTISE_100_FULL |
+		                             ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		hw->mac.autoneg = 1;
+		adapter->fc_autoneg = 1;
+		hw->phy.autoneg_advertised = ADVERTISE_10_HALF |
+		                             ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		hw->mac.autoneg = 0;
+		adapter->fc_autoneg = 0;
+		hw->mac.forced_speed_duplex = ADVERTISE_10_HALF;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		hw->mac.autoneg = 0;
+		adapter->fc_autoneg = 0;
+		hw->mac.forced_speed_duplex = ADVERTISE_10_FULL;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		hw->mac.autoneg = 1;
+		adapter->fc_autoneg = 1;
+		hw->phy.autoneg_advertised = ADVERTISE_100_HALF |
+		                             ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		hw->mac.autoneg = 0;
+		adapter->fc_autoneg = 0;
+		hw->mac.forced_speed_duplex = ADVERTISE_100_HALF;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		hw->mac.autoneg = 0;
+		adapter->fc_autoneg = 0;
+		hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		goto full_duplex_only;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		/* fall through */
+	case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		hw->mac.autoneg = 1;
+		adapter->fc_autoneg = 1;
+		hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(hw) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
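+
+/*
+ * Example (hypothetical values): forcing the first port to 100 Mbps full
+ * duplex while leaving a second port autonegotiating could look like
+ *
+ *   modprobe igb Speed=100,0 Duplex=2,0
+ *
+ * assuming the usual comma separated per-adapter parameter convention and
+ * FULL_DUPLEX being defined as 2, as in the e1000 driver.
+ */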
+
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_nvm.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,46 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32  e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset,
+                          u16 words, u16 *data);
+s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE  0xDB00
+
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_api.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,121 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+void e1000_remove_device(struct e1000_hw *hw);
+s32  e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32  e1000_force_mac_fc(struct e1000_hw *hw);
+s32  e1000_check_for_link(struct e1000_hw *hw);
+s32  e1000_reset_hw(struct e1000_hw *hw);
+s32  e1000_init_hw(struct e1000_hw *hw);
+s32  e1000_setup_link(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
+                                u16 *duplex);
+s32  e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_mc_addr_list_update(struct e1000_hw *hw,
+                               u8 *mc_addr_list, u32 mc_addr_count,
+                               u32 rar_used_count, u32 rar_count);
+s32  e1000_cleanup_led(struct e1000_hw *hw);
+s32  e1000_check_reset_block(struct e1000_hw *hw);
+s32  e1000_blink_led(struct e1000_hw *hw);
+s32  e1000_led_on(struct e1000_hw *hw);
+s32  e1000_led_off(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32  e1000_get_cable_length(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+                               u32 offset, u8 data);
+s32  e1000_get_phy_info(struct e1000_hw *hw);
+s32  e1000_phy_hw_reset(struct e1000_hw *hw);
+s32  e1000_phy_commit(struct e1000_hw *hw);
+s32  e1000_read_mac_addr(struct e1000_hw *hw);
+s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+                     u16 *data);
+s32  e1000_wait_autoneg(struct e1000_hw *hw);
+s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      a = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the RX descriptor with EOP set
+ *      errors = the 8 bit error field of the RX descriptor with EOP set
+ *      length = the sum of all the length fields of the RX descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *
+ * The minimum and maximum acceptable frame lengths are taken from
+ * a->mac.min_frame_size and a->mac.max_frame_size respectively.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT(adapter, status, errors, length, last_byte)) {
+ *      accept_frame = 1;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = 0;
+ *  }
+ *  ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte) \
+    (e1000_tbi_sbp_enabled_82543(a) && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > ((a)->mac.min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= ((a)->mac.max_frame_size + 1))) : \
+          (((length) > (a)->mac.min_frame_size) && \
+           ((length) <= ((a)->mac.max_frame_size + VLAN_TAG_SIZE + 1)))))
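+
+/*
+ * Note: TBI_ACCEPT and the 82543 specific helper it references are carried
+ * over from the shared e1000 code base; the TBI/carrier extension
+ * workaround it implements is specific to older 82543 class adapters and
+ * is not expected to be exercised by the 82575 parts this driver handles.
+ */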
+
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_mac.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,71 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+/* Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32  e1000_blink_led_generic(struct e1000_hw *hw);
+s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+s32  e1000_id_led_init_generic(struct e1000_hw *hw);
+s32  e1000_led_on_generic(struct e1000_hw *hw);
+s32  e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_mc_addr_list_update_generic(struct e1000_hw *hw,
+	                               u8 *mc_addr_list, u32 mc_addr_count,
+	                               u32 rar_used_count, u32 rar_count);
+s32  e1000_setup_led_generic(struct e1000_hw *hw);
+s32  e1000_setup_link_generic(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                       u32 offset, u8 data);
+
+u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000_remove_device_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
--- /dev/null	2007-08-07 08:02:19.913610636 -0400
+++ linux-2.6.18.i386/drivers/net/igb/e1000_hw.h	2007-08-07 17:33:29.000000000 -0400
@@ -0,0 +1,613 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82575EB_COPPER           0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
+
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+
+enum e1000_mac_type {
+	e1000_undefined = 0,
+	e1000_82575,
+	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_eeprom_microwire,
+	e1000_nvm_flash_hw,
+	e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large,
+	e1000_nvm_override_microwire_small,
+	e1000_nvm_override_microwire_large
+};
+
+enum e1000_phy_type {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+};
+
+enum e1000_bus_type {
+	e1000_bus_type_unknown = 0,
+	e1000_bus_type_pci,
+	e1000_bus_type_pcix,
+	e1000_bus_type_pci_express,
+	e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+	e1000_bus_speed_unknown = 0,
+	e1000_bus_speed_33,
+	e1000_bus_speed_66,
+	e1000_bus_speed_100,
+	e1000_bus_speed_120,
+	e1000_bus_speed_133,
+	e1000_bus_speed_2500,
+	e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+};
+
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+	u64 buffer_addr; /* Address of the descriptor's data buffer */
+	u16 length;      /* Length of data DMAed into data buffer */
+	u16 csum;        /* Packet checksum */
+	u8  status;      /* Descriptor status */
+	u8  errors;      /* Descriptor Errors */
+	u16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+	struct {
+		u64 buffer_addr;
+		u64 reserved;
+	} read;
+	struct {
+		struct {
+			u32 mrq;              /* Multiple Rx Queues */
+			union {
+				u32 rss;            /* RSS Hash */
+				struct {
+					u16 ip_id;  /* IP id */
+					u16 csum;   /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length;
+			u16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+	struct {
+		/* one buffer for protocol header(s), three data buffers */
+		u64 buffer_addr[MAX_PS_BUFFERS];
+	} read;
+	struct {
+		struct {
+			u32 mrq;              /* Multiple Rx Queues */
+			union {
+				u32 rss;              /* RSS Hash */
+				struct {
+					u16 ip_id;    /* IP id */
+					u16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length0;          /* length of buffer 0 */
+			u16 vlan;             /* VLAN tag */
+		} middle;
+		struct {
+			u16 header_status;
+			u16 length[3];        /* length of buffers 1-3 */
+		} upper;
+		u64 reserved;
+	} wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+	u64 buffer_addr;      /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 cso;        /* Checksum offset */
+			u8 cmd;        /* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 css;        /* Checksum start */
+			u16 special;
+		} fields;
+	} upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+	union {
+		u32 ip_config;
+		struct {
+			u8 ipcss;      /* IP checksum start */
+			u8 ipcso;      /* IP checksum offset */
+			u16 ipcse;     /* IP checksum end */
+		} ip_fields;
+	} lower_setup;
+	union {
+		u32 tcp_config;
+		struct {
+			u8 tucss;      /* TCP checksum start */
+			u8 tucso;      /* TCP checksum offset */
+			u16 tucse;     /* TCP checksum end */
+		} tcp_fields;
+	} upper_setup;
+	u32 cmd_and_length;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 hdr_len;    /* Header length */
+			u16 mss;       /* Maximum segment size */
+		} fields;
+	} tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	u64 buffer_addr;   /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 typ_len_ext;
+			u8 cmd;
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 popts;      /* Packet Options */
+			u16 special;
+		} fields;
+	} upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorcl;
+	u64 gorch;
+	u64 gotcl;
+	u64 gotch;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 torl;
+	u64 torh;
+	u64 totl;
+	u64 toth;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+	u64 cbtmpc;
+	u64 htdpmc;
+	u64 cbrdpc;
+	u64 cbrmpc;
+	u64 rpthc;
+	u64 hgptc;
+	u64 htcbdpc;
+	u64 hgorcl;
+	u64 hgorch;
+	u64 hgotcl;
+	u64 hgotch;
+	u64 lenerrs;
+	u64 scvpc;
+	u64 hrmpc;
+};
+
+struct e1000_phy_stats {
+	u32 idle_errors;
+	u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+
+struct e1000_functions {
+	/* Function pointers for the MAC. */
+	s32  (*init_mac_params)(struct e1000_hw *);
+	s32  (*blink_led)(struct e1000_hw *);
+	s32  (*check_for_link)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *hw);
+	s32  (*cleanup_led)(struct e1000_hw *);
+	void (*clear_hw_cntrs)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
+	s32  (*get_bus_info)(struct e1000_hw *);
+	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+	s32  (*led_on)(struct e1000_hw *);
+	s32  (*led_off)(struct e1000_hw *);
+	void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32,
+	                                 u32);
+	void (*remove_device)(struct e1000_hw *);
+	s32  (*reset_hw)(struct e1000_hw *);
+	s32  (*init_hw)(struct e1000_hw *);
+	s32  (*setup_link)(struct e1000_hw *);
+	s32  (*setup_physical_interface)(struct e1000_hw *);
+	s32  (*setup_led)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
+	void (*mta_set)(struct e1000_hw *, u32);
+	void (*config_collision_dist)(struct e1000_hw *);
+	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	s32  (*validate_mdi_setting)(struct e1000_hw *);
+	s32  (*mng_host_if_write)(struct e1000_hw *, u8 *, u16, u16, u8 *);
+	s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
+                           struct e1000_host_mng_command_header *);
+	s32  (*mng_enable_host_if)(struct e1000_hw *);
+	s32  (*wait_autoneg)(struct e1000_hw *);
+
+	/* Function pointers for the PHY. */
+	s32  (*init_phy_params)(struct e1000_hw *);
+	s32  (*acquire_phy)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
+	s32  (*check_reset_block)(struct e1000_hw *);
+	s32  (*commit_phy)(struct e1000_hw *);
+	s32  (*force_speed_duplex)(struct e1000_hw *);
+	s32  (*get_cfg_done)(struct e1000_hw *hw);
+	s32  (*get_cable_length)(struct e1000_hw *);
+	s32  (*get_phy_info)(struct e1000_hw *);
+	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	void (*release_phy)(struct e1000_hw *);
+	s32  (*reset_phy)(struct e1000_hw *);
+	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+
+	/* Function pointers for the NVM. */
+	s32  (*init_nvm_params)(struct e1000_hw *);
+	s32  (*acquire_nvm)(struct e1000_hw *);
+	s32  (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release_nvm)(struct e1000_hw *);
+	void (*reload_nvm)(struct e1000_hw *);
+	s32  (*update_nvm)(struct e1000_hw *);
+	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+	s32  (*validate_nvm)(struct e1000_hw *);
+	s32  (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
+};
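+
+/*
+ * These function pointer tables are expected to be filled in by the device
+ * specific setup code (see e1000_setup_init_funcs() declared in
+ * e1000_api.h) so that the generic wrappers can dispatch MAC, PHY and NVM
+ * operations to the 82575 implementations without compile time binding.
+ */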
+
+struct e1000_mac_info {
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	enum e1000_mac_type type;
+	enum e1000_fc_mode  fc;
+	enum e1000_fc_mode  original_fc;
+
+	u32 collision_delta;
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 max_frame_size;
+	u32 mc_filter_type;
+	u32 min_frame_size;
+	u32 tx_packet_delta;
+	u32 txcw;
+
+	u16 current_ifs_val;
+	u16 ifs_max_val;
+	u16 ifs_min_val;
+	u16 ifs_ratio;
+	u16 ifs_step_size;
+	u16 mta_reg_count;
+	u16 rar_entry_count;
+	u16 fc_high_water;
+	u16 fc_low_water;
+	u16 fc_pause_time;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool arc_subsystem_valid;
+	bool asf_firmware_present;
+	bool autoneg;
+	bool autoneg_failed;
+	bool disable_av;
+	bool disable_hw_init_bits;
+	bool fc_send_xon;
+	bool fc_strict_ieee;
+	bool get_link_status;
+	bool ifs_params_forced;
+	bool in_ifs_mode;
+	bool report_tx_early;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+	enum e1000_phy_type type;
+
+	enum e1000_1000t_rx_status local_rx;
+	enum e1000_1000t_rx_status remote_rx;
+	enum e1000_ms_type ms_type;
+	enum e1000_ms_type original_ms_type;
+	enum e1000_rev_polarity cable_polarity;
+	enum e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool reset_disable;
+	bool speed_downgraded;
+	bool wait_for_link;
+};
+
+struct e1000_nvm_info {
+	enum e1000_nvm_type type;
+	enum e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	enum e1000_bus_type type;
+	enum e1000_bus_speed speed;
+	enum e1000_bus_width width;
+
+	u32 snoop;
+
+	u16 func;
+	u16 pci_cmd_word;
+};
+
+struct e1000_hw {
+	void *back;
+	void *dev_spec;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+	unsigned long io_base;
+
+	struct e1000_functions func;
+	struct e1000_mac_info  mac;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	enum e1000_media_type media_type;
+
+	u32 dev_spec_size;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+};
+
+/* These functions must be implemented by drivers */
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+s32  e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size);
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_free_dev_spec_struct(struct e1000_hw *hw);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif